/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the vcpu is executing in a
 * branch delay slot (Cause.BD set).
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
        unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /*
         * Read the instruction
         */
        insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /*
                 * jr and jalr are in r_format.
                 */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /*
                 * These are unconditional and in j_format.
                 */
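                /*
                 * A J/JAL target is a 26-bit word index within the 256MB
                 * region of the delay-slot PC: the top four bits of
                 * (epc + 4) are kept and target << 2 is OR-ed in below.
                 */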
        case jal_op:
                arch->gprs[31] = instpc + 8;
                /* fall through */
        case j_op:
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /*
                 * These are conditional and in i_format.
                 */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:           /* not really i_format */
        case blezl_op:
                /* rt field assumed to be zero */
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:
        case bgtzl_op:
                /* rt field assumed to be zero */
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /*
                 * And now the FPA/cp1 branch instructions.
                 */
        case cop1_op:
                printk("%s: unsupported cop1_op\n", __func__);
                break;
        }

        return nextpc;

unaligned:
        printk("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        printk("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}

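/*
 * When an exception is taken in a branch delay slot, Cause.BD is set and
 * EPC points at the branch instruction rather than the faulting one, so the
 * branch must be emulated to find the next PC; otherwise we simply step
 * over the instruction.
 */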
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else {
                vcpu->arch.pc += 4;
        }

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}

/*
 * Every time the COMPARE register is written to, we need to decide when to
 * fire the timer that represents timer ticks to the guest.
 */
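/*
 * Note: the fixed 10ms expiry used below is a coarse approximation of the
 * guest timer; it does not model the exact COUNT/COMPARE match time.
 */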
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        /* If COUNT is enabled */
        if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
                hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
                hrtimer_start(&vcpu->arch.comparecount_timer,
                              ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
        } else {
                hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
        }

        return er;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                       vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;

        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, WAIT_EXITS);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return er;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 * that we can catch it if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_FAIL;
        uint32_t pc = vcpu->arch.pc;

        printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        enum emulation_result er = EMULATE_DONE;
        struct kvm_mips_tlb *tlb = NULL;
        uint32_t pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                printk("%s: illegal index: %d\n", __func__, index);
                printk
                    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                     pc, index, kvm_read_c0_guest_entryhi(cop0),
                     kvm_read_c0_guest_entrylo0(cop0),
                     kvm_read_c0_guest_entrylo1(cop0),
                     kvm_read_c0_guest_pagemask(cop0));
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];
#if 1
        /*
         * Probe the shadow host TLB for the entry being overwritten; if one
         * matches, invalidate it.
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug
            ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
             pc, index, kvm_read_c0_guest_entryhi(cop0),
             kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
             kvm_read_c0_guest_pagemask(cop0));

        return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        struct kvm_mips_tlb *tlb = NULL;
        uint32_t pc = vcpu->arch.pc;
        int index;

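        /*
         * Pick a pseudo-random victim index, standing in for the CP0 Random
         * register; masking with (TLB size - 1) assumes the guest TLB size
         * is a power of two.
         */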
#if 1
        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
        index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                printk("%s: illegal index: %d\n", __func__, index);
                return EMULATE_FAIL;
        }

        tlb = &vcpu->arch.guest_tlb[index];

#if 1
        /*
         * Probe the shadow host TLB for the entry being overwritten; if one
         * matches, invalidate it.
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug
            ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
             pc, index, kvm_read_c0_guest_entryhi(cop0),
             kvm_read_c0_guest_entrylo0(cop0),
             kvm_read_c0_guest_entrylo1(cop0));

        return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        enum emulation_result er = EMULATE_DONE;
        uint32_t pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return er;
}

enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
                     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        int32_t rt, rd, copz, sel, co_bit, op;
        uint32_t pc = vcpu->arch.pc;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

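        /*
         * Standard COP0 field layout: copz is the rs field (bits 25:21,
         * selecting MFC0/MTC0/etc.), rt and rd are the GPR and CP0 register
         * numbers, sel picks the register's sub-select, and the CO bit
         * (bit 25) marks a co-processor function such as TLBWI or ERET.
         */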
        copz = (inst >> 21) & 0x1f;
        rt = (inst >> 16) & 0x1f;
        rd = (inst >> 11) & 0x1f;
        sel = inst & 0x7;
        co_bit = (inst >> 25) & 1;

        /* Verify that the register is valid */
        if (rd > MIPS_CP0_DESAVE) {
                printk("Invalid rd: %d\n", rd);
                er = EMULATE_FAIL;
                goto done;
        }

        if (co_bit) {
                op = inst & 0xff;

                switch (op) {
                case tlbr_op:   /*  Read indexed TLB entry  */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /*  Write indexed  */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /*  Write random  */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        printk("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                switch (copz) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                /*
                                 * XXXKYMA: Run the Guest count register @ 1/4
                                 * the rate of the host.
                                 */
                                vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        kvm_debug
                            ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
                             pc, rd, sel, rt, vcpu->arch.gprs[rt]);

                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                printk("Invalid TLB Index: %ld\n",
                                       vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                printk("MTCz, cop0->reg[EBASE]: %#lx\n",
                                       kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                uint32_t nasid =
                                    vcpu->arch.gprs[rt] & ASID_MASK;
                                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
                                    ((kvm_read_c0_guest_entryhi(cop0) &
                                      ASID_MASK) != nasid)) {

                                        kvm_debug
                                            ("MTCz, change ASID from %#lx to %#lx\n",
                                             kvm_read_c0_guest_entryhi(cop0) &
                                             ASID_MASK,
                                             vcpu->arch.gprs[rt] & ASID_MASK);

                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        } else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                /*
                                 * Are we writing to COUNT?  Linux doesn't
                                 * seem to write into COUNT, so ignore such
                                 * writes for now (EMULATE_FAIL is left
                                 * commented out in case we ever want to trap
                                 * them).
                                 */
                                /* er = EMULATE_FAIL; */
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
                                          pc, kvm_read_c0_guest_compare(cop0),
                                          vcpu->arch.gprs[rt]);

                                /*
                                 * If we are writing to COMPARE, clear any
                                 * pending timer interrupt.
                                 */
                                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                                kvm_write_c0_guest_compare(cop0,
                                                           vcpu->arch.gprs[rt]);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                kvm_write_c0_guest_status(cop0,
                                                          vcpu->arch.gprs[rt]);
                                /*
                                 * Make sure that CU1 and NMI bits are never
                                 * set.
                                 */
                                kvm_clear_c0_guest_status(cop0,
                                                          (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }

                        kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
                                  rd, sel, cop0->reg[rd][sel]);
                        break;

                case dmtc_op:
                        printk
                            ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                             vcpu->arch.pc, rt, rd, sel);
                        er = EMULATE_FAIL;
                        break;

                case mfmcz_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
                        if (rt != 0) {
                                vcpu->arch.gprs[rt] =
                                    kvm_read_c0_guest_status(cop0);
                        }
                        /* EI */
                        if (inst & 0x20) {
                                kvm_debug("[%#lx] mfmcz_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
                                kvm_debug("[%#lx] mfmcz_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }

                        break;

                case wrpgpr_op:
                        {
                                uint32_t css =
                                    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
                                uint32_t pss =
                                    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
                                /*
                                 * We don't support any shadow register sets,
                                 * so SRSCtl[PSS] == SRSCtl[CSS] == 0.
                                 */
                                if (css || pss) {
                                        er = EMULATE_FAIL;
                                        break;
                                }
                                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
                                          vcpu->arch.gprs[rt]);
                                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
                        }
                        break;
                default:
                        printk
                            ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
                             vcpu->arch.pc, copz);
                        er = EMULATE_FAIL;
                        break;
                }
        }

done:
        /*
         * Rollback PC only if emulation was unsuccessful
         */
        if (er == EMULATE_FAIL) {
                vcpu->arch.pc = curr_pc;
        }

dont_update_pc:
        /*
         * This is for special instructions whose emulation
         * updates the PC, so do not overwrite the PC under
         * any circumstances
         */

        return er;
}

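/*
 * Stores that fault on an unmapped (MMIO) address are not performed here:
 * the access is described in kvm_run->mmio (physical address, length, data)
 * and EMULATE_DO_MMIO is returned so the host exits to userspace, which
 * carries out the actual device write.
 */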
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        int32_t op, base, rt, offset;
        uint32_t bytes;
        void *data = run->mmio.data;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rt = (inst >> 16) & 0x1f;
        base = (inst >> 21) & 0x1f;
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;

        switch (op) {
        case sb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(u8 *) data = vcpu->arch.gprs[rt];
                kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
                          *(uint8_t *) data);

                break;

        case sw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(uint32_t *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(uint32_t *) data);
                break;

        case sh_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(uint16_t *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(uint16_t *) data);
                break;

        default:
                printk("Store not yet supported\n");
                er = EMULATE_FAIL;
                break;
        }

        /*
         * Rollback PC if emulation was unsuccessful
         */
        if (er == EMULATE_FAIL) {
                vcpu->arch.pc = curr_pc;
        }

        return er;
}

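/*
 * As with stores, MMIO loads are bounced to userspace via kvm_run->mmio.
 * pending_load_cause and io_gpr are stashed so that, once userspace has
 * filled in run->mmio.data, the completion path can write the target GPR
 * and retire the instruction; mmio_needed is set to 2 below for the signed
 * variants (lb/lh), which appears to tell the completion handler to
 * sign-extend the value.
 */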
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
                      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        int32_t op, base, rt, offset;
        uint32_t bytes;

        rt = (inst >> 16) & 0x1f;
        base = (inst >> 21) & 0x1f;
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;

        vcpu->arch.pending_load_cause = cause;
        vcpu->arch.io_gpr = rt;

        switch (op) {
        case lw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 0;
                break;

        case lh_op:
        case lhu_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;

                if (op == lh_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        case lbu_op:
        case lb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                   host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;

                if (op == lb_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        default:
                printk("Load not yet supported\n");
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
        unsigned long offset = (va & ~PAGE_MASK);
        struct kvm *kvm = vcpu->kvm;
        unsigned long pa;
        gfn_t gfn;
        pfn_t pfn;

        gfn = va >> PAGE_SHIFT;

        if (gfn >= kvm->arch.guest_pmap_npages) {
                printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                return -1;
        }
        pfn = kvm->arch.guest_pmap[gfn];
        pa = (pfn << PAGE_SHIFT) | offset;

        printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

        mips32_SyncICache(CKSEG0ADDR(pa), 32);
        return 0;
}

#define MIPS_CACHE_OP_INDEX_INV         0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
#define MIPS_CACHE_OP_IMP               0x3
#define MIPS_CACHE_OP_HIT_INV           0x4
#define MIPS_CACHE_OP_FILL_WB_INV       0x5
#define MIPS_CACHE_OP_HIT_HB            0x6
#define MIPS_CACHE_OP_FETCH_LOCK        0x7

#define MIPS_CACHE_ICACHE               0x0
#define MIPS_CACHE_DCACHE               0x1
#define MIPS_CACHE_SEC                  0x3

enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        extern void (*r4k_blast_dcache) (void);
        extern void (*r4k_blast_icache) (void);
        enum emulation_result er = EMULATE_DONE;
        int32_t offset, cache, op_inst, op, base;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

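        /*
         * The CACHE opcode encodes the operation in its rt field: the low
         * two bits select the cache (I/D/secondary) and the next three bits
         * select the operation, hence the shifts and masks below.
         */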
        base = (inst >> 21) & 0x1f;
        op_inst = (inst >> 16) & 0x1f;
        offset = inst & 0xffff;
        cache = (inst >> 16) & 0x3;
        op = (inst >> 18) & 0x7;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                  cache, op, base, arch->gprs[base], offset);

        /*
         * Treat INDEX_INV as a no-op: Linux issues it at boot to invalidate
         * the caches entirely by stepping through all the ways/indexes.
         */
        if (op == MIPS_CACHE_OP_INDEX_INV) {
                kvm_debug
                    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
                     arch->gprs[base], offset);

                if (cache == MIPS_CACHE_DCACHE)
                        r4k_blast_dcache();
                else if (cache == MIPS_CACHE_ICACHE)
                        r4k_blast_icache();
                else {
                        printk("%s: unsupported CACHE INDEX operation\n",
                               __func__);
                        return EMULATE_FAIL;
                }

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
                goto done;
        }

        preempt_disable();
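        /*
         * Before the host can operate on the cache line, a valid host TLB
         * mapping for va must exist; prime it from the guest KSEG0 mapping
         * or the guest TLB, faulting into the guest when no entry exists.
         */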
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
                if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
                        kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;

                /* If an entry already exists then skip */
                if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
                        goto skip_fault;

                /*
                 * If the address is not in the guest TLB, give the guest a
                 * TLB miss; the resulting guest handler will do the right
                 * thing.
                 */
                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
                                                  (kvm_read_c0_guest_entryhi
                                                   (cop0) & ASID_MASK));

                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
                        vcpu->arch.host_cp0_badvaddr = va;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
                        goto dont_update_pc;
                } else {
                        struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
                        /*
                         * Check if the entry is valid; if not, set up a TLB
                         * invalid exception to the guest.
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
                        } else {
                                /*
                                 * We fault an entry from the guest TLB to the
                                 * shadow host TLB.
                                 */
                                kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
                                                                     NULL,
                                                                     NULL);
                        }
                }
        } else {
                printk
                    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                     cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
                goto dont_update_pc;
        }

skip_fault:
        /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
        if (cache == MIPS_CACHE_DCACHE
            && (op == MIPS_CACHE_OP_FILL_WB_INV
                || op == MIPS_CACHE_OP_HIT_INV)) {
                flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /*
                 * Replace the CACHE instruction with a SYNCI; not identical,
                 * but it avoids a trap.
                 */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
                flush_dcache_line(va);
                flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /* Replace the CACHE instruction with a SYNCI */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else {
                printk
                    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                     cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
                goto dont_update_pc;
        }

        preempt_enable();

dont_update_pc:
        /*
         * Rollback PC
         */
        vcpu->arch.pc = curr_pc;
done:
        return er;
}

enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
                      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        uint32_t inst;

        /*
         * Fetch the instruction.
         */
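        /*
         * If the exception was taken in a branch delay slot, EPC (opc)
         * points at the branch, so the faulting instruction is the one
         * after it.
         */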
        if (cause & CAUSEF_BD)
                opc += 1;

        inst = kvm_get_inst(opc, vcpu);

        switch (((union mips_instruction)inst).r_format.opcode) {
        case cop0_op:
                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
                break;
        case sb_op:
        case sh_op:
        case sw_op:
                er = kvm_mips_emulate_store(inst, cause, run, vcpu);
                break;
        case lb_op:
        case lbu_op:
        case lhu_op:
        case lh_op:
        case lw_op:
                er = kvm_mips_emulate_load(inst, cause, run, vcpu);
                break;

        case cache_op:
                ++vcpu->stat.cache_exits;
                trace_kvm_exit(vcpu, CACHE_EXITS);
                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
                break;

        default:
                printk("Instruction emulation not supported (%p/%#x)\n", opc,
                       inst);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

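/*
 * The exception injectors below all follow the architectural pattern for
 * delivering an exception to the guest: save the PC to guest EPC, set
 * Status.EXL, mirror the branch-delay state into Cause.BD, set the
 * Cause ExcCode, and vector to the guest's general exception entry at
 * KSEG0 + 0x180 (or KSEG0 + 0x0 for a TLB refill taken with EXL clear).
 */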
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
                         struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (T_SYSCALL << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                printk("Trying to deliver SYSCALL when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
                            struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                /* set pc to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x0;

        } else {
                kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
                           struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi =
                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
                          arch->pc);

                /* set pc to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
                            struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x0;
        } else {
                kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
                           struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
                          arch->pc);

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        } else {
                kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
                          arch->pc);
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
                       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
        /*
         * Note: the original DEBUG block used entryhi/index without
         * declaring them; declare them here, mirroring the sibling
         * functions, so the block compiles when DEBUG is defined.
         */
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
        int index;

        /*
         * If the address is not in the guest TLB, then we are in trouble.
         */
        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
        if (index < 0) {
                /* XXXKYMA Invalidate and retry */
                kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
                kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
                        __func__, entryhi);
                kvm_mips_dump_guest_tlbs(vcpu);
                kvm_mips_dump_host_tlbs();
                return EMULATE_FAIL;
        }
#endif

        er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
        return er;
}

1356 enum emulation_result
1357 kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1358                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1359 {
1360         struct mips_coproc *cop0 = vcpu->arch.cop0;
1361         unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1362                                 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1363         struct kvm_vcpu_arch *arch = &vcpu->arch;
1364         enum emulation_result er = EMULATE_DONE;
1365
1366         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1367                 /* save old pc */
1368                 kvm_write_c0_guest_epc(cop0, arch->pc);
1369                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1370
1371                 if (cause & CAUSEF_BD)
1372                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1373                 else
1374                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1375
1376                 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1377                           arch->pc);
1378
1379                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1380         } else {
1381                 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1382                           arch->pc);
1383                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1384         }
1385
1386         kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1387
1388         /* setup badvaddr, context and entryhi registers for the guest */
1389         kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1390         /* XXXKYMA: is the context register used by linux??? */
1391         kvm_write_c0_guest_entryhi(cop0, entryhi);
1392         /* Blow away the shadow host TLBs */
1393         kvm_mips_flush_host_tlb(1);
1394
1395         return er;
1396 }
1397
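/*
 * Deliver a Coprocessor Unusable exception for the FPU: same EPC/EXL and
 * Cause.BD bookkeeping as the TLB handlers above, but with ExcCode set to
 * CpU and Cause.CE = 1 so the guest attributes the fault to coprocessor 1
 * (the FPU).
 */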
1398 enum emulation_result
1399 kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1400                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1401 {
1402         struct mips_coproc *cop0 = vcpu->arch.cop0;
1403         struct kvm_vcpu_arch *arch = &vcpu->arch;
1404         enum emulation_result er = EMULATE_DONE;
1405
1406         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1407                 /* save old pc */
1408                 kvm_write_c0_guest_epc(cop0, arch->pc);
1409                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1410
1411                 if (cause & CAUSEF_BD)
1412                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1413                 else
1414                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1415
1416         }
1417
1418         arch->pc = KVM_GUEST_KSEG0 + 0x180;
1419
1420         kvm_change_c0_guest_cause(cop0, (0xff),
1421                                   (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1422         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1423
1424         return er;
1425 }
1426
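/*
 * Deliver a Reserved Instruction exception to the guest. Unlike the TLB
 * handlers above, this fails the emulation outright if EXL is already set
 * instead of re-vectoring the guest.
 */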
1427 enum emulation_result
1428 kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1429                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1430 {
1431         struct mips_coproc *cop0 = vcpu->arch.cop0;
1432         struct kvm_vcpu_arch *arch = &vcpu->arch;
1433         enum emulation_result er = EMULATE_DONE;
1434
1435         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1436                 /* save old pc */
1437                 kvm_write_c0_guest_epc(cop0, arch->pc);
1438                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1439
1440                 if (cause & CAUSEF_BD)
1441                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1442                 else
1443                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1444
1445                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1446
1447                 kvm_change_c0_guest_cause(cop0, (0xff),
1448                                           (T_RES_INST << CAUSEB_EXCCODE));
1449
1450                 /* Set PC to the exception entry point */
1451                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1452
1453         } else {
1454                 kvm_err("Trying to deliver RI when EXL is already set\n");
1455                 er = EMULATE_FAIL;
1456         }
1457
1458         return er;
1459 }
1460
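/*
 * Deliver a Breakpoint exception (e.g. from a guest BREAK instruction) to
 * the guest, following the same EXL/Cause.BD bookkeeping as the RI case.
 */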
1461 enum emulation_result
1462 kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1463                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1464 {
1465         struct mips_coproc *cop0 = vcpu->arch.cop0;
1466         struct kvm_vcpu_arch *arch = &vcpu->arch;
1467         enum emulation_result er = EMULATE_DONE;
1468
1469         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1470                 /* save old pc */
1471                 kvm_write_c0_guest_epc(cop0, arch->pc);
1472                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1473
1474                 if (cause & CAUSEF_BD)
1475                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1476                 else
1477                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1478
1479                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1480
1481                 kvm_change_c0_guest_cause(cop0, (0xff),
1482                                           (T_BREAK << CAUSEB_EXCCODE));
1483
1484                 /* Set PC to the exception entry point */
1485                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1486
1487         } else {
                kvm_err("Trying to deliver BP when EXL is already set\n");
1489                 er = EMULATE_FAIL;
1490         }
1491
1492         return er;
1493 }
1494
1495 /*
1496  * ll/sc, rdhwr, sync emulation
1497  */
1498
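/*
 * Masks and opcode values for picking apart the fixed fields of a MIPS
 * instruction word: OPCODE is bits 31:26, BASE/RT/RD are the register
 * fields, OFFSET is the low 16 bits, and FUNC is the minor opcode in
 * bits 5:0.
 */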
1499 #define OPCODE 0xfc000000
1500 #define BASE   0x03e00000
1501 #define RT     0x001f0000
1502 #define OFFSET 0x0000ffff
1503 #define LL     0xc0000000
1504 #define SC     0xe0000000
1505 #define SPEC0  0x00000000
1506 #define SPEC3  0x7c000000
1507 #define RD     0x0000f800
1508 #define FUNC   0x0000003f
1509 #define SYNC   0x0000000f
1510 #define RDHWR  0x0000003b
1511
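/*
 * A Reserved Instruction exception from the guest is most commonly a RDHWR
 * executed in guest user mode. Emulate the hardware registers the guest may
 * legitimately read (CPUNum, SYNCI step, Count, Count resolution and
 * UserLocal); anything else is reflected back as a guest RI exception after
 * rolling the PC back.
 */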
1512 enum emulation_result
1513 kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1514                    struct kvm_run *run, struct kvm_vcpu *vcpu)
1515 {
1516         struct mips_coproc *cop0 = vcpu->arch.cop0;
1517         struct kvm_vcpu_arch *arch = &vcpu->arch;
1518         enum emulation_result er = EMULATE_DONE;
1519         unsigned long curr_pc;
1520         uint32_t inst;
1521
1522         /*
1523          * Update PC and hold onto current PC in case there is
1524          * an error and we want to rollback the PC
1525          */
1526         curr_pc = vcpu->arch.pc;
1527         er = update_pc(vcpu, cause);
1528         if (er == EMULATE_FAIL)
1529                 return er;
1530
1531         /*
1532          *  Fetch the instruction.
1533          */
1534         if (cause & CAUSEF_BD)
1535                 opc += 1;
1536
1537         inst = kvm_get_inst(opc, vcpu);
1538
1539         if (inst == KVM_INVALID_INST) {
                kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
1541                 return EMULATE_FAIL;
1542         }
1543
1544         if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1545                 int rd = (inst & RD) >> 11;
1546                 int rt = (inst & RT) >> 16;
1547                 switch (rd) {
1548                 case 0: /* CPU number */
1549                         arch->gprs[rt] = 0;
1550                         break;
1551                 case 1: /* SYNCI length */
1552                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1553                                              current_cpu_data.icache.linesz);
1554                         break;
1555                 case 2: /* Read count register */
                        kvm_debug("RDHWR: Count register\n");
1557                         arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1558                         break;
1559                 case 3: /* Count register resolution */
1560                         switch (current_cpu_data.cputype) {
1561                         case CPU_20KC:
1562                         case CPU_25KF:
1563                                 arch->gprs[rt] = 1;
1564                                 break;
1565                         default:
1566                                 arch->gprs[rt] = 2;
1567                         }
1568                         break;
1569                 case 29:
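                        /* HWR 29 is UserLocal (ULR), the TLS pointer register */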
1570 #if 1
1571                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1572 #else
1573                         /* UserLocal not implemented */
1574                         er = EMULATE_FAIL;
1575 #endif
1576                         break;
1577
1578                 default:
1579                         kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
1580                         er = EMULATE_FAIL;
1581                         break;
1582                 }
1583         } else {
1584                 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
1585                 er = EMULATE_FAIL;
1586         }
1587
1588         /*
1589          * Rollback PC only if emulation was unsuccessful
1590          */
1591         if (er == EMULATE_FAIL) {
1592                 vcpu->arch.pc = curr_pc;
1593                 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1594         }
1595         return er;
1596 }
1597
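/*
 * Complete an MMIO load once userspace has filled in run->mmio.data:
 * advance the guest PC past the load (honouring a branch delay slot
 * recorded in pending_load_cause) and write the data into the destination
 * GPR at the load's width, sign-extending when vcpu->mmio_needed == 2.
 */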
1598 enum emulation_result
1599 kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1600 {
1601         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1602         enum emulation_result er = EMULATE_DONE;
1603         unsigned long curr_pc;
1604
1605         if (run->mmio.len > sizeof(*gpr)) {
                kvm_err("Bad MMIO length: %d\n", run->mmio.len);
1607                 er = EMULATE_FAIL;
1608                 goto done;
1609         }
1610
1611         /*
1612          * Update PC and hold onto current PC in case there is
1613          * an error and we want to rollback the PC
1614          */
1615         curr_pc = vcpu->arch.pc;
1616         er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1617         if (er == EMULATE_FAIL)
1618                 return er;
1619
1620         switch (run->mmio.len) {
1621         case 4:
1622                 *gpr = *(int32_t *) run->mmio.data;
1623                 break;
1624
1625         case 2:
                if (vcpu->mmio_needed == 2)
                        *gpr = *(int16_t *) run->mmio.data;
                else
                        *gpr = *(uint16_t *) run->mmio.data;
1630
1631                 break;
1632         case 1:
1633                 if (vcpu->mmio_needed == 2)
1634                         *gpr = *(int8_t *) run->mmio.data;
1635                 else
1636                         *gpr = *(u8 *) run->mmio.data;
1637                 break;
1638         }
1639
1640         if (vcpu->arch.pending_load_cause & CAUSEF_BD)
                kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
                          vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
                          vcpu->mmio_needed);
1645
1646 done:
1647         return er;
1648 }
1649
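/*
 * Generic guest exception delivery: re-inject the ExcCode taken from
 * 'cause' into the guest with the usual EPC/EXL/Cause.BD bookkeeping.
 * Used below to reflect privilege failures back into the guest.
 */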
1650 static enum emulation_result
1651 kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1652                      struct kvm_run *run, struct kvm_vcpu *vcpu)
1653 {
1654         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1655         struct mips_coproc *cop0 = vcpu->arch.cop0;
1656         struct kvm_vcpu_arch *arch = &vcpu->arch;
1657         enum emulation_result er = EMULATE_DONE;
1658
1659         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1660                 /* save old pc */
1661                 kvm_write_c0_guest_epc(cop0, arch->pc);
1662                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1663
1664                 if (cause & CAUSEF_BD)
1665                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1666                 else
1667                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1668
1669                 kvm_change_c0_guest_cause(cop0, (0xff),
1670                                           (exccode << CAUSEB_EXCCODE));
1671
1672                 /* Set PC to the exception entry point */
1673                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1674                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1675
1676                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1677                           exccode, kvm_read_c0_guest_epc(cop0),
1678                           kvm_read_c0_guest_badvaddr(cop0));
1679         } else {
                kvm_err("Trying to deliver EXC when EXL is already set\n");
1681                 er = EMULATE_FAIL;
1682         }
1683
1684         return er;
1685 }
1686
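/*
 * Sanity-check an exception taken while the guest was in user mode: guest
 * user accesses to the guest kernel segments are converted to address
 * errors, and anything else user mode should not trigger is turned into
 * EMULATE_PRIV_FAIL and delivered to the guest via kvm_mips_emulate_exc().
 */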
1687 enum emulation_result
1688 kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1689                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1690 {
1691         enum emulation_result er = EMULATE_DONE;
1692         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1693         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1694
1695         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1696
1697         if (usermode) {
1698                 switch (exccode) {
1699                 case T_INT:
1700                 case T_SYSCALL:
1701                 case T_BREAK:
1702                 case T_RES_INST:
1703                         break;
1704
1705                 case T_COP_UNUSABLE:
1706                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1707                                 er = EMULATE_PRIV_FAIL;
1708                         break;
1709
1710                 case T_TLB_MOD:
1711                         break;
1712
                case T_TLB_LD_MISS:
                        /*
                         * If we are accessing Guest kernel space, send an
                         * address error exception to the guest.
                         */
                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
                                kvm_debug("%s: LD MISS @ %#lx\n", __func__,
                                          badvaddr);
1718                                 cause &= ~0xff;
1719                                 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1720                                 er = EMULATE_PRIV_FAIL;
1721                         }
1722                         break;
1723
                case T_TLB_ST_MISS:
                        /*
                         * If we are accessing Guest kernel space, send an
                         * address error exception to the guest.
                         */
                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
                                kvm_debug("%s: ST MISS @ %#lx\n", __func__,
                                          badvaddr);
1729                                 cause &= ~0xff;
1730                                 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1731                                 er = EMULATE_PRIV_FAIL;
1732                         }
1733                         break;
1734
1735                 case T_ADDR_ERR_ST:
                        kvm_debug("%s: address error ST @ %#lx\n", __func__,
                                  badvaddr);
1738                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1739                                 cause &= ~0xff;
1740                                 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1741                         }
1742                         er = EMULATE_PRIV_FAIL;
1743                         break;
1744                 case T_ADDR_ERR_LD:
                        kvm_debug("%s: address error LD @ %#lx\n", __func__,
                                  badvaddr);
1747                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1748                                 cause &= ~0xff;
1749                                 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1750                         }
1751                         er = EMULATE_PRIV_FAIL;
1752                         break;
1753                 default:
1754                         er = EMULATE_PRIV_FAIL;
1755                         break;
1756                 }
1757         }
1758
1759         if (er == EMULATE_PRIV_FAIL) {
1760                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1761         }
1762         return er;
1763 }
1764
/*
 * User Address (UA) fault. This can happen if:
 * (1) the TLB entry is not present/valid in both the Guest and shadow host
 *     TLBs; in this case we pass the fault on to the guest kernel and let
 *     it handle it.
 * (2) the TLB entry is present in the Guest TLB but not in the shadow host
 *     TLB; in this case we inject the entry from the Guest TLB into the
 *     shadow host TLB.
 */
1771 enum emulation_result
1772 kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1773                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1774 {
1775         enum emulation_result er = EMULATE_DONE;
1776         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1777         unsigned long va = vcpu->arch.host_cp0_badvaddr;
1778         int index;
1779
1780         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1781                   vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1782
        /*
         * KVM would not have got the exception if this entry was valid in
         * the shadow host TLB. Check the Guest TLB; if the entry is not
         * there then send the guest an exception. The guest exception
         * handler should then inject an entry into the guest TLB.
         */
1788         index = kvm_mips_guest_tlb_lookup(vcpu,
1789                                           (va & VPN2_MASK) |
1790                                           (kvm_read_c0_guest_entryhi
1791                                            (vcpu->arch.cop0) & ASID_MASK));
1792         if (index < 0) {
1793                 if (exccode == T_TLB_LD_MISS) {
1794                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1795                 } else if (exccode == T_TLB_ST_MISS) {
1796                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1797                 } else {
                        kvm_err("%s: invalid exc code: %d\n", __func__,
                                exccode);
1799                         er = EMULATE_FAIL;
1800                 }
1801         } else {
1802                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1803
                /*
                 * Check if the entry is valid; if not then set up a TLB
                 * invalid exception for delivery to the guest.
                 */
1805                 if (!TLB_IS_VALID(*tlb, va)) {
1806                         if (exccode == T_TLB_LD_MISS) {
1807                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1808                                                                 vcpu);
1809                         } else if (exccode == T_TLB_ST_MISS) {
1810                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1811                                                                 vcpu);
1812                         } else {
                                kvm_err("%s: invalid exc code: %d\n",
                                        __func__, exccode);
1815                                 er = EMULATE_FAIL;
1816                         }
1817                 } else {
1818 #ifdef DEBUG
                        kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
                                  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1822 #endif
1823                         /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1824                         kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1825                                                              NULL);
1826                 }
1827         }
1828
1829         return er;
1830 }