arch/mips/kvm/kvm_mips_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"

/*
 * Compute the return address and emulate the branch, if required.
 * This function should only be called when the instruction being
 * emulated is in a branch delay slot.
 */
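/*
 * Worked example (editor's note, not in the original source): for a
 * conditional branch such as "beq" at address epc, a taken branch
 * resumes at epc + 4 + (sign-extended imm16 << 2), i.e. relative to
 * the delay-slot address, while a not-taken branch resumes at epc + 8,
 * just past the delay slot.
 */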
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
		arch->gprs[31] = instpc + 8;
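		/* Fall through to compute the jump target */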
	case j_op:
		epc += 4;
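		/*
		 * The J/JAL target lies in the same 256MB region as the
		 * delay slot: keep the top 4 bits of epc + 4 and insert
		 * the 26-bit target shifted left by 2.
		 */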
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/*
 * Every time the COMPARE register is written, we need to decide when to
 * fire the timer that delivers timer ticks to the guest.
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space
		 * to check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now
 * so that we can catch this if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		       pc, index, kvm_read_c0_guest_entryhi(cop0),
		       kvm_read_c0_guest_entrylo0(cop0),
		       kvm_read_c0_guest_entrylo1(cop0),
		       kvm_read_c0_guest_pagemask(cop0));
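		/* Mask off the P (probe-failure) bit and wrap into range */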
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	/*
	 * Probe the shadow host TLB for the entry being overwritten;
	 * if one matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten;
	 * if one matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}
enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;
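
	/*
	 * Note on the decode above (editor's comment): for MFC0/MTC0-class
	 * instructions, bits 25..21 select the sub-op (copz), bits 20..16
	 * the GPR (rt), bits 15..11 the CP0 register (rd), and bits 2..0
	 * the register select (sel); when bit 25 (CO) is set the low bits
	 * instead encode a CP0 function such as TLBWI or ERET.
	 */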

	/* Verify that the register is valid */
	if (rd > MIPS_CP0_DESAVE) {
		printk("Invalid rd: %d\n", rd);
		er = EMULATE_FAIL;
		goto done;
	}

	if (co_bit) {
		op = inst & 0xff;

		switch (op) {
		case tlbr_op:	/*  Read indexed TLB entry  */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/*  Write indexed  */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/*  Write random  */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
				  pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld\n",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);

				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
				     != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
						  ASID_MASK(vcpu->arch.gprs[rt]));

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/*
				 * Linux doesn't seem to write into COUNT;
				 * ignore such writes for now.
				 */
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_write_c0_guest_compare(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			printk("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			       vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI/DI: distinguished by the sc bit (bit 5) */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets,
				 * so SRSCtl[PSS] == SRSCtl[CSS] == 0.
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			printk("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			       vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

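	/*
	 * Each case below translates the faulting guest virtual address
	 * (host_cp0_badvaddr) into a guest physical address and fills in
	 * run->mmio with the data to write, so userspace can complete the
	 * access; the EMULATE_DO_MMIO result signals this to the caller.
	 */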
	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);
		break;

	default:
		printk("Store not yet supported\n");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

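	/*
	 * The destination register is stashed in io_gpr so the loaded
	 * value can be written back once userspace completes the read;
	 * in the cases below mmio_needed doubles as an extension flag
	 * (2 = sign-extend the value, 1 = zero-extend it).
	 */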
	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		printk("Load not yet supported\n");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	mips32_SyncICache(CKSEG0ADDR(pa), 32);
	return 0;
}

#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
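
/*
 * Worked example (editor's note): "cache 0x15, 0(a0)" puts 0x15 in the
 * rt field, i.e. op = 0x15 >> 2 = 0x5 (hit writeback/invalidate) on
 * cache = 0x15 & 0x3 = 0x1 (the D-cache).
 */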

enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;
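
	/*
	 * Note on the decode above (editor's comment): the CACHE rt field
	 * (bits 20..16, op_inst) packs the operation in bits 20..18 and
	 * the target cache in bits 17..16.
	 */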

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * INDEX_INV is issued by Linux on startup to invalidate the caches
	 * entirely by stepping through all the ways/indexes; rather than
	 * emulate each index operation, just blast the affected cache.
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If the address is not in the guest TLB, then give the
		 * guest a fault; the resulting handler will do the right
		 * thing.
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid; if not then set up
			 * a TLB invalid exception to the guest.
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest TLB to
				 * the shadow host TLB.
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		       cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction with a SYNCI; not the
		 * same, but it avoids a trap.
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		       cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}

enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;

	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

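/*
 * Editor's note: the exception-delivery helpers below all follow the
 * architected MIPS flow: save the old PC in the guest EPC, set
 * Status.EXL, mirror the branch-delay state into Cause.BD, write the
 * ExcCode field of Cause, and point the guest PC at its exception
 * vector (base + 0x180, or base + 0x0 for a TLB refill taken with EXL
 * clear).
 */
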
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		ASID_MASK(kvm_read_c0_guest_entryhi(cop0));

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	/*
	 * Editor's note: entryhi and index were used here without being
	 * declared; the declarations below mirror the sibling helpers
	 * and are the obvious intent.
	 */
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
	int index;

	/*
	 * If the address is not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
1394
1395 enum emulation_result
1396 kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1397                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1398 {
1399         struct mips_coproc *cop0 = vcpu->arch.cop0;
1400         struct kvm_vcpu_arch *arch = &vcpu->arch;
1401         enum emulation_result er = EMULATE_DONE;
1402
1403         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1404                 /* save old pc */
1405                 kvm_write_c0_guest_epc(cop0, arch->pc);
1406                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1407
1408                 if (cause & CAUSEF_BD)
1409                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1410                 else
1411                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1413         }
1414
1415         arch->pc = KVM_GUEST_KSEG0 + 0x180;
1416
1417         kvm_change_c0_guest_cause(cop0, (0xff),
1418                                   (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1419         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1420
1421         return er;
1422 }
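
/*
 * Note on the Cause.CE write above (worked bit arithmetic): the CE field
 * occupies Cause bits 29:28 (CAUSEB_CE == 28), so (0x1 << CAUSEB_CE)
 * encodes coprocessor unit 1, i.e. the FPU.  A guest handler recovers
 * the faulting unit with
 *
 *      ce = (cause & CAUSEF_CE) >> CAUSEB_CE;    which is 1 here
 *
 * and typically responds by setting Status.CU1 and restoring FPU state.
 */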
1423
1424 enum emulation_result
1425 kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1426                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1427 {
1428         struct mips_coproc *cop0 = vcpu->arch.cop0;
1429         struct kvm_vcpu_arch *arch = &vcpu->arch;
1430         enum emulation_result er = EMULATE_DONE;
1431
1432         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1433                 /* save old pc */
1434                 kvm_write_c0_guest_epc(cop0, arch->pc);
1435                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1436
1437                 if (cause & CAUSEF_BD)
1438                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1439                 else
1440                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1441
1442                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1443
1444                 kvm_change_c0_guest_cause(cop0, (0xff),
1445                                           (T_RES_INST << CAUSEB_EXCCODE));
1446
1447                 /* Set PC to the exception entry point */
1448                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1449
1450         } else {
1451                 kvm_err("Trying to deliver RI when EXL is already set\n");
1452                 er = EMULATE_FAIL;
1453         }
1454
1455         return er;
1456 }
1457
1458 enum emulation_result
1459 kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1460                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1461 {
1462         struct mips_coproc *cop0 = vcpu->arch.cop0;
1463         struct kvm_vcpu_arch *arch = &vcpu->arch;
1464         enum emulation_result er = EMULATE_DONE;
1465
1466         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1467                 /* save old pc */
1468                 kvm_write_c0_guest_epc(cop0, arch->pc);
1469                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1470
1471                 if (cause & CAUSEF_BD)
1472                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1473                 else
1474                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1475
1476                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1477
1478                 kvm_change_c0_guest_cause(cop0, (0xff),
1479                                           (T_BREAK << CAUSEB_EXCCODE));
1480
1481                 /* Set PC to the exception entry point */
1482                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1483
1484         } else {
1485                 kvm_err("Trying to deliver BP when EXL is already set\n");
1486                 er = EMULATE_FAIL;
1487         }
1488
1489         return er;
1490 }
1491
1492 /*
1493  * ll/sc, rdhwr, sync emulation
1494  */
1495
1496 #define OPCODE 0xfc000000
1497 #define BASE   0x03e00000
1498 #define RT     0x001f0000
1499 #define OFFSET 0x0000ffff
1500 #define LL     0xc0000000
1501 #define SC     0xe0000000
1502 #define SPEC0  0x00000000
1503 #define SPEC3  0x7c000000
1504 #define RD     0x0000f800
1505 #define FUNC   0x0000003f
1506 #define SYNC   0x0000000f
1507 #define RDHWR  0x0000003b
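
/*
 * Worked decode example (instruction word chosen for illustration): the
 * canonical userland TLS read "rdhwr $3, $29" assembles to 0x7c03e83b.
 * Against the masks above: (0x7c03e83b & OPCODE) == 0x7c000000 == SPEC3,
 * (0x7c03e83b & FUNC) == 0x3b == RDHWR, rd == (0x7c03e83b & RD) >> 11 ==
 * 29 and rt == (0x7c03e83b & RT) >> 16 == 3, so kvm_mips_handle_ri()
 * below takes its case-29 path and writes UserLocal into $3.
 */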
1508
1509 enum emulation_result
1510 kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1511                    struct kvm_run *run, struct kvm_vcpu *vcpu)
1512 {
1513         struct mips_coproc *cop0 = vcpu->arch.cop0;
1514         struct kvm_vcpu_arch *arch = &vcpu->arch;
1515         enum emulation_result er = EMULATE_DONE;
1516         unsigned long curr_pc;
1517         uint32_t inst;
1518
1519         /*
1520          * Update PC and hold onto current PC in case there is
1521          * an error and we want to rollback the PC
1522          */
1523         curr_pc = vcpu->arch.pc;
1524         er = update_pc(vcpu, cause);
1525         if (er == EMULATE_FAIL)
1526                 return er;
1527
1528         /*
1529          *  Fetch the instruction.
1530          */
1531         if (cause & CAUSEF_BD)
1532                 opc += 1;
1533
1534         inst = kvm_get_inst(opc, vcpu);
1535
1536         if (inst == KVM_INVALID_INST) {
1537                 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
1538                 return EMULATE_FAIL;
1539         }
1540
1541         if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1542                 int rd = (inst & RD) >> 11;
1543                 int rt = (inst & RT) >> 16;
1544                 switch (rd) {
1545                 case 0: /* CPU number */
1546                         arch->gprs[rt] = 0;
1547                         break;
1548                 case 1: /* SYNCI length */
1549                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1550                                              current_cpu_data.icache.linesz);
1551                         break;
1552                 case 2: /* Read count register */
1553                         kvm_debug("RDHWR: Count register\n");
1554                         arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1555                         break;
1556                 case 3: /* Count register resolution */
1557                         switch (current_cpu_data.cputype) {
1558                         case CPU_20KC:
1559                         case CPU_25KF:
1560                                 arch->gprs[rt] = 1;
1561                                 break;
1562                         default:
1563                                 arch->gprs[rt] = 2;
1564                         }
1565                         break;
1566                 case 29: /* UserLocal (TLS) register */
1567                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1568                         break;
1574
1575                 default:
1576                         kvm_err("RDHWR not supported\n");
1577                         er = EMULATE_FAIL;
1578                         break;
1579                 }
1580         } else {
1581                 kvm_err("Emulate RI not supported @ %p: %#x\n", opc, inst);
1582                 er = EMULATE_FAIL;
1583         }
1584
1585         /*
1586          * Rollback PC only if emulation was unsuccessful
1587          */
1588         if (er == EMULATE_FAIL) {
1589                 vcpu->arch.pc = curr_pc;
1590         }
1591         return er;
1592 }
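
/*
 * Typical trigger for the RDHWR path above (sketch): guest userland
 * fetches its TLS pointer with
 *
 *      rdhwr   $3, $29         # v1 = hardware register 29 (UserLocal)
 *
 * With the relevant HWREna bit clear while the guest runs, the CPU
 * raises a Reserved Instruction exception instead of completing the
 * read; the exit lands in kvm_mips_handle_ri(), which satisfies it from
 * the guest's CP0 UserLocal register and steps the PC past the
 * instruction via update_pc().
 */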
1593
1594 enum emulation_result
1595 kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1596 {
1597         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1598         enum emulation_result er = EMULATE_DONE;
1599         unsigned long curr_pc;
1600
1601         if (run->mmio.len > sizeof(*gpr)) {
1602                 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
1603                 er = EMULATE_FAIL;
1604                 goto done;
1605         }
1606
1607         /*
1608          * Update PC and hold onto current PC in case there is
1609          * an error and we want to rollback the PC
1610          */
1611         curr_pc = vcpu->arch.pc;
1612         er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1613         if (er == EMULATE_FAIL)
1614                 return er;
1615
1616         switch (run->mmio.len) {
1617         case 4:
1618                 *gpr = *(int32_t *) run->mmio.data;
1619                 break;
1620
1621         case 2:
1622                 if (vcpu->mmio_needed == 2)
1623                         *gpr = *(int16_t *) run->mmio.data;  /* signed (lh) */
1624                 else
1625                         *gpr = *(uint16_t *) run->mmio.data; /* unsigned (lhu) */
1626
1627                 break;
1628         case 1:
1629                 if (vcpu->mmio_needed == 2)
1630                         *gpr = *(int8_t *) run->mmio.data;
1631                 else
1632                         *gpr = *(u8 *) run->mmio.data;
1633                 break;
1634         }
1635
1636         if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1637                 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1638                           vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1639                           vcpu->mmio_needed);
1641
1642 done:
1643         return er;
1644 }
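
/*
 * Worked example for the halfword case above (values illustrative): if
 * the device returned 0x8000, a signed load (vcpu->mmio_needed == 2,
 * i.e. lh) must complete with the sign-extended value 0x...ffff8000,
 * while an unsigned load (lhu) must complete with 0x8000.  The int16_t
 * vs uint16_t casts ahead of the widening assignment to *gpr are what
 * produce the two results.
 */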
1645
1646 static enum emulation_result
1647 kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1648                      struct kvm_run *run, struct kvm_vcpu *vcpu)
1649 {
1650         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1651         struct mips_coproc *cop0 = vcpu->arch.cop0;
1652         struct kvm_vcpu_arch *arch = &vcpu->arch;
1653         enum emulation_result er = EMULATE_DONE;
1654
1655         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1656                 /* save old pc */
1657                 kvm_write_c0_guest_epc(cop0, arch->pc);
1658                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1659
1660                 if (cause & CAUSEF_BD)
1661                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1662                 else
1663                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1664
1665                 kvm_change_c0_guest_cause(cop0, (0xff),
1666                                           (exccode << CAUSEB_EXCCODE));
1667
1668                 /* Set PC to the exception entry point */
1669                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1670                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1671
1672                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1673                           exccode, kvm_read_c0_guest_epc(cop0),
1674                           kvm_read_c0_guest_badvaddr(cop0));
1675         } else {
1676                 kvm_err("Trying to deliver EXC when EXL is already set\n");
1677                 er = EMULATE_FAIL;
1678         }
1679
1680         return er;
1681 }
1682
1683 enum emulation_result
1684 kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1685                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1686 {
1687         enum emulation_result er = EMULATE_DONE;
1688         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1689         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1690
1691         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1692
1693         if (usermode) {
1694                 switch (exccode) {
1695                 case T_INT:
1696                 case T_SYSCALL:
1697                 case T_BREAK:
1698                 case T_RES_INST:
1699                         break;
1700
1701                 case T_COP_UNUSABLE:
1702                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1703                                 er = EMULATE_PRIV_FAIL;
1704                         break;
1705
1706                 case T_TLB_MOD:
1707                         break;
1708
1709                 case T_TLB_LD_MISS:
1710                         /* If we are accessing guest kernel space, then send an address error exception to the guest */
1711                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1712                                 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
1713                                           badvaddr);
1714                                 cause &= ~0xff;
1715                                 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1716                                 er = EMULATE_PRIV_FAIL;
1717                         }
1718                         break;
1719
1720                 case T_TLB_ST_MISS:
1721                         /* If we are accessing guest kernel space, then send an address error exception to the guest */
1722                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1723                                 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
1724                                           badvaddr);
1725                                 cause &= ~0xff;
1726                                 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1727                                 er = EMULATE_PRIV_FAIL;
1728                         }
1729                         break;
1730
1731                 case T_ADDR_ERR_ST:
1732                         kvm_debug("%s: address error ST @ %#lx\n", __func__,
1733                                   badvaddr);
1734                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1735                                 cause &= ~0xff;
1736                                 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1737                         }
1738                         er = EMULATE_PRIV_FAIL;
1739                         break;
1740                 case T_ADDR_ERR_LD:
1741                         kvm_debug("%s: address error LD @ %#lx\n", __func__,
1742                                   badvaddr);
1743                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1744                                 cause &= ~0xff;
1745                                 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1746                         }
1747                         er = EMULATE_PRIV_FAIL;
1748                         break;
1749                 default:
1750                         er = EMULATE_PRIV_FAIL;
1751                         break;
1752                 }
1753         }
1754
1755         if (er == EMULATE_PRIV_FAIL) {
1756                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1757         }
1758         return er;
1759 }
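
/*
 * Example scenario for the checks above (sketch): guest userland
 * dereferences a guest-kernel pointer while in user mode.  The access
 * misses the TLB, so exccode arrives as T_TLB_LD_MISS, but since
 * badvaddr >= KVM_GUEST_KSEG0 the code rewrites the cause to
 * T_ADDR_ERR_LD before kvm_mips_emulate_exc() delivers it, matching the
 * address error a real CPU would raise for a privilege violation.
 */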
1760
1761 /* User Address (UA) fault; this could happen if
1762  * (1) the TLB entry is not present/valid in either the guest or the shadow host
1763  *     TLB, in which case we pass the fault on to the guest kernel to handle.
1764  * (2) the TLB entry is present in the guest TLB but not in the shadow host TLB,
1765  *     in which case we inject the entry from the guest TLB into the shadow host TLB.
1766  */
1767 enum emulation_result
1768 kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1769                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1770 {
1771         enum emulation_result er = EMULATE_DONE;
1772         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1773         unsigned long va = vcpu->arch.host_cp0_badvaddr;
1774         int index;
1775
1776         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1777                   vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1778
1779         /* KVM would not have got the exception if this entry was valid in the
1780          * shadow host TLB. Check the guest TLB: if the entry is not there, send
1781          * the guest an exception. The guest exception handler should then inject
1782          * an entry into the guest TLB.
1783          */
1784         index = kvm_mips_guest_tlb_lookup(vcpu,
1785                                           (va & VPN2_MASK) |
1786                                           ASID_MASK(kvm_read_c0_guest_entryhi
1787                                            (vcpu->arch.cop0)));
1788         if (index < 0) {
1789                 if (exccode == T_TLB_LD_MISS) {
1790                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1791                 } else if (exccode == T_TLB_ST_MISS) {
1792                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1793                 } else {
1794                         kvm_err("%s: invalid exc code: %d\n", __func__, exccode);
1795                         er = EMULATE_FAIL;
1796                 }
1797         } else {
1798                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1799
1800                 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1801                 if (!TLB_IS_VALID(*tlb, va)) {
1802                         if (exccode == T_TLB_LD_MISS) {
1803                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1804                                                                 vcpu);
1805                         } else if (exccode == T_TLB_ST_MISS) {
1806                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1807                                                                 vcpu);
1808                         } else {
1809                                 kvm_err("%s: invalid exc code: %d\n", __func__,
1810                                         exccode);
1811                                 er = EMULATE_FAIL;
1812                         }
1813                 } else {
1814 #ifdef DEBUG
1815                         kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1816                                   tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1818 #endif
1819                         /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1820                         kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1821                                                              NULL);
1822                 }
1823         }
1824
1825         return er;
1826 }
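
/*
 * Summary of the decision tree above (restates the code, no new
 * behaviour):
 *
 *      lookup (badvaddr VPN2 | guest ASID) in the guest TLB
 *        miss        -> deliver a TLB refill exception to the guest
 *        hit, !valid -> deliver a TLB invalid exception to the guest
 *        hit, valid  -> inject the guest entry into the shadow host TLB
 *                       and transparently retry the faulting access
 */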