Merge branch 'for-3.5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[firefly-linux-kernel-4.4.55.git] / arch / powerpc / kvm / book3s_pr.c
index 7759053d391b87298a4888c1cbbb7463ee6e8431..a1baec340f7ee3e0492d3fcd86e14e41a8811ccf 100644 (file)
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;
 
                        /* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                }
        }
 
+       /*
+        * When switching from 32 to 64-bit, we may have a stale 32-bit
+        * magic page around, we need to flush it. Typically 32-bit magic
+        * page will be instantiated when calling into RTAS. Note: We
+        * assume that such transition only happens while in kernel mode,
+        * i.e., we never transition from user 32-bit to kernel 64-bit with
+        * a 32-bit magic page around.
+        */
+       if (vcpu->arch.magic_page_pa &&
+           !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+               /* going from RTAS to normal kernel code */
+               kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+                                    ~0xFFFUL);
+       }
+
        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        ulong mp_pa = vcpu->arch.magic_page_pa;
 
+       if (!(vcpu->arch.shared->msr & MSR_SF))
+               mp_pa = (uint32_t)mp_pa;
+
        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
+               vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
 
+       /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+       __hard_irq_enable();
+
        trace_kvm_book3s_exit(exit_nr, vcpu);
        preempt_enable();
        kvm_resched(vcpu);
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
+       case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
+       case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
+       case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
+       case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                struct kvmppc_book3s_shadow_vcpu *svcpu;
@@ -1131,6 +1158,31 @@ out:
        return r;
 }
 
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+       /* No flags */
+       info->flags = 0;
+
+       /* SLB is always 64 entries */
+       info->slb_size = 64;
+
+       /* Standard 4k base page size segment */
+       info->sps[0].page_shift = 12;
+       info->sps[0].slb_enc = 0;
+       info->sps[0].enc[0].page_shift = 12;
+       info->sps[0].enc[0].pte_enc = 0;
+
+       /* Standard 16M large page size segment */
+       info->sps[1].page_shift = 24;
+       info->sps[1].slb_enc = SLB_VSID_L;
+       info->sps[1].enc[0].page_shift = 24;
+       info->sps[1].enc[0].pte_enc = 0;
+
+       return 0;
+}
+#endif /* CONFIG_PPC64 */
+
 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
 {
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
        return 0;
 }
 
 void kvmppc_core_destroy_vm(struct kvm *kvm)
 {
+#ifdef CONFIG_PPC64
+       WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
 }
 
 static int kvmppc_book3s_init(void)