KVM: x86 emulator: Add sysexit emulation
author Andre Przywara <andre.przywara@amd.com>
Thu, 18 Jun 2009 10:56:02 +0000 (12:56 +0200)
committer Avi Kivity <avi@redhat.com>
Thu, 10 Sep 2009 05:33:01 +0000 (08:33 +0300)
Handle the #UD intercept of the sysexit instruction in 64-bit mode
returning to 32-bit compat mode on an AMD host.
Set up the segment descriptors for CS and SS and the EIP/ESP registers
according to the manual.
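
For reference: the manual derives all SYSEXIT target selectors from the
IA32_SYSENTER_CS MSR, which anchors a flat run of GDT entries. A minimal
standalone sketch of that arithmetic as the patch applies it (illustrative
names, not kernel API; SEL_RPL_USER stands in for SELECTOR_RPL_MASK):

#include <stdint.h>

#define SEL_RPL_USER 0x3	/* requested privilege level 3 */

/*
 * 32-bit return: CS = MSR+16, SS = MSR+24.
 * 64-bit return (REX.W set): CS = MSR+32, SS = CS+8.
 * Both selectors get their RPL forced to 3 before being loaded.
 */
static void sysexit_selectors(uint64_t sysenter_cs, int to_64bit,
			      uint16_t *cs, uint16_t *ss)
{
	if (to_64bit) {
		*cs = (uint16_t)(sysenter_cs + 32);
		*ss = *cs + 8;
	} else {
		*cs = (uint16_t)(sysenter_cs + 16);
		*ss = (uint16_t)(sysenter_cs + 24);
	}
	*cs |= SEL_RPL_USER;
	*ss |= SEL_RPL_USER;
}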

Signed-off-by: Christoph Egger <christoph.egger@amd.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/x86_emulate.c

index 7a9bddb3ebd4162f27b5eab9500f84078b6462d9..c6663d46f328b4deafbe6f535741e0bb474aaa7e 100644
@@ -1541,6 +1541,73 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
        return 0;
 }
 
+static int
+emulate_sysexit(struct x86_emulate_ctxt *ctxt)
+{
+       struct decode_cache *c = &ctxt->decode;
+       struct kvm_segment cs, ss;
+       u64 msr_data;
+       int usermode;
+
+       /* inject #UD if LOCK prefix is used */
+       if (c->lock_prefix)
+               return -1;
+
+       /* inject #GP if in real mode or protection (CR0.PE) is disabled */
+       if (ctxt->mode == X86EMUL_MODE_REAL
+               || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
+               kvm_inject_gp(ctxt->vcpu, 0);
+               return -1;
+       }
+
+       /* sysexit must be called from CPL 0 */
+       if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
+               kvm_inject_gp(ctxt->vcpu, 0);
+               return -1;
+       }
+
+       setup_syscalls_segments(ctxt, &cs, &ss);
+
+       if ((c->rex_prefix & 0x8) != 0x0)
+               usermode = X86EMUL_MODE_PROT64;
+       else
+               usermode = X86EMUL_MODE_PROT32;
+
+       cs.dpl = 3;
+       ss.dpl = 3;
+       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+       switch (usermode) {
+       case X86EMUL_MODE_PROT32:
+               cs.selector = (u16)(msr_data + 16);
+               if ((msr_data & 0xfffc) == 0x0) {
+                       kvm_inject_gp(ctxt->vcpu, 0);
+                       return -1;
+               }
+               ss.selector = (u16)(msr_data + 24);
+               break;
+       case X86EMUL_MODE_PROT64:
+               cs.selector = (u16)(msr_data + 32);
+               if (msr_data == 0x0) {
+                       kvm_inject_gp(ctxt->vcpu, 0);
+                       return -1;
+               }
+               ss.selector = cs.selector + 8;
+               cs.db = 0;
+               cs.l = 1;
+               break;
+       }
+       cs.selector |= SELECTOR_RPL_MASK;
+       ss.selector |= SELECTOR_RPL_MASK;
+
+       kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
+       kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+
+       c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
+       c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
+
+       return 0;
+}
+
 int
 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
@@ -2215,7 +2282,10 @@ twobyte_insn:
                        goto writeback;
                break;
        case 0x35:              /* sysexit */
-               goto cannot_emulate;
+               if (emulate_sysexit(ctxt) == -1)
+                       goto cannot_emulate;
+               else
+                       goto writeback;
                break;
        case 0x40 ... 0x4f:     /* cmov */
                c->dst.val = c->dst.orig_val = c->src.val;
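
The dispatcher keeps the existing convention: emulate_sysexit() returns -1
both when it has already injected #GP(0) and for the LOCK-prefix case, so
the cannot_emulate path covers everything else. A condensed, standalone
sketch of the fault rules the new helper enforces (hypothetical names,
assuming only the checks visible in the hunk above):

#include <stdint.h>

/*
 * Returns 1 when SYSEXIT must fault with #GP(0), 0 when it may proceed:
 * protected mode only, CPL 0 only, and IA32_SYSENTER_CS must not hold
 * the null selector.
 */
static int sysexit_gp_fault(int real_mode, int cr0_pe, int cpl,
			    uint64_t sysenter_cs, int to_64bit)
{
	if (real_mode || !cr0_pe)
		return 1;	/* real mode, or CR0.PE clear */
	if (cpl != 0)
		return 1;	/* SYSEXIT may only run from ring 0 */
	if (to_64bit ? sysenter_cs == 0
		     : (sysenter_cs & 0xfffc) == 0)
		return 1;	/* MSR selects the null descriptor */
	return 0;
}

On success the helper has already loaded the new CS/SS and copied RDX into
the return EIP and RCX into the user RSP, matching the architectural
SYSEXIT register contract; REX.W (bit 3 of the REX prefix, hence the
c->rex_prefix & 0x8 test) selects the 64-bit return path.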