1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #define DPRINTF(x...) do {} while (0)
26 #include <linux/module.h>
27 #include <asm/kvm_emulate.h>
33 * Opcode effective-address decode tables.
34 * Note that we only emulate instructions that have at least one memory
35 * operand (excluding implicit stack references). We assume that stack
36 * references and instruction fetches will never occur in special memory
37 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
41 /* Operand sizes: 8-bit operands or specified/overridden size. */
42 #define ByteOp (1<<0) /* 8-bit operands. */
43 /* Destination operand type. */
44 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */
45 #define DstReg (2<<1) /* Register operand. */
46 #define DstMem (3<<1) /* Memory operand. */
47 #define DstAcc (4<<1) /* Destination Accumulator */
48 #define DstDI (5<<1) /* Destination is in ES:(E)DI */
49 #define DstMem64 (6<<1) /* 64bit memory operand */
50 #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
51 #define DstMask (7<<1)
52 /* Source operand type. */
53 #define SrcNone (0<<4) /* No source operand. */
54 #define SrcReg (1<<4) /* Register operand. */
55 #define SrcMem (2<<4) /* Memory operand. */
56 #define SrcMem16 (3<<4) /* Memory operand (16-bit). */
57 #define SrcMem32 (4<<4) /* Memory operand (32-bit). */
58 #define SrcImm (5<<4) /* Immediate operand. */
59 #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
60 #define SrcOne (7<<4) /* Implied '1' */
61 #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
62 #define SrcImmU (9<<4) /* Immediate operand, unsigned */
63 #define SrcSI (0xa<<4) /* Source is in the DS:RSI */
64 #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
65 #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
66 #define SrcAcc (0xd<<4) /* Source Accumulator */
67 #define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
68 #define SrcMask (0xf<<4)
69 /* Generic ModRM decode. */
71 /* Destination is only written; never read. */
74 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
75 #define String (1<<12) /* String instruction (rep capable) */
76 #define Stack (1<<13) /* Stack instruction (push/pop) */
77 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
78 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
80 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
81 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
82 #define Undefined (1<<25) /* No Such Instruction */
83 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
84 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
86 /* Source 2 operand type */
87 #define Src2None (0<<29)
88 #define Src2CL (1<<29)
89 #define Src2ImmByte (2<<29)
90 #define Src2One (3<<29)
91 #define Src2Imm (4<<29)
92 #define Src2Mask (7<<29)
95 #define X3(x...) X2(x), x
96 #define X4(x...) X2(x), X2(x)
97 #define X5(x...) X4(x), x
98 #define X6(x...) X4(x), X2(x)
99 #define X7(x...) X4(x), X3(x)
100 #define X8(x...) X4(x), X4(x)
101 #define X16(x...) X8(x), X8(x)
106 int (*execute)(struct x86_emulate_ctxt *ctxt);
107 struct opcode *group;
108 struct group_dual *gdual;
113 struct opcode mod012[8];
114 struct opcode mod3[8];
117 /* EFLAGS bit definitions. */
118 #define EFLG_ID (1<<21)
119 #define EFLG_VIP (1<<20)
120 #define EFLG_VIF (1<<19)
121 #define EFLG_AC (1<<18)
122 #define EFLG_VM (1<<17)
123 #define EFLG_RF (1<<16)
124 #define EFLG_IOPL (3<<12)
125 #define EFLG_NT (1<<14)
126 #define EFLG_OF (1<<11)
127 #define EFLG_DF (1<<10)
128 #define EFLG_IF (1<<9)
129 #define EFLG_TF (1<<8)
130 #define EFLG_SF (1<<7)
131 #define EFLG_ZF (1<<6)
132 #define EFLG_AF (1<<4)
133 #define EFLG_PF (1<<2)
134 #define EFLG_CF (1<<0)
136 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
137 #define EFLG_RESERVED_ONE_MASK 2
140 * Instruction emulation:
141 * Most instructions are emulated directly via a fragment of inline assembly
142 * code. This allows us to save/restore EFLAGS and thus very easily pick up
143 * any modified flags.
146 #if defined(CONFIG_X86_64)
147 #define _LO32 "k" /* force 32-bit operand */
148 #define _STK "%%rsp" /* stack pointer */
149 #elif defined(__i386__)
150 #define _LO32 "" /* force 32-bit operand */
151 #define _STK "%%esp" /* stack pointer */
155 * These EFLAGS bits are restored from saved value during emulation, and
156 * any changes are written back to the saved value after emulation.
158 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
160 /* Before executing instruction: restore necessary bits in EFLAGS. */
161 #define _PRE_EFLAGS(_sav, _msk, _tmp) \
162 /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
163 "movl %"_sav",%"_LO32 _tmp"; " \
166 "movl %"_msk",%"_LO32 _tmp"; " \
167 "andl %"_LO32 _tmp",("_STK"); " \
169 "notl %"_LO32 _tmp"; " \
170 "andl %"_LO32 _tmp",("_STK"); " \
171 "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
173 "orl %"_LO32 _tmp",("_STK"); " \
177 /* After executing instruction: write-back necessary bits in EFLAGS. */
178 #define _POST_EFLAGS(_sav, _msk, _tmp) \
179 /* _sav |= EFLAGS & _msk; */ \
182 "andl %"_msk",%"_LO32 _tmp"; " \
183 "orl %"_LO32 _tmp",%"_sav"; "
191 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
193 __asm__ __volatile__ ( \
194 _PRE_EFLAGS("0", "4", "2") \
195 _op _suffix " %"_x"3,%1; " \
196 _POST_EFLAGS("0", "4", "2") \
197 : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
199 : _y ((_src).val), "i" (EFLAGS_MASK)); \
203 /* Raw emulation: instruction has two explicit operands. */
204 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
206 unsigned long _tmp; \
208 switch ((_dst).bytes) { \
210 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
213 ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
216 ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
221 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
223 unsigned long _tmp; \
224 switch ((_dst).bytes) { \
226 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
229 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
230 _wx, _wy, _lx, _ly, _qx, _qy); \
235 /* Source operand is byte-sized and may be restricted to just %cl. */
236 #define emulate_2op_SrcB(_op, _src, _dst, _eflags) \
237 __emulate_2op(_op, _src, _dst, _eflags, \
238 "b", "c", "b", "c", "b", "c", "b", "c")
240 /* Source operand is byte, word, long or quad sized. */
241 #define emulate_2op_SrcV(_op, _src, _dst, _eflags) \
242 __emulate_2op(_op, _src, _dst, _eflags, \
243 "b", "q", "w", "r", _LO32, "r", "", "r")
245 /* Source operand is word, long or quad sized. */
246 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags) \
247 __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
248 "w", "r", _LO32, "r", "", "r")
250 /* Instruction has three operands and one operand is stored in ECX register */
251 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
253 unsigned long _tmp; \
254 _type _clv = (_cl).val; \
255 _type _srcv = (_src).val; \
256 _type _dstv = (_dst).val; \
258 __asm__ __volatile__ ( \
259 _PRE_EFLAGS("0", "5", "2") \
260 _op _suffix " %4,%1 \n" \
261 _POST_EFLAGS("0", "5", "2") \
262 : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
263 : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
266 (_cl).val = (unsigned long) _clv; \
267 (_src).val = (unsigned long) _srcv; \
268 (_dst).val = (unsigned long) _dstv; \
271 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
273 switch ((_dst).bytes) { \
275 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
276 "w", unsigned short); \
279 __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
280 "l", unsigned int); \
283 ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
284 "q", unsigned long)); \
289 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
291 unsigned long _tmp; \
293 __asm__ __volatile__ ( \
294 _PRE_EFLAGS("0", "3", "2") \
295 _op _suffix " %1; " \
296 _POST_EFLAGS("0", "3", "2") \
297 : "=m" (_eflags), "+m" ((_dst).val), \
299 : "i" (EFLAGS_MASK)); \
302 /* Instruction has only one explicit operand (no source operand). */
303 #define emulate_1op(_op, _dst, _eflags) \
305 switch ((_dst).bytes) { \
306 case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \
307 case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \
308 case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \
309 case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
313 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \
315 unsigned long _tmp; \
317 __asm__ __volatile__ ( \
318 _PRE_EFLAGS("0", "4", "1") \
319 _op _suffix " %5; " \
320 _POST_EFLAGS("0", "4", "1") \
321 : "=m" (_eflags), "=&r" (_tmp), \
322 "+a" (_rax), "+d" (_rdx) \
323 : "i" (EFLAGS_MASK), "m" ((_src).val), \
324 "a" (_rax), "d" (_rdx)); \
327 #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
329 unsigned long _tmp; \
331 __asm__ __volatile__ ( \
332 _PRE_EFLAGS("0", "5", "1") \
334 _op _suffix " %6; " \
336 _POST_EFLAGS("0", "5", "1") \
337 ".pushsection .fixup,\"ax\" \n\t" \
338 "3: movb $1, %4 \n\t" \
341 _ASM_EXTABLE(1b, 3b) \
342 : "=m" (_eflags), "=&r" (_tmp), \
343 "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \
344 : "i" (EFLAGS_MASK), "m" ((_src).val), \
345 "a" (_rax), "d" (_rdx)); \
348 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
349 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
351 switch((_src).bytes) { \
352 case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
353 case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
354 case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
355 case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
359 #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \
361 switch((_src).bytes) { \
363 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
364 _eflags, "b", _ex); \
367 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
368 _eflags, "w", _ex); \
371 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
372 _eflags, "l", _ex); \
375 __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
376 _eflags, "q", _ex)); \
381 /* Fetch next part of the instruction being emulated. */
382 #define insn_fetch(_type, _size, _eip) \
383 ({ unsigned long _x; \
384 rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
385 if (rc != X86EMUL_CONTINUE) \
391 #define insn_fetch_arr(_arr, _size, _eip) \
392 ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
393 if (rc != X86EMUL_CONTINUE) \
398 static inline unsigned long ad_mask(struct decode_cache *c)
400 return (1UL << (c->ad_bytes << 3)) - 1;
403 /* Access/update address held in a register, based on addressing mode. */
404 static inline unsigned long
405 address_mask(struct decode_cache *c, unsigned long reg)
407 if (c->ad_bytes == sizeof(unsigned long))
410 return reg & ad_mask(c);
413 static inline unsigned long
414 register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
416 return base + address_mask(c, reg);
420 register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
422 if (c->ad_bytes == sizeof(unsigned long))
425 *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
428 static inline void jmp_rel(struct decode_cache *c, int rel)
430 register_address_increment(c, &c->eip, rel);
433 static void set_seg_override(struct decode_cache *c, int seg)
435 c->has_seg_override = true;
436 c->seg_override = seg;
439 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
440 struct x86_emulate_ops *ops, int seg)
442 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
445 return ops->get_cached_segment_base(seg, ctxt->vcpu);
448 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
449 struct x86_emulate_ops *ops,
450 struct decode_cache *c)
452 if (!c->has_seg_override)
455 return seg_base(ctxt, ops, c->seg_override);
458 static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
459 struct x86_emulate_ops *ops)
461 return seg_base(ctxt, ops, VCPU_SREG_ES);
464 static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
465 struct x86_emulate_ops *ops)
467 return seg_base(ctxt, ops, VCPU_SREG_SS);
470 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
471 u32 error, bool valid)
473 ctxt->exception = vec;
474 ctxt->error_code = error;
475 ctxt->error_code_valid = valid;
478 static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
480 emulate_exception(ctxt, GP_VECTOR, err, true);
483 static void emulate_pf(struct x86_emulate_ctxt *ctxt)
485 emulate_exception(ctxt, PF_VECTOR, 0, true);
488 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
490 emulate_exception(ctxt, UD_VECTOR, 0, false);
493 static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
495 emulate_exception(ctxt, TS_VECTOR, err, true);
498 static int emulate_de(struct x86_emulate_ctxt *ctxt)
500 emulate_exception(ctxt, DE_VECTOR, 0, false);
501 return X86EMUL_PROPAGATE_FAULT;
504 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
505 struct x86_emulate_ops *ops,
506 unsigned long eip, u8 *dest)
508 struct fetch_cache *fc = &ctxt->decode.fetch;
512 if (eip == fc->end) {
513 cur_size = fc->end - fc->start;
514 size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
515 rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
516 size, ctxt->vcpu, NULL);
517 if (rc != X86EMUL_CONTINUE)
521 *dest = fc->data[eip - fc->start];
522 return X86EMUL_CONTINUE;
525 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
526 struct x86_emulate_ops *ops,
527 unsigned long eip, void *dest, unsigned size)
531 /* x86 instructions are limited to 15 bytes. */
532 if (eip + size - ctxt->eip > 15)
533 return X86EMUL_UNHANDLEABLE;
535 rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
536 if (rc != X86EMUL_CONTINUE)
539 return X86EMUL_CONTINUE;
543 * Given the 'reg' portion of a ModRM byte, and a register block, return a
544 * pointer into the block that addresses the relevant register.
545 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
547 static void *decode_register(u8 modrm_reg, unsigned long *regs,
552 p = ®s[modrm_reg];
553 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
554 p = (unsigned char *)®s[modrm_reg & 3] + 1;
558 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
559 struct x86_emulate_ops *ops,
561 u16 *size, unsigned long *address, int op_bytes)
568 rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
569 if (rc != X86EMUL_CONTINUE)
571 rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
575 static int test_cc(unsigned int condition, unsigned int flags)
579 switch ((condition & 15) >> 1) {
581 rc |= (flags & EFLG_OF);
583 case 1: /* b/c/nae */
584 rc |= (flags & EFLG_CF);
587 rc |= (flags & EFLG_ZF);
590 rc |= (flags & (EFLG_CF|EFLG_ZF));
593 rc |= (flags & EFLG_SF);
596 rc |= (flags & EFLG_PF);
599 rc |= (flags & EFLG_ZF);
602 rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
606 /* Odd condition identifiers (lsb == 1) have inverted sense. */
607 return (!!rc ^ (condition & 1));
610 static void fetch_register_operand(struct operand *op)
614 op->val = *(u8 *)op->addr.reg;
617 op->val = *(u16 *)op->addr.reg;
620 op->val = *(u32 *)op->addr.reg;
623 op->val = *(u64 *)op->addr.reg;
628 static void decode_register_operand(struct operand *op,
629 struct decode_cache *c,
632 unsigned reg = c->modrm_reg;
633 int highbyte_regs = c->rex_prefix == 0;
636 reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
638 if ((c->d & ByteOp) && !inhibit_bytereg) {
639 op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
642 op->addr.reg = decode_register(reg, c->regs, 0);
643 op->bytes = c->op_bytes;
645 fetch_register_operand(op);
646 op->orig_val = op->val;
649 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
650 struct x86_emulate_ops *ops,
653 struct decode_cache *c = &ctxt->decode;
655 int index_reg = 0, base_reg = 0, scale;
656 int rc = X86EMUL_CONTINUE;
660 c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
661 index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
662 c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REG.B */
665 c->modrm = insn_fetch(u8, 1, c->eip);
666 c->modrm_mod |= (c->modrm & 0xc0) >> 6;
667 c->modrm_reg |= (c->modrm & 0x38) >> 3;
668 c->modrm_rm |= (c->modrm & 0x07);
669 c->modrm_seg = VCPU_SREG_DS;
671 if (c->modrm_mod == 3) {
673 op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
674 op->addr.reg = decode_register(c->modrm_rm,
675 c->regs, c->d & ByteOp);
676 fetch_register_operand(op);
682 if (c->ad_bytes == 2) {
683 unsigned bx = c->regs[VCPU_REGS_RBX];
684 unsigned bp = c->regs[VCPU_REGS_RBP];
685 unsigned si = c->regs[VCPU_REGS_RSI];
686 unsigned di = c->regs[VCPU_REGS_RDI];
688 /* 16-bit ModR/M decode. */
689 switch (c->modrm_mod) {
691 if (c->modrm_rm == 6)
692 modrm_ea += insn_fetch(u16, 2, c->eip);
695 modrm_ea += insn_fetch(s8, 1, c->eip);
698 modrm_ea += insn_fetch(u16, 2, c->eip);
701 switch (c->modrm_rm) {
721 if (c->modrm_mod != 0)
728 if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
729 (c->modrm_rm == 6 && c->modrm_mod != 0))
730 c->modrm_seg = VCPU_SREG_SS;
731 modrm_ea = (u16)modrm_ea;
733 /* 32/64-bit ModR/M decode. */
734 if ((c->modrm_rm & 7) == 4) {
735 sib = insn_fetch(u8, 1, c->eip);
736 index_reg |= (sib >> 3) & 7;
740 if ((base_reg & 7) == 5 && c->modrm_mod == 0)
741 modrm_ea += insn_fetch(s32, 4, c->eip);
743 modrm_ea += c->regs[base_reg];
745 modrm_ea += c->regs[index_reg] << scale;
746 } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
747 if (ctxt->mode == X86EMUL_MODE_PROT64)
750 modrm_ea += c->regs[c->modrm_rm];
751 switch (c->modrm_mod) {
753 if (c->modrm_rm == 5)
754 modrm_ea += insn_fetch(s32, 4, c->eip);
757 modrm_ea += insn_fetch(s8, 1, c->eip);
760 modrm_ea += insn_fetch(s32, 4, c->eip);
764 op->addr.mem = modrm_ea;
769 static int decode_abs(struct x86_emulate_ctxt *ctxt,
770 struct x86_emulate_ops *ops,
773 struct decode_cache *c = &ctxt->decode;
774 int rc = X86EMUL_CONTINUE;
777 switch (c->ad_bytes) {
779 op->addr.mem = insn_fetch(u16, 2, c->eip);
782 op->addr.mem = insn_fetch(u32, 4, c->eip);
785 op->addr.mem = insn_fetch(u64, 8, c->eip);
792 static void fetch_bit_operand(struct decode_cache *c)
796 if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
797 mask = ~(c->dst.bytes * 8 - 1);
799 if (c->src.bytes == 2)
800 sv = (s16)c->src.val & (s16)mask;
801 else if (c->src.bytes == 4)
802 sv = (s32)c->src.val & (s32)mask;
804 c->dst.addr.mem += (sv >> 3);
807 /* only subword offset */
808 c->src.val &= (c->dst.bytes << 3) - 1;
811 static int read_emulated(struct x86_emulate_ctxt *ctxt,
812 struct x86_emulate_ops *ops,
813 unsigned long addr, void *dest, unsigned size)
816 struct read_cache *mc = &ctxt->decode.mem_read;
820 int n = min(size, 8u);
822 if (mc->pos < mc->end)
825 rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
827 if (rc == X86EMUL_PROPAGATE_FAULT)
829 if (rc != X86EMUL_CONTINUE)
834 memcpy(dest, mc->data + mc->pos, n);
839 return X86EMUL_CONTINUE;
842 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
843 struct x86_emulate_ops *ops,
844 unsigned int size, unsigned short port,
847 struct read_cache *rc = &ctxt->decode.io_read;
849 if (rc->pos == rc->end) { /* refill pio read ahead */
850 struct decode_cache *c = &ctxt->decode;
851 unsigned int in_page, n;
852 unsigned int count = c->rep_prefix ?
853 address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
854 in_page = (ctxt->eflags & EFLG_DF) ?
855 offset_in_page(c->regs[VCPU_REGS_RDI]) :
856 PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
857 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
861 rc->pos = rc->end = 0;
862 if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
867 memcpy(dest, rc->data + rc->pos, size);
872 static u32 desc_limit_scaled(struct desc_struct *desc)
874 u32 limit = get_desc_limit(desc);
876 return desc->g ? (limit << 12) | 0xfff : limit;
879 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
880 struct x86_emulate_ops *ops,
881 u16 selector, struct desc_ptr *dt)
883 if (selector & 1 << 2) {
884 struct desc_struct desc;
885 memset (dt, 0, sizeof *dt);
886 if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
889 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
890 dt->address = get_desc_base(&desc);
892 ops->get_gdt(dt, ctxt->vcpu);
895 /* allowed just for 8 bytes segments */
896 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
897 struct x86_emulate_ops *ops,
898 u16 selector, struct desc_struct *desc)
901 u16 index = selector >> 3;
906 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
908 if (dt.size < index * 8 + 7) {
909 emulate_gp(ctxt, selector & 0xfffc);
910 return X86EMUL_PROPAGATE_FAULT;
912 addr = dt.address + index * 8;
913 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
914 if (ret == X86EMUL_PROPAGATE_FAULT)
920 /* allowed just for 8 bytes segments */
921 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
922 struct x86_emulate_ops *ops,
923 u16 selector, struct desc_struct *desc)
926 u16 index = selector >> 3;
931 get_descriptor_table_ptr(ctxt, ops, selector, &dt);
933 if (dt.size < index * 8 + 7) {
934 emulate_gp(ctxt, selector & 0xfffc);
935 return X86EMUL_PROPAGATE_FAULT;
938 addr = dt.address + index * 8;
939 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
940 if (ret == X86EMUL_PROPAGATE_FAULT)
946 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
947 struct x86_emulate_ops *ops,
948 u16 selector, int seg)
950 struct desc_struct seg_desc;
952 unsigned err_vec = GP_VECTOR;
954 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
957 memset(&seg_desc, 0, sizeof seg_desc);
959 if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
960 || ctxt->mode == X86EMUL_MODE_REAL) {
961 /* set real mode segment descriptor */
962 set_desc_base(&seg_desc, selector << 4);
963 set_desc_limit(&seg_desc, 0xffff);
970 /* NULL selector is not valid for TR, CS and SS */
971 if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
975 /* TR should be in GDT only */
976 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
979 if (null_selector) /* for NULL selector skip all following checks */
982 ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
983 if (ret != X86EMUL_CONTINUE)
986 err_code = selector & 0xfffc;
989 /* can't load system descriptor into segment selecor */
990 if (seg <= VCPU_SREG_GS && !seg_desc.s)
994 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1000 cpl = ops->cpl(ctxt->vcpu);
1005 * segment is not a writable data segment or segment
1006 * selector's RPL != CPL or segment selector's RPL != CPL
1008 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1012 if (!(seg_desc.type & 8))
1015 if (seg_desc.type & 4) {
1021 if (rpl > cpl || dpl != cpl)
1024 /* CS(RPL) <- CPL */
1025 selector = (selector & 0xfffc) | cpl;
1028 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1031 case VCPU_SREG_LDTR:
1032 if (seg_desc.s || seg_desc.type != 2)
1035 default: /* DS, ES, FS, or GS */
1037 * segment is not a data or readable code segment or
1038 * ((segment is a data or nonconforming code segment)
1039 * and (both RPL and CPL > DPL))
1041 if ((seg_desc.type & 0xa) == 0x8 ||
1042 (((seg_desc.type & 0xc) != 0xc) &&
1043 (rpl > dpl && cpl > dpl)))
1049 /* mark segment as accessed */
1051 ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
1052 if (ret != X86EMUL_CONTINUE)
1056 ops->set_segment_selector(selector, seg, ctxt->vcpu);
1057 ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
1058 return X86EMUL_CONTINUE;
1060 emulate_exception(ctxt, err_vec, err_code, true);
1061 return X86EMUL_PROPAGATE_FAULT;
1064 static void write_register_operand(struct operand *op)
1066 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1067 switch (op->bytes) {
1069 *(u8 *)op->addr.reg = (u8)op->val;
1072 *(u16 *)op->addr.reg = (u16)op->val;
1075 *op->addr.reg = (u32)op->val;
1076 break; /* 64b: zero-extend */
1078 *op->addr.reg = op->val;
1083 static inline int writeback(struct x86_emulate_ctxt *ctxt,
1084 struct x86_emulate_ops *ops)
1087 struct decode_cache *c = &ctxt->decode;
1090 switch (c->dst.type) {
1092 write_register_operand(&c->dst);
1096 rc = ops->cmpxchg_emulated(
1104 rc = ops->write_emulated(
1110 if (rc == X86EMUL_PROPAGATE_FAULT)
1112 if (rc != X86EMUL_CONTINUE)
1121 return X86EMUL_CONTINUE;
1124 static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
1125 struct x86_emulate_ops *ops)
1127 struct decode_cache *c = &ctxt->decode;
1129 c->dst.type = OP_MEM;
1130 c->dst.bytes = c->op_bytes;
1131 c->dst.val = c->src.val;
1132 register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
1133 c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
1134 c->regs[VCPU_REGS_RSP]);
1137 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1138 struct x86_emulate_ops *ops,
1139 void *dest, int len)
1141 struct decode_cache *c = &ctxt->decode;
1144 rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
1145 c->regs[VCPU_REGS_RSP]),
1147 if (rc != X86EMUL_CONTINUE)
1150 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
1154 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1155 struct x86_emulate_ops *ops,
1156 void *dest, int len)
1159 unsigned long val, change_mask;
1160 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1161 int cpl = ops->cpl(ctxt->vcpu);
1163 rc = emulate_pop(ctxt, ops, &val, len);
1164 if (rc != X86EMUL_CONTINUE)
1167 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1168 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1170 switch(ctxt->mode) {
1171 case X86EMUL_MODE_PROT64:
1172 case X86EMUL_MODE_PROT32:
1173 case X86EMUL_MODE_PROT16:
1175 change_mask |= EFLG_IOPL;
1177 change_mask |= EFLG_IF;
1179 case X86EMUL_MODE_VM86:
1181 emulate_gp(ctxt, 0);
1182 return X86EMUL_PROPAGATE_FAULT;
1184 change_mask |= EFLG_IF;
1186 default: /* real mode */
1187 change_mask |= (EFLG_IOPL | EFLG_IF);
1191 *(unsigned long *)dest =
1192 (ctxt->eflags & ~change_mask) | (val & change_mask);
1194 if (rc == X86EMUL_PROPAGATE_FAULT)
1200 static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
1201 struct x86_emulate_ops *ops, int seg)
1203 struct decode_cache *c = &ctxt->decode;
1205 c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
1207 emulate_push(ctxt, ops);
1210 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1211 struct x86_emulate_ops *ops, int seg)
1213 struct decode_cache *c = &ctxt->decode;
1214 unsigned long selector;
1217 rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
1218 if (rc != X86EMUL_CONTINUE)
1221 rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
1225 static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
1226 struct x86_emulate_ops *ops)
1228 struct decode_cache *c = &ctxt->decode;
1229 unsigned long old_esp = c->regs[VCPU_REGS_RSP];
1230 int rc = X86EMUL_CONTINUE;
1231 int reg = VCPU_REGS_RAX;
1233 while (reg <= VCPU_REGS_RDI) {
1234 (reg == VCPU_REGS_RSP) ?
1235 (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
1237 emulate_push(ctxt, ops);
1239 rc = writeback(ctxt, ops);
1240 if (rc != X86EMUL_CONTINUE)
1246 /* Disable writeback. */
1247 c->dst.type = OP_NONE;
1252 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
1253 struct x86_emulate_ops *ops)
1255 struct decode_cache *c = &ctxt->decode;
1256 int rc = X86EMUL_CONTINUE;
1257 int reg = VCPU_REGS_RDI;
1259 while (reg >= VCPU_REGS_RAX) {
1260 if (reg == VCPU_REGS_RSP) {
1261 register_address_increment(c, &c->regs[VCPU_REGS_RSP],
1266 rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
1267 if (rc != X86EMUL_CONTINUE)
1274 int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1275 struct x86_emulate_ops *ops, int irq)
1277 struct decode_cache *c = &ctxt->decode;
1285 /* TODO: Add limit checks */
1286 c->src.val = ctxt->eflags;
1287 emulate_push(ctxt, ops);
1288 rc = writeback(ctxt, ops);
1289 if (rc != X86EMUL_CONTINUE)
1292 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1294 c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1295 emulate_push(ctxt, ops);
1296 rc = writeback(ctxt, ops);
1297 if (rc != X86EMUL_CONTINUE)
1300 c->src.val = c->eip;
1301 emulate_push(ctxt, ops);
1302 rc = writeback(ctxt, ops);
1303 if (rc != X86EMUL_CONTINUE)
1306 c->dst.type = OP_NONE;
1308 ops->get_idt(&dt, ctxt->vcpu);
1310 eip_addr = dt.address + (irq << 2);
1311 cs_addr = dt.address + (irq << 2) + 2;
1313 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
1314 if (rc != X86EMUL_CONTINUE)
1317 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
1318 if (rc != X86EMUL_CONTINUE)
1321 rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
1322 if (rc != X86EMUL_CONTINUE)
1330 static int emulate_int(struct x86_emulate_ctxt *ctxt,
1331 struct x86_emulate_ops *ops, int irq)
1333 switch(ctxt->mode) {
1334 case X86EMUL_MODE_REAL:
1335 return emulate_int_real(ctxt, ops, irq);
1336 case X86EMUL_MODE_VM86:
1337 case X86EMUL_MODE_PROT16:
1338 case X86EMUL_MODE_PROT32:
1339 case X86EMUL_MODE_PROT64:
1341 /* Protected mode interrupts unimplemented yet */
1342 return X86EMUL_UNHANDLEABLE;
1346 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
1347 struct x86_emulate_ops *ops)
1349 struct decode_cache *c = &ctxt->decode;
1350 int rc = X86EMUL_CONTINUE;
1351 unsigned long temp_eip = 0;
1352 unsigned long temp_eflags = 0;
1353 unsigned long cs = 0;
1354 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1355 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1356 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1357 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1359 /* TODO: Add stack limit check */
1361 rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
1363 if (rc != X86EMUL_CONTINUE)
1366 if (temp_eip & ~0xffff) {
1367 emulate_gp(ctxt, 0);
1368 return X86EMUL_PROPAGATE_FAULT;
1371 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1373 if (rc != X86EMUL_CONTINUE)
1376 rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
1378 if (rc != X86EMUL_CONTINUE)
1381 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1383 if (rc != X86EMUL_CONTINUE)
1389 if (c->op_bytes == 4)
1390 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1391 else if (c->op_bytes == 2) {
1392 ctxt->eflags &= ~0xffff;
1393 ctxt->eflags |= temp_eflags;
1396 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1397 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1402 static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
1403 struct x86_emulate_ops* ops)
1405 switch(ctxt->mode) {
1406 case X86EMUL_MODE_REAL:
1407 return emulate_iret_real(ctxt, ops);
1408 case X86EMUL_MODE_VM86:
1409 case X86EMUL_MODE_PROT16:
1410 case X86EMUL_MODE_PROT32:
1411 case X86EMUL_MODE_PROT64:
1413 /* iret from protected mode unimplemented yet */
1414 return X86EMUL_UNHANDLEABLE;
1418 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
1419 struct x86_emulate_ops *ops)
1421 struct decode_cache *c = &ctxt->decode;
1423 return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
1426 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
1428 struct decode_cache *c = &ctxt->decode;
1429 switch (c->modrm_reg) {
1431 emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
1434 emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
1437 emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
1440 emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
1442 case 4: /* sal/shl */
1443 case 6: /* sal/shl */
1444 emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
1447 emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
1450 emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
1455 static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
1456 struct x86_emulate_ops *ops)
1458 struct decode_cache *c = &ctxt->decode;
1459 unsigned long *rax = &c->regs[VCPU_REGS_RAX];
1460 unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
1463 switch (c->modrm_reg) {
1464 case 0 ... 1: /* test */
1465 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
1468 c->dst.val = ~c->dst.val;
1471 emulate_1op("neg", c->dst, ctxt->eflags);
1474 emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
1477 emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
1480 emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
1484 emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
1488 return X86EMUL_UNHANDLEABLE;
1491 return emulate_de(ctxt);
1492 return X86EMUL_CONTINUE;
1495 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
1496 struct x86_emulate_ops *ops)
1498 struct decode_cache *c = &ctxt->decode;
1500 switch (c->modrm_reg) {
1502 emulate_1op("inc", c->dst, ctxt->eflags);
1505 emulate_1op("dec", c->dst, ctxt->eflags);
1507 case 2: /* call near abs */ {
1510 c->eip = c->src.val;
1511 c->src.val = old_eip;
1512 emulate_push(ctxt, ops);
1515 case 4: /* jmp abs */
1516 c->eip = c->src.val;
1519 emulate_push(ctxt, ops);
1522 return X86EMUL_CONTINUE;
1525 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1526 struct x86_emulate_ops *ops)
1528 struct decode_cache *c = &ctxt->decode;
1529 u64 old = c->dst.orig_val64;
1531 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
1532 ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
1533 c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1534 c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1535 ctxt->eflags &= ~EFLG_ZF;
1537 c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
1538 (u32) c->regs[VCPU_REGS_RBX];
1540 ctxt->eflags |= EFLG_ZF;
1542 return X86EMUL_CONTINUE;
1545 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1546 struct x86_emulate_ops *ops)
1548 struct decode_cache *c = &ctxt->decode;
1552 rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
1553 if (rc != X86EMUL_CONTINUE)
1555 if (c->op_bytes == 4)
1556 c->eip = (u32)c->eip;
1557 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1558 if (rc != X86EMUL_CONTINUE)
1560 rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
1564 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
1565 struct x86_emulate_ops *ops, int seg)
1567 struct decode_cache *c = &ctxt->decode;
1571 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
1573 rc = load_segment_descriptor(ctxt, ops, sel, seg);
1574 if (rc != X86EMUL_CONTINUE)
1577 c->dst.val = c->src.val;
1582 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1583 struct x86_emulate_ops *ops, struct desc_struct *cs,
1584 struct desc_struct *ss)
1586 memset(cs, 0, sizeof(struct desc_struct));
1587 ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
1588 memset(ss, 0, sizeof(struct desc_struct));
1590 cs->l = 0; /* will be adjusted later */
1591 set_desc_base(cs, 0); /* flat segment */
1592 cs->g = 1; /* 4kb granularity */
1593 set_desc_limit(cs, 0xfffff); /* 4GB limit */
1594 cs->type = 0x0b; /* Read, Execute, Accessed */
1596 cs->dpl = 0; /* will be adjusted later */
1600 set_desc_base(ss, 0); /* flat segment */
1601 set_desc_limit(ss, 0xfffff); /* 4GB limit */
1602 ss->g = 1; /* 4kb granularity */
1604 ss->type = 0x03; /* Read/Write, Accessed */
1605 ss->d = 1; /* 32bit stack segment */
1611 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1613 struct decode_cache *c = &ctxt->decode;
1614 struct desc_struct cs, ss;
1618 /* syscall is not available in real mode */
1619 if (ctxt->mode == X86EMUL_MODE_REAL ||
1620 ctxt->mode == X86EMUL_MODE_VM86) {
1622 return X86EMUL_PROPAGATE_FAULT;
1625 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1627 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1629 cs_sel = (u16)(msr_data & 0xfffc);
1630 ss_sel = (u16)(msr_data + 8);
1632 if (is_long_mode(ctxt->vcpu)) {
1636 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1637 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1638 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1639 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1641 c->regs[VCPU_REGS_RCX] = c->eip;
1642 if (is_long_mode(ctxt->vcpu)) {
1643 #ifdef CONFIG_X86_64
1644 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1646 ops->get_msr(ctxt->vcpu,
1647 ctxt->mode == X86EMUL_MODE_PROT64 ?
1648 MSR_LSTAR : MSR_CSTAR, &msr_data);
1651 ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1652 ctxt->eflags &= ~(msr_data | EFLG_RF);
1656 ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1657 c->eip = (u32)msr_data;
1659 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1662 return X86EMUL_CONTINUE;
1666 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1668 struct decode_cache *c = &ctxt->decode;
1669 struct desc_struct cs, ss;
1673 /* inject #GP if in real mode */
1674 if (ctxt->mode == X86EMUL_MODE_REAL) {
1675 emulate_gp(ctxt, 0);
1676 return X86EMUL_PROPAGATE_FAULT;
1679 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1680 * Therefore, we inject an #UD.
1682 if (ctxt->mode == X86EMUL_MODE_PROT64) {
1684 return X86EMUL_PROPAGATE_FAULT;
1687 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1689 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1690 switch (ctxt->mode) {
1691 case X86EMUL_MODE_PROT32:
1692 if ((msr_data & 0xfffc) == 0x0) {
1693 emulate_gp(ctxt, 0);
1694 return X86EMUL_PROPAGATE_FAULT;
1697 case X86EMUL_MODE_PROT64:
1698 if (msr_data == 0x0) {
1699 emulate_gp(ctxt, 0);
1700 return X86EMUL_PROPAGATE_FAULT;
1705 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1706 cs_sel = (u16)msr_data;
1707 cs_sel &= ~SELECTOR_RPL_MASK;
1708 ss_sel = cs_sel + 8;
1709 ss_sel &= ~SELECTOR_RPL_MASK;
1710 if (ctxt->mode == X86EMUL_MODE_PROT64
1711 || is_long_mode(ctxt->vcpu)) {
1716 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1717 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1718 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1719 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1721 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1724 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1725 c->regs[VCPU_REGS_RSP] = msr_data;
1727 return X86EMUL_CONTINUE;
1731 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1733 struct decode_cache *c = &ctxt->decode;
1734 struct desc_struct cs, ss;
1739 /* inject #GP if in real mode or Virtual 8086 mode */
1740 if (ctxt->mode == X86EMUL_MODE_REAL ||
1741 ctxt->mode == X86EMUL_MODE_VM86) {
1742 emulate_gp(ctxt, 0);
1743 return X86EMUL_PROPAGATE_FAULT;
1746 setup_syscalls_segments(ctxt, ops, &cs, &ss);
1748 if ((c->rex_prefix & 0x8) != 0x0)
1749 usermode = X86EMUL_MODE_PROT64;
1751 usermode = X86EMUL_MODE_PROT32;
1755 ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1757 case X86EMUL_MODE_PROT32:
1758 cs_sel = (u16)(msr_data + 16);
1759 if ((msr_data & 0xfffc) == 0x0) {
1760 emulate_gp(ctxt, 0);
1761 return X86EMUL_PROPAGATE_FAULT;
1763 ss_sel = (u16)(msr_data + 24);
1765 case X86EMUL_MODE_PROT64:
1766 cs_sel = (u16)(msr_data + 32);
1767 if (msr_data == 0x0) {
1768 emulate_gp(ctxt, 0);
1769 return X86EMUL_PROPAGATE_FAULT;
1771 ss_sel = cs_sel + 8;
1776 cs_sel |= SELECTOR_RPL_MASK;
1777 ss_sel |= SELECTOR_RPL_MASK;
1779 ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
1780 ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
1781 ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
1782 ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
1784 c->eip = c->regs[VCPU_REGS_RDX];
1785 c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
1787 return X86EMUL_CONTINUE;
1790 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
1791 struct x86_emulate_ops *ops)
1794 if (ctxt->mode == X86EMUL_MODE_REAL)
1796 if (ctxt->mode == X86EMUL_MODE_VM86)
1798 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1799 return ops->cpl(ctxt->vcpu) > iopl;
1802 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
1803 struct x86_emulate_ops *ops,
1806 struct desc_struct tr_seg;
1809 u8 perm, bit_idx = port & 0x7;
1810 unsigned mask = (1 << len) - 1;
1812 ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
1815 if (desc_limit_scaled(&tr_seg) < 103)
1817 r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
1819 if (r != X86EMUL_CONTINUE)
1821 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
1823 r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
1824 &perm, 1, ctxt->vcpu, NULL);
1825 if (r != X86EMUL_CONTINUE)
1827 if ((perm >> bit_idx) & mask)
1832 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
1833 struct x86_emulate_ops *ops,
1839 if (emulator_bad_iopl(ctxt, ops))
1840 if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
1843 ctxt->perm_ok = true;
1848 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
1849 struct x86_emulate_ops *ops,
1850 struct tss_segment_16 *tss)
1852 struct decode_cache *c = &ctxt->decode;
1855 tss->flag = ctxt->eflags;
1856 tss->ax = c->regs[VCPU_REGS_RAX];
1857 tss->cx = c->regs[VCPU_REGS_RCX];
1858 tss->dx = c->regs[VCPU_REGS_RDX];
1859 tss->bx = c->regs[VCPU_REGS_RBX];
1860 tss->sp = c->regs[VCPU_REGS_RSP];
1861 tss->bp = c->regs[VCPU_REGS_RBP];
1862 tss->si = c->regs[VCPU_REGS_RSI];
1863 tss->di = c->regs[VCPU_REGS_RDI];
1865 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1866 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1867 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1868 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1869 tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
1872 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
1873 struct x86_emulate_ops *ops,
1874 struct tss_segment_16 *tss)
1876 struct decode_cache *c = &ctxt->decode;
1880 ctxt->eflags = tss->flag | 2;
1881 c->regs[VCPU_REGS_RAX] = tss->ax;
1882 c->regs[VCPU_REGS_RCX] = tss->cx;
1883 c->regs[VCPU_REGS_RDX] = tss->dx;
1884 c->regs[VCPU_REGS_RBX] = tss->bx;
1885 c->regs[VCPU_REGS_RSP] = tss->sp;
1886 c->regs[VCPU_REGS_RBP] = tss->bp;
1887 c->regs[VCPU_REGS_RSI] = tss->si;
1888 c->regs[VCPU_REGS_RDI] = tss->di;
1891 * SDM says that segment selectors are loaded before segment
1894 ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
1895 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
1896 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
1897 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
1898 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
1901 * Now load segment descriptors. If fault happenes at this stage
1902 * it is handled in a context of new task
1904 ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
1905 if (ret != X86EMUL_CONTINUE)
1907 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
1908 if (ret != X86EMUL_CONTINUE)
1910 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
1911 if (ret != X86EMUL_CONTINUE)
1913 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
1914 if (ret != X86EMUL_CONTINUE)
1916 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
1917 if (ret != X86EMUL_CONTINUE)
1920 return X86EMUL_CONTINUE;
1923 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
1924 struct x86_emulate_ops *ops,
1925 u16 tss_selector, u16 old_tss_sel,
1926 ulong old_tss_base, struct desc_struct *new_desc)
1928 struct tss_segment_16 tss_seg;
1930 u32 err, new_tss_base = get_desc_base(new_desc);
1932 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1934 if (ret == X86EMUL_PROPAGATE_FAULT) {
1935 /* FIXME: need to provide precise fault address */
1940 save_state_to_tss16(ctxt, ops, &tss_seg);
1942 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1944 if (ret == X86EMUL_PROPAGATE_FAULT) {
1945 /* FIXME: need to provide precise fault address */
1950 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
1952 if (ret == X86EMUL_PROPAGATE_FAULT) {
1953 /* FIXME: need to provide precise fault address */
1958 if (old_tss_sel != 0xffff) {
1959 tss_seg.prev_task_link = old_tss_sel;
1961 ret = ops->write_std(new_tss_base,
1962 &tss_seg.prev_task_link,
1963 sizeof tss_seg.prev_task_link,
1965 if (ret == X86EMUL_PROPAGATE_FAULT) {
1966 /* FIXME: need to provide precise fault address */
1972 return load_state_from_tss16(ctxt, ops, &tss_seg);
1975 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
1976 struct x86_emulate_ops *ops,
1977 struct tss_segment_32 *tss)
1979 struct decode_cache *c = &ctxt->decode;
1981 tss->cr3 = ops->get_cr(3, ctxt->vcpu);
1983 tss->eflags = ctxt->eflags;
1984 tss->eax = c->regs[VCPU_REGS_RAX];
1985 tss->ecx = c->regs[VCPU_REGS_RCX];
1986 tss->edx = c->regs[VCPU_REGS_RDX];
1987 tss->ebx = c->regs[VCPU_REGS_RBX];
1988 tss->esp = c->regs[VCPU_REGS_RSP];
1989 tss->ebp = c->regs[VCPU_REGS_RBP];
1990 tss->esi = c->regs[VCPU_REGS_RSI];
1991 tss->edi = c->regs[VCPU_REGS_RDI];
1993 tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
1994 tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
1995 tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
1996 tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
1997 tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
1998 tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
1999 tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
2002 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2003 struct x86_emulate_ops *ops,
2004 struct tss_segment_32 *tss)
2006 struct decode_cache *c = &ctxt->decode;
2009 if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
2010 emulate_gp(ctxt, 0);
2011 return X86EMUL_PROPAGATE_FAULT;
2014 ctxt->eflags = tss->eflags | 2;
2015 c->regs[VCPU_REGS_RAX] = tss->eax;
2016 c->regs[VCPU_REGS_RCX] = tss->ecx;
2017 c->regs[VCPU_REGS_RDX] = tss->edx;
2018 c->regs[VCPU_REGS_RBX] = tss->ebx;
2019 c->regs[VCPU_REGS_RSP] = tss->esp;
2020 c->regs[VCPU_REGS_RBP] = tss->ebp;
2021 c->regs[VCPU_REGS_RSI] = tss->esi;
2022 c->regs[VCPU_REGS_RDI] = tss->edi;
2025 * SDM says that segment selectors are loaded before segment
2028 ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
2029 ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
2030 ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
2031 ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
2032 ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
2033 ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
2034 ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
2037 * Now load segment descriptors. If fault happenes at this stage
2038 * it is handled in a context of new task
2040 ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
2041 if (ret != X86EMUL_CONTINUE)
2043 ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
2044 if (ret != X86EMUL_CONTINUE)
2046 ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
2047 if (ret != X86EMUL_CONTINUE)
2049 ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
2050 if (ret != X86EMUL_CONTINUE)
2052 ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
2053 if (ret != X86EMUL_CONTINUE)
2055 ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
2056 if (ret != X86EMUL_CONTINUE)
2058 ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
2059 if (ret != X86EMUL_CONTINUE)
2062 return X86EMUL_CONTINUE;
2065 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2066 struct x86_emulate_ops *ops,
2067 u16 tss_selector, u16 old_tss_sel,
2068 ulong old_tss_base, struct desc_struct *new_desc)
2070 struct tss_segment_32 tss_seg;
2072 u32 err, new_tss_base = get_desc_base(new_desc);
2074 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2076 if (ret == X86EMUL_PROPAGATE_FAULT) {
2077 /* FIXME: need to provide precise fault address */
2082 save_state_to_tss32(ctxt, ops, &tss_seg);
2084 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2086 if (ret == X86EMUL_PROPAGATE_FAULT) {
2087 /* FIXME: need to provide precise fault address */
2092 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
2094 if (ret == X86EMUL_PROPAGATE_FAULT) {
2095 /* FIXME: need to provide precise fault address */
2100 if (old_tss_sel != 0xffff) {
2101 tss_seg.prev_task_link = old_tss_sel;
2103 ret = ops->write_std(new_tss_base,
2104 &tss_seg.prev_task_link,
2105 sizeof tss_seg.prev_task_link,
2107 if (ret == X86EMUL_PROPAGATE_FAULT) {
2108 /* FIXME: need to provide precise fault address */
2114 return load_state_from_tss32(ctxt, ops, &tss_seg);
2117 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2118 struct x86_emulate_ops *ops,
2119 u16 tss_selector, int reason,
2120 bool has_error_code, u32 error_code)
2122 struct desc_struct curr_tss_desc, next_tss_desc;
2124 u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
2125 ulong old_tss_base =
2126 ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
2129 /* FIXME: old_tss_base == ~0 ? */
2131 ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
2132 if (ret != X86EMUL_CONTINUE)
2134 ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
2135 if (ret != X86EMUL_CONTINUE)
2138 /* FIXME: check that next_tss_desc is tss */
2140 if (reason != TASK_SWITCH_IRET) {
2141 if ((tss_selector & 3) > next_tss_desc.dpl ||
2142 ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
2143 emulate_gp(ctxt, 0);
2144 return X86EMUL_PROPAGATE_FAULT;
2148 desc_limit = desc_limit_scaled(&next_tss_desc);
2149 if (!next_tss_desc.p ||
2150 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2151 desc_limit < 0x2b)) {
2152 emulate_ts(ctxt, tss_selector & 0xfffc);
2153 return X86EMUL_PROPAGATE_FAULT;
2156 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2157 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2158 write_segment_descriptor(ctxt, ops, old_tss_sel,
2162 if (reason == TASK_SWITCH_IRET)
2163 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2165 /* set back link to prev task only if NT bit is set in eflags
2166 note that old_tss_sel is not used afetr this point */
2167 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2168 old_tss_sel = 0xffff;
2170 if (next_tss_desc.type & 8)
2171 ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
2172 old_tss_base, &next_tss_desc);
2174 ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
2175 old_tss_base, &next_tss_desc);
2176 if (ret != X86EMUL_CONTINUE)
2179 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2180 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2182 if (reason != TASK_SWITCH_IRET) {
2183 next_tss_desc.type |= (1 << 1); /* set busy flag */
2184 write_segment_descriptor(ctxt, ops, tss_selector,
2188 ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
2189 ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
2190 ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
2192 if (has_error_code) {
2193 struct decode_cache *c = &ctxt->decode;
2195 c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2197 c->src.val = (unsigned long) error_code;
2198 emulate_push(ctxt, ops);
2204 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2205 u16 tss_selector, int reason,
2206 bool has_error_code, u32 error_code)
2208 struct x86_emulate_ops *ops = ctxt->ops;
2209 struct decode_cache *c = &ctxt->decode;
2213 c->dst.type = OP_NONE;
2215 rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
2216 has_error_code, error_code);
2218 if (rc == X86EMUL_CONTINUE) {
2219 rc = writeback(ctxt, ops);
2220 if (rc == X86EMUL_CONTINUE)
2224 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2227 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
2228 int reg, struct operand *op)
2230 struct decode_cache *c = &ctxt->decode;
2231 int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2233 register_address_increment(c, &c->regs[reg], df * op->bytes);
2234 op->addr.mem = register_address(c, base, c->regs[reg]);
2237 static int em_push(struct x86_emulate_ctxt *ctxt)
2239 emulate_push(ctxt, ctxt->ops);
2240 return X86EMUL_CONTINUE;
2243 static int em_das(struct x86_emulate_ctxt *ctxt)
2245 struct decode_cache *c = &ctxt->decode;
2247 bool af, cf, old_cf;
2249 cf = ctxt->eflags & X86_EFLAGS_CF;
2255 af = ctxt->eflags & X86_EFLAGS_AF;
2256 if ((al & 0x0f) > 9 || af) {
2258 cf = old_cf | (al >= 250);
2263 if (old_al > 0x99 || old_cf) {
2269 /* Set PF, ZF, SF */
2270 c->src.type = OP_IMM;
2273 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
2274 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2276 ctxt->eflags |= X86_EFLAGS_CF;
2278 ctxt->eflags |= X86_EFLAGS_AF;
2279 return X86EMUL_CONTINUE;
2282 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2284 struct decode_cache *c = &ctxt->decode;
2289 old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
2292 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
2293 if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
2294 return X86EMUL_CONTINUE;
2297 memcpy(&c->eip, c->src.valptr, c->op_bytes);
2299 c->src.val = old_cs;
2300 emulate_push(ctxt, ctxt->ops);
2301 rc = writeback(ctxt, ctxt->ops);
2302 if (rc != X86EMUL_CONTINUE)
2305 c->src.val = old_eip;
2306 emulate_push(ctxt, ctxt->ops);
2307 rc = writeback(ctxt, ctxt->ops);
2308 if (rc != X86EMUL_CONTINUE)
2311 c->dst.type = OP_NONE;
2313 return X86EMUL_CONTINUE;
2316 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2318 struct decode_cache *c = &ctxt->decode;
2321 c->dst.type = OP_REG;
2322 c->dst.addr.reg = &c->eip;
2323 c->dst.bytes = c->op_bytes;
2324 rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
2325 if (rc != X86EMUL_CONTINUE)
2327 register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
2328 return X86EMUL_CONTINUE;
2331 static int em_imul(struct x86_emulate_ctxt *ctxt)
2333 struct decode_cache *c = &ctxt->decode;
2335 emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
2336 return X86EMUL_CONTINUE;
2339 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2341 struct decode_cache *c = &ctxt->decode;
2343 c->dst.val = c->src2.val;
2344 return em_imul(ctxt);
2347 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2349 struct decode_cache *c = &ctxt->decode;
2351 c->dst.type = OP_REG;
2352 c->dst.bytes = c->src.bytes;
2353 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
2354 c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);
2356 return X86EMUL_CONTINUE;
2359 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2361 unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
2362 struct decode_cache *c = &ctxt->decode;
2365 if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
2366 emulate_gp(ctxt, 0);
2367 return X86EMUL_PROPAGATE_FAULT;
2369 ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
2370 c->regs[VCPU_REGS_RAX] = (u32)tsc;
2371 c->regs[VCPU_REGS_RDX] = tsc >> 32;
2372 return X86EMUL_CONTINUE;
2375 static int em_mov(struct x86_emulate_ctxt *ctxt)
2377 struct decode_cache *c = &ctxt->decode;
2378 c->dst.val = c->src.val;
2379 return X86EMUL_CONTINUE;
2382 #define D(_y) { .flags = (_y) }
2384 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2385 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
2386 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2388 #define D2bv(_f) D((_f) | ByteOp), D(_f)
2389 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
2391 #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \
2392 D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \
2393 D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
2396 static struct opcode group1[] = {
2400 static struct opcode group1A[] = {
2401 D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
2404 static struct opcode group3[] = {
2405 D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
2406 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2407 X4(D(SrcMem | ModRM)),
2410 static struct opcode group4[] = {
2411 D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
2415 static struct opcode group5[] = {
2416 D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
2417 D(SrcMem | ModRM | Stack),
2418 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
2419 D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
2420 D(SrcMem | ModRM | Stack), N,
2423 static struct group_dual group7 = { {
2424 N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
2425 D(SrcNone | ModRM | DstMem | Mov), N,
2426 D(SrcMem16 | ModRM | Mov | Priv),
2427 D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
2429 D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
2430 D(SrcNone | ModRM | DstMem | Mov), N,
2431 D(SrcMem16 | ModRM | Mov | Priv), N,
2434 static struct opcode group8[] = {
2436 D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
2437 D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
2440 static struct group_dual group9 = { {
2441 N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
2443 N, N, N, N, N, N, N, N,
2446 static struct opcode group11[] = {
2447 I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
2450 static struct opcode opcode_table[256] = {
2453 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2456 D(ImplicitOps | Stack | No64), N,
2459 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2462 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2466 D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
2474 X8(I(SrcReg | Stack, em_push)),
2476 X8(D(DstReg | Stack)),
2478 D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
2479 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
2482 I(SrcImm | Mov | Stack, em_push),
2483 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
2484 I(SrcImmByte | Mov | Stack, em_push),
2485 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
2486 D2bv(DstDI | Mov | String), /* insb, insw/insd */
2487 D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
2491 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
2492 G(DstMem | SrcImm | ModRM | Group, group1),
2493 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
2494 G(DstMem | SrcImmByte | ModRM | Group, group1),
2495 D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
2497 I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
2498 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
2499 D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
2500 D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
2502 X8(D(SrcAcc | DstReg)),
2504 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
2505 I(SrcImmFAddr | No64, em_call_far), N,
2506 D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
2508 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
2509 I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
2510 I2bv(SrcSI | DstDI | Mov | String, em_mov),
2511 D2bv(SrcSI | DstDI | String),
2513 D2bv(DstAcc | SrcImm),
2514 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
2515 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
2516 D2bv(SrcAcc | DstDI | String),
2518 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
2520 X8(I(DstReg | SrcImm | Mov, em_mov)),
2522 D2bv(DstMem | SrcImmByte | ModRM),
2523 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
2524 D(ImplicitOps | Stack),
2525 D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
2526 G(ByteOp, group11), G(0, group11),
2528 N, N, N, D(ImplicitOps | Stack),
2529 D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
2531 D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
2534 N, N, N, N, N, N, N, N,
2537 D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
2539 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
2540 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
2541 D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
2544 D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
2546 D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
2547 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
2550 static struct opcode twobyte_table[256] = {
2552 N, GD(0, &group7), N, N,
2553 N, D(ImplicitOps), D(ImplicitOps | Priv), N,
2554 D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
2555 N, D(ImplicitOps | ModRM), N, N,
2557 N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
2559 D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
2560 D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
2562 N, N, N, N, N, N, N, N,
2564 D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
2565 D(ImplicitOps | Priv), N,
2566 D(ImplicitOps), D(ImplicitOps | Priv), N, N,
2567 N, N, N, N, N, N, N, N,
2569 X16(D(DstReg | SrcMem | ModRM | Mov)),
2571 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2573 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2575 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2579 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
2581 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2582 N, D(DstMem | SrcReg | ModRM | BitOp),
2583 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2584 D(DstMem | SrcReg | Src2CL | ModRM), N, N,
2586 D(ImplicitOps | Stack), D(ImplicitOps | Stack),
2587 N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
2588 D(DstMem | SrcReg | Src2ImmByte | ModRM),
2589 D(DstMem | SrcReg | Src2CL | ModRM),
2590 D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
2592 D2bv(DstMem | SrcReg | ModRM | Lock),
2593 D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2594 D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
2595 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2598 G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
2599 D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
2600 D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
2602 D2bv(DstMem | SrcReg | ModRM | Lock),
2603 N, D(DstMem | SrcReg | ModRM | Mov),
2604 N, N, N, GD(0, &group9),
2605 N, N, N, N, N, N, N, N,
2607 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2609 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
2611 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
2624 static unsigned imm_size(struct decode_cache *c)
2628 size = (c->d & ByteOp) ? 1 : c->op_bytes;
2634 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
2635 unsigned size, bool sign_extension)
2637 struct decode_cache *c = &ctxt->decode;
2638 struct x86_emulate_ops *ops = ctxt->ops;
2639 int rc = X86EMUL_CONTINUE;
2643 op->addr.mem = c->eip;
2644 /* NB. Immediates are sign-extended as necessary. */
2645 switch (op->bytes) {
2647 op->val = insn_fetch(s8, 1, c->eip);
2650 op->val = insn_fetch(s16, 2, c->eip);
2653 op->val = insn_fetch(s32, 4, c->eip);
2656 if (!sign_extension) {
2657 switch (op->bytes) {
2665 op->val &= 0xffffffff;
2674 x86_decode_insn(struct x86_emulate_ctxt *ctxt)
2676 struct x86_emulate_ops *ops = ctxt->ops;
2677 struct decode_cache *c = &ctxt->decode;
2678 int rc = X86EMUL_CONTINUE;
2679 int mode = ctxt->mode;
2680 int def_op_bytes, def_ad_bytes, dual, goffset;
2681 struct opcode opcode, *g_mod012, *g_mod3;
2682 struct operand memop = { .type = OP_NONE };
2685 c->fetch.start = c->fetch.end = c->eip;
2686 ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
2689 case X86EMUL_MODE_REAL:
2690 case X86EMUL_MODE_VM86:
2691 case X86EMUL_MODE_PROT16:
2692 def_op_bytes = def_ad_bytes = 2;
2694 case X86EMUL_MODE_PROT32:
2695 def_op_bytes = def_ad_bytes = 4;
2697 #ifdef CONFIG_X86_64
2698 case X86EMUL_MODE_PROT64:
2707 c->op_bytes = def_op_bytes;
2708 c->ad_bytes = def_ad_bytes;
2710 /* Legacy prefixes. */
2712 switch (c->b = insn_fetch(u8, 1, c->eip)) {
2713 case 0x66: /* operand-size override */
2714 /* switch between 2/4 bytes */
2715 c->op_bytes = def_op_bytes ^ 6;
2717 case 0x67: /* address-size override */
2718 if (mode == X86EMUL_MODE_PROT64)
2719 /* switch between 4/8 bytes */
2720 c->ad_bytes = def_ad_bytes ^ 12;
2722 /* switch between 2/4 bytes */
2723 c->ad_bytes = def_ad_bytes ^ 6;
2725 case 0x26: /* ES override */
2726 case 0x2e: /* CS override */
2727 case 0x36: /* SS override */
2728 case 0x3e: /* DS override */
2729 set_seg_override(c, (c->b >> 3) & 3);
2731 case 0x64: /* FS override */
2732 case 0x65: /* GS override */
2733 set_seg_override(c, c->b & 7);
2735 case 0x40 ... 0x4f: /* REX */
2736 if (mode != X86EMUL_MODE_PROT64)
2738 c->rex_prefix = c->b;
2740 case 0xf0: /* LOCK */
2743 case 0xf2: /* REPNE/REPNZ */
2744 c->rep_prefix = REPNE_PREFIX;
2746 case 0xf3: /* REP/REPE/REPZ */
2747 c->rep_prefix = REPE_PREFIX;
2753 /* Any legacy prefix after a REX prefix nullifies its effect. */
2761 if (c->rex_prefix & 8)
2762 c->op_bytes = 8; /* REX.W */
2764 /* Opcode byte(s). */
2765 opcode = opcode_table[c->b];
2766 /* Two-byte opcode? */
2769 c->b = insn_fetch(u8, 1, c->eip);
2770 opcode = twobyte_table[c->b];
2772 c->d = opcode.flags;
2775 dual = c->d & GroupDual;
2776 c->modrm = insn_fetch(u8, 1, c->eip);
2779 if (c->d & GroupDual) {
2780 g_mod012 = opcode.u.gdual->mod012;
2781 g_mod3 = opcode.u.gdual->mod3;
2783 g_mod012 = g_mod3 = opcode.u.group;
2785 c->d &= ~(Group | GroupDual);
2787 goffset = (c->modrm >> 3) & 7;
2789 if ((c->modrm >> 6) == 3)
2790 opcode = g_mod3[goffset];
2792 opcode = g_mod012[goffset];
2793 c->d |= opcode.flags;
2796 c->execute = opcode.u.execute;
2799 if (c->d == 0 || (c->d & Undefined)) {
2800 DPRINTF("Cannot emulate %02x\n", c->b);
2804 if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
2807 if (c->d & Op3264) {
2808 if (mode == X86EMUL_MODE_PROT64)
2814 /* ModRM and SIB bytes. */
2816 rc = decode_modrm(ctxt, ops, &memop);
2817 if (!c->has_seg_override)
2818 set_seg_override(c, c->modrm_seg);
2819 } else if (c->d & MemAbs)
2820 rc = decode_abs(ctxt, ops, &memop);
2821 if (rc != X86EMUL_CONTINUE)
2824 if (!c->has_seg_override)
2825 set_seg_override(c, VCPU_SREG_DS);
2827 if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
2828 memop.addr.mem += seg_override_base(ctxt, ops, c);
2830 if (memop.type == OP_MEM && c->ad_bytes != 8)
2831 memop.addr.mem = (u32)memop.addr.mem;
2833 if (memop.type == OP_MEM && c->rip_relative)
2834 memop.addr.mem += c->eip;
2837 * Decode and fetch the source operand: register, memory
2840 switch (c->d & SrcMask) {
2844 decode_register_operand(&c->src, c, 0);
2853 memop.bytes = (c->d & ByteOp) ? 1 :
2859 rc = decode_imm(ctxt, &c->src, 2, false);
2862 rc = decode_imm(ctxt, &c->src, imm_size(c), true);
2865 rc = decode_imm(ctxt, &c->src, imm_size(c), false);
2868 rc = decode_imm(ctxt, &c->src, 1, true);
2871 rc = decode_imm(ctxt, &c->src, 1, false);
2874 c->src.type = OP_REG;
2875 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2876 c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
2877 fetch_register_operand(&c->src);
2884 c->src.type = OP_MEM;
2885 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2887 register_address(c, seg_override_base(ctxt, ops, c),
2888 c->regs[VCPU_REGS_RSI]);
2892 c->src.type = OP_IMM;
2893 c->src.addr.mem = c->eip;
2894 c->src.bytes = c->op_bytes + 2;
2895 insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
2898 memop.bytes = c->op_bytes + 2;
2903 if (rc != X86EMUL_CONTINUE)
2907 * Decode and fetch the second source operand: register, memory
2910 switch (c->d & Src2Mask) {
2915 c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
2918 rc = decode_imm(ctxt, &c->src2, 1, true);
2925 rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
2929 if (rc != X86EMUL_CONTINUE)
2932 /* Decode and fetch the destination operand: register or memory. */
2933 switch (c->d & DstMask) {
2935 decode_register_operand(&c->dst, c,
2936 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
2939 c->dst.type = OP_IMM;
2940 c->dst.addr.mem = c->eip;
2942 c->dst.val = insn_fetch(u8, 1, c->eip);
2947 if ((c->d & DstMask) == DstMem64)
2950 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2952 fetch_bit_operand(c);
2953 c->dst.orig_val = c->dst.val;
2956 c->dst.type = OP_REG;
2957 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2958 c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
2959 fetch_register_operand(&c->dst);
2960 c->dst.orig_val = c->dst.val;
2963 c->dst.type = OP_MEM;
2964 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2966 register_address(c, es_base(ctxt, ops),
2967 c->regs[VCPU_REGS_RDI]);
2971 /* Special instructions do their own operand decoding. */
2973 c->dst.type = OP_NONE; /* Disable writeback. */
2978 return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
2981 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
2983 struct decode_cache *c = &ctxt->decode;
2985 /* The second termination condition only applies for REPE
2986 * and REPNE. Test if the repeat string operation prefix is
2987 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
2988 * corresponding termination condition according to:
2989 * - if REPE/REPZ and ZF = 0 then done
2990 * - if REPNE/REPNZ and ZF = 1 then done
2992 if (((c->b == 0xa6) || (c->b == 0xa7) ||
2993 (c->b == 0xae) || (c->b == 0xaf))
2994 && (((c->rep_prefix == REPE_PREFIX) &&
2995 ((ctxt->eflags & EFLG_ZF) == 0))
2996 || ((c->rep_prefix == REPNE_PREFIX) &&
2997 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3004 x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3006 struct x86_emulate_ops *ops = ctxt->ops;
3008 struct decode_cache *c = &ctxt->decode;
3009 int rc = X86EMUL_CONTINUE;
3010 int saved_dst_type = c->dst.type;
3011 int irq; /* Used for int 3, int, and into */
3013 ctxt->decode.mem_read.pos = 0;
3015 if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
3020 /* LOCK prefix is allowed only with some instructions */
3021 if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
3026 if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
3031 /* Privileged instruction can be executed only in CPL=0 */
3032 if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
3033 emulate_gp(ctxt, 0);
3037 if (c->rep_prefix && (c->d & String)) {
3038 /* All REP prefixes have the same first termination condition */
3039 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
3045 if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
3046 rc = read_emulated(ctxt, ops, c->src.addr.mem,
3047 c->src.valptr, c->src.bytes);
3048 if (rc != X86EMUL_CONTINUE)
3050 c->src.orig_val64 = c->src.val64;
3053 if (c->src2.type == OP_MEM) {
3054 rc = read_emulated(ctxt, ops, c->src2.addr.mem,
3055 &c->src2.val, c->src2.bytes);
3056 if (rc != X86EMUL_CONTINUE)
3060 if ((c->d & DstMask) == ImplicitOps)
3064 if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
3065 /* optimisation - avoid slow emulated read if Mov */
3066 rc = read_emulated(ctxt, ops, c->dst.addr.mem,
3067 &c->dst.val, c->dst.bytes);
3068 if (rc != X86EMUL_CONTINUE)
3071 c->dst.orig_val = c->dst.val;
3076 rc = c->execute(ctxt);
3077 if (rc != X86EMUL_CONTINUE)
3088 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3090 case 0x06: /* push es */
3091 emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
3093 case 0x07: /* pop es */
3094 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
3098 emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
3100 case 0x0e: /* push cs */
3101 emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
3105 emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
3107 case 0x16: /* push ss */
3108 emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
3110 case 0x17: /* pop ss */
3111 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
3115 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
3117 case 0x1e: /* push ds */
3118 emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
3120 case 0x1f: /* pop ds */
3121 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
3125 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3129 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3133 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3137 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3139 case 0x40 ... 0x47: /* inc r16/r32 */
3140 emulate_1op("inc", c->dst, ctxt->eflags);
3142 case 0x48 ... 0x4f: /* dec r16/r32 */
3143 emulate_1op("dec", c->dst, ctxt->eflags);
3145 case 0x58 ... 0x5f: /* pop reg */
3147 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3149 case 0x60: /* pusha */
3150 rc = emulate_pusha(ctxt, ops);
3152 case 0x61: /* popa */
3153 rc = emulate_popa(ctxt, ops);
3155 case 0x63: /* movsxd */
3156 if (ctxt->mode != X86EMUL_MODE_PROT64)
3157 goto cannot_emulate;
3158 c->dst.val = (s32) c->src.val;
3160 case 0x6c: /* insb */
3161 case 0x6d: /* insw/insd */
3162 c->src.val = c->regs[VCPU_REGS_RDX];
3164 case 0x6e: /* outsb */
3165 case 0x6f: /* outsw/outsd */
3166 c->dst.val = c->regs[VCPU_REGS_RDX];
3169 case 0x70 ... 0x7f: /* jcc (short) */
3170 if (test_cc(c->b, ctxt->eflags))
3171 jmp_rel(c, c->src.val);
3173 case 0x80 ... 0x83: /* Grp1 */
3174 switch (c->modrm_reg) {
3195 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3197 case 0x86 ... 0x87: /* xchg */
3199 /* Write back the register source. */
3200 c->src.val = c->dst.val;
3201 write_register_operand(&c->src);
3203 * Write back the memory destination with implicit LOCK
3206 c->dst.val = c->src.orig_val;
3209 case 0x8c: /* mov r/m, sreg */
3210 if (c->modrm_reg > VCPU_SREG_GS) {
3214 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3216 case 0x8d: /* lea r16/r32, m */
3217 c->dst.val = c->src.addr.mem;
3219 case 0x8e: { /* mov seg, r/m16 */
3224 if (c->modrm_reg == VCPU_SREG_CS ||
3225 c->modrm_reg > VCPU_SREG_GS) {
3230 if (c->modrm_reg == VCPU_SREG_SS)
3231 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3233 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3235 c->dst.type = OP_NONE; /* Disable writeback. */
3238 case 0x8f: /* pop (sole member of Grp1a) */
3239 rc = emulate_grp1a(ctxt, ops);
3241 case 0x90 ... 0x97: /* nop / xchg reg, rax */
3242 if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
3245 case 0x98: /* cbw/cwde/cdqe */
3246 switch (c->op_bytes) {
3247 case 2: c->dst.val = (s8)c->dst.val; break;
3248 case 4: c->dst.val = (s16)c->dst.val; break;
3249 case 8: c->dst.val = (s32)c->dst.val; break;
3252 case 0x9c: /* pushf */
3253 c->src.val = (unsigned long) ctxt->eflags;
3254 emulate_push(ctxt, ops);
3256 case 0x9d: /* popf */
3257 c->dst.type = OP_REG;
3258 c->dst.addr.reg = &ctxt->eflags;
3259 c->dst.bytes = c->op_bytes;
3260 rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
3262 case 0xa6 ... 0xa7: /* cmps */
3263 c->dst.type = OP_NONE; /* Disable writeback. */
3264 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
3266 case 0xa8 ... 0xa9: /* test ax, imm */
3268 case 0xae ... 0xaf: /* scas */
3273 case 0xc3: /* ret */
3274 c->dst.type = OP_REG;
3275 c->dst.addr.reg = &c->eip;
3276 c->dst.bytes = c->op_bytes;
3277 goto pop_instruction;
3278 case 0xc4: /* les */
3279 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
3281 case 0xc5: /* lds */
3282 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
3284 case 0xcb: /* ret far */
3285 rc = emulate_ret_far(ctxt, ops);
3287 case 0xcc: /* int3 */
3290 case 0xcd: /* int n */
3293 rc = emulate_int(ctxt, ops, irq);
3295 case 0xce: /* into */
3296 if (ctxt->eflags & EFLG_OF) {
3301 case 0xcf: /* iret */
3302 rc = emulate_iret(ctxt, ops);
3304 case 0xd0 ... 0xd1: /* Grp2 */
3307 case 0xd2 ... 0xd3: /* Grp2 */
3308 c->src.val = c->regs[VCPU_REGS_RCX];
3311 case 0xe0 ... 0xe2: /* loop/loopz/loopnz */
3312 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3313 if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
3314 (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
3315 jmp_rel(c, c->src.val);
3317 case 0xe3: /* jcxz/jecxz/jrcxz */
3318 if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
3319 jmp_rel(c, c->src.val);
3321 case 0xe4: /* inb */
3324 case 0xe6: /* outb */
3325 case 0xe7: /* out */
3327 case 0xe8: /* call (near) */ {
3328 long int rel = c->src.val;
3329 c->src.val = (unsigned long) c->eip;
3331 emulate_push(ctxt, ops);
3334 case 0xe9: /* jmp rel */
3336 case 0xea: { /* jmp far */
3339 memcpy(&sel, c->src.valptr + c->op_bytes, 2);
3341 if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
3345 memcpy(&c->eip, c->src.valptr, c->op_bytes);
3349 jmp: /* jmp rel short */
3350 jmp_rel(c, c->src.val);
3351 c->dst.type = OP_NONE; /* Disable writeback. */
3353 case 0xec: /* in al,dx */
3354 case 0xed: /* in (e/r)ax,dx */
3355 c->src.val = c->regs[VCPU_REGS_RDX];
3357 c->dst.bytes = min(c->dst.bytes, 4u);
3358 if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
3359 emulate_gp(ctxt, 0);
3362 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
3364 goto done; /* IO is needed */
3366 case 0xee: /* out dx,al */
3367 case 0xef: /* out dx,(e/r)ax */
3368 c->dst.val = c->regs[VCPU_REGS_RDX];
3370 c->src.bytes = min(c->src.bytes, 4u);
3371 if (!emulator_io_permited(ctxt, ops, c->dst.val,
3373 emulate_gp(ctxt, 0);
3376 ops->pio_out_emulated(c->src.bytes, c->dst.val,
3377 &c->src.val, 1, ctxt->vcpu);
3378 c->dst.type = OP_NONE; /* Disable writeback. */
3380 case 0xf4: /* hlt */
3381 ctxt->vcpu->arch.halt_request = 1;
3383 case 0xf5: /* cmc */
3384 /* complement carry flag from eflags reg */
3385 ctxt->eflags ^= EFLG_CF;
3387 case 0xf6 ... 0xf7: /* Grp3 */
3388 rc = emulate_grp3(ctxt, ops);
3390 case 0xf8: /* clc */
3391 ctxt->eflags &= ~EFLG_CF;
3393 case 0xf9: /* stc */
3394 ctxt->eflags |= EFLG_CF;
3396 case 0xfa: /* cli */
3397 if (emulator_bad_iopl(ctxt, ops)) {
3398 emulate_gp(ctxt, 0);
3401 ctxt->eflags &= ~X86_EFLAGS_IF;
3403 case 0xfb: /* sti */
3404 if (emulator_bad_iopl(ctxt, ops)) {
3405 emulate_gp(ctxt, 0);
3408 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3409 ctxt->eflags |= X86_EFLAGS_IF;
3412 case 0xfc: /* cld */
3413 ctxt->eflags &= ~EFLG_DF;
3415 case 0xfd: /* std */
3416 ctxt->eflags |= EFLG_DF;
3418 case 0xfe: /* Grp4 */
3420 rc = emulate_grp45(ctxt, ops);
3422 case 0xff: /* Grp5 */
3423 if (c->modrm_reg == 5)
3427 goto cannot_emulate;
3430 if (rc != X86EMUL_CONTINUE)
3434 rc = writeback(ctxt, ops);
3435 if (rc != X86EMUL_CONTINUE)
3439 * restore dst type in case the decoding will be reused
3440 * (happens for string instruction )
3442 c->dst.type = saved_dst_type;
3444 if ((c->d & SrcMask) == SrcSI)
3445 string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
3446 VCPU_REGS_RSI, &c->src);
3448 if ((c->d & DstMask) == DstDI)
3449 string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
3452 if (c->rep_prefix && (c->d & String)) {
3453 struct read_cache *r = &ctxt->decode.io_read;
3454 register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
3456 if (!string_insn_completed(ctxt)) {
3458 * Re-enter guest when pio read ahead buffer is empty
3459 * or, if it is not used, after each 1024 iteration.
3461 if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
3462 (r->end == 0 || r->end != r->pos)) {
3464 * Reset read cache. Usually happens before
3465 * decode, but since instruction is restarted
3466 * we have to do it here.
3468 ctxt->decode.mem_read.end = 0;
3469 return EMULATION_RESTART;
3471 goto done; /* skip rip writeback */
3478 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3482 case 0x01: /* lgdt, lidt, lmsw */
3483 switch (c->modrm_reg) {
3485 unsigned long address;
3487 case 0: /* vmcall */
3488 if (c->modrm_mod != 3 || c->modrm_rm != 1)
3489 goto cannot_emulate;
3491 rc = kvm_fix_hypercall(ctxt->vcpu);
3492 if (rc != X86EMUL_CONTINUE)
3495 /* Let the processor re-execute the fixed hypercall */
3497 /* Disable writeback. */
3498 c->dst.type = OP_NONE;
3501 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3502 &size, &address, c->op_bytes);
3503 if (rc != X86EMUL_CONTINUE)
3505 realmode_lgdt(ctxt->vcpu, size, address);
3506 /* Disable writeback. */
3507 c->dst.type = OP_NONE;
3509 case 3: /* lidt/vmmcall */
3510 if (c->modrm_mod == 3) {
3511 switch (c->modrm_rm) {
3513 rc = kvm_fix_hypercall(ctxt->vcpu);
3516 goto cannot_emulate;
3519 rc = read_descriptor(ctxt, ops, c->src.addr.mem,
3522 if (rc != X86EMUL_CONTINUE)
3524 realmode_lidt(ctxt->vcpu, size, address);
3526 /* Disable writeback. */
3527 c->dst.type = OP_NONE;
3531 c->dst.val = ops->get_cr(0, ctxt->vcpu);
3534 ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
3535 (c->src.val & 0x0f), ctxt->vcpu);
3536 c->dst.type = OP_NONE;
3538 case 5: /* not defined */
3542 emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
3543 /* Disable writeback. */
3544 c->dst.type = OP_NONE;
3547 goto cannot_emulate;
3550 case 0x05: /* syscall */
3551 rc = emulate_syscall(ctxt, ops);
3554 emulate_clts(ctxt->vcpu);
3556 case 0x09: /* wbinvd */
3557 kvm_emulate_wbinvd(ctxt->vcpu);
3559 case 0x08: /* invd */
3560 case 0x0d: /* GrpP (prefetch) */
3561 case 0x18: /* Grp16 (prefetch/nop) */
3563 case 0x20: /* mov cr, reg */
3564 switch (c->modrm_reg) {
3571 c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
3573 case 0x21: /* mov from dr to reg */
3574 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3575 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3579 ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
3581 case 0x22: /* mov reg, cr */
3582 if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
3583 emulate_gp(ctxt, 0);
3586 c->dst.type = OP_NONE;
3588 case 0x23: /* mov from reg to dr */
3589 if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
3590 (c->modrm_reg == 4 || c->modrm_reg == 5)) {
3595 if (ops->set_dr(c->modrm_reg, c->src.val &
3596 ((ctxt->mode == X86EMUL_MODE_PROT64) ?
3597 ~0ULL : ~0U), ctxt->vcpu) < 0) {
3598 /* #UD condition is already handled by the code above */
3599 emulate_gp(ctxt, 0);
3603 c->dst.type = OP_NONE; /* no writeback */
3607 msr_data = (u32)c->regs[VCPU_REGS_RAX]
3608 | ((u64)c->regs[VCPU_REGS_RDX] << 32);
3609 if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3610 emulate_gp(ctxt, 0);
3613 rc = X86EMUL_CONTINUE;
3617 if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
3618 emulate_gp(ctxt, 0);
3621 c->regs[VCPU_REGS_RAX] = (u32)msr_data;
3622 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
3624 rc = X86EMUL_CONTINUE;
3626 case 0x34: /* sysenter */
3627 rc = emulate_sysenter(ctxt, ops);
3629 case 0x35: /* sysexit */
3630 rc = emulate_sysexit(ctxt, ops);
3632 case 0x40 ... 0x4f: /* cmov */
3633 c->dst.val = c->dst.orig_val = c->src.val;
3634 if (!test_cc(c->b, ctxt->eflags))
3635 c->dst.type = OP_NONE; /* no writeback */
3637 case 0x80 ... 0x8f: /* jnz rel, etc*/
3638 if (test_cc(c->b, ctxt->eflags))
3639 jmp_rel(c, c->src.val);
3641 case 0x90 ... 0x9f: /* setcc r/m8 */
3642 c->dst.val = test_cc(c->b, ctxt->eflags);
3644 case 0xa0: /* push fs */
3645 emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
3647 case 0xa1: /* pop fs */
3648 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
3652 c->dst.type = OP_NONE;
3653 /* only subword offset */
3654 c->src.val &= (c->dst.bytes << 3) - 1;
3655 emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
3657 case 0xa4: /* shld imm8, r, r/m */
3658 case 0xa5: /* shld cl, r, r/m */
3659 emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
3661 case 0xa8: /* push gs */
3662 emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
3664 case 0xa9: /* pop gs */
3665 rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
3669 emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
3671 case 0xac: /* shrd imm8, r, r/m */
3672 case 0xad: /* shrd cl, r, r/m */
3673 emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
3675 case 0xae: /* clflush */
3677 case 0xb0 ... 0xb1: /* cmpxchg */
3679 * Save real source value, then compare EAX against
3682 c->src.orig_val = c->src.val;
3683 c->src.val = c->regs[VCPU_REGS_RAX];
3684 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3685 if (ctxt->eflags & EFLG_ZF) {
3686 /* Success: write back to memory. */
3687 c->dst.val = c->src.orig_val;
3689 /* Failure: write the value we saw to EAX. */
3690 c->dst.type = OP_REG;
3691 c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
3694 case 0xb2: /* lss */
3695 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
3699 emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
3701 case 0xb4: /* lfs */
3702 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
3704 case 0xb5: /* lgs */
3705 rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
3707 case 0xb6 ... 0xb7: /* movzx */
3708 c->dst.bytes = c->op_bytes;
3709 c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
3712 case 0xba: /* Grp8 */
3713 switch (c->modrm_reg & 3) {
3726 emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
3728 case 0xbc: { /* bsf */
3730 __asm__ ("bsf %2, %0; setz %1"
3731 : "=r"(c->dst.val), "=q"(zf)
3733 ctxt->eflags &= ~X86_EFLAGS_ZF;
3735 ctxt->eflags |= X86_EFLAGS_ZF;
3736 c->dst.type = OP_NONE; /* Disable writeback. */
3740 case 0xbd: { /* bsr */
3742 __asm__ ("bsr %2, %0; setz %1"
3743 : "=r"(c->dst.val), "=q"(zf)
3745 ctxt->eflags &= ~X86_EFLAGS_ZF;
3747 ctxt->eflags |= X86_EFLAGS_ZF;
3748 c->dst.type = OP_NONE; /* Disable writeback. */
3752 case 0xbe ... 0xbf: /* movsx */
3753 c->dst.bytes = c->op_bytes;
3754 c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
3757 case 0xc0 ... 0xc1: /* xadd */
3758 emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
3759 /* Write back the register source. */
3760 c->src.val = c->dst.orig_val;
3761 write_register_operand(&c->src);
3763 case 0xc3: /* movnti */
3764 c->dst.bytes = c->op_bytes;
3765 c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
3768 case 0xc7: /* Grp9 (cmpxchg8b) */
3769 rc = emulate_grp9(ctxt, ops);
3772 goto cannot_emulate;
3775 if (rc != X86EMUL_CONTINUE)
3781 DPRINTF("Cannot emulate %02x\n", c->b);