/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */
#ifndef __KERNEL__
#include <stdio.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * never be emulated.
 */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)	/* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<6)
/* Destination is only written; never read. */
#define Mov         (1<<7)
#define MemAbs      (1<<11)	/* Memory operand is absolute displacement */
#define String      (1<<12)	/* String instruction (rep capable) */
#define Stack       (1<<13)	/* Stack instruction (push/pop) */
#define Group       (1<<14)	/* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)	/* Alternate decoding of mod == 3 */
#define NoAccess    (1<<23)	/* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)	/* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)	/* No Such Instruction */
#define Lock        (1<<26)	/* lock prefix is allowed for the instruction */
#define Priv        (1<<27)	/* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)	/* instruction is invalid in 64-bit mode */
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
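/*
 * Illustrative expansion (not extra code): X4(D(DstReg)) becomes
 * X2(D(DstReg)), X2(D(DstReg)), i.e. four identical D(DstReg)
 * initializers. This is how runs of opcodes that share one decode
 * recipe, such as 0x40-0x4f below, are written as a single X16(...)
 * entry in the opcode table.
 */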
struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */	\
	"movl %"_sav",%"_LO32 _tmp"; "					\
	"push %"_tmp"; "						\
	"push %"_tmp"; "						\
	"movl %"_msk",%"_LO32 _tmp"; "					\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"pushf; "							\
	"notl %"_LO32 _tmp"; "						\
	"andl %"_LO32 _tmp",("_STK"); "					\
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "						\
	"orl  %"_LO32 _tmp",("_STK"); "					\
	"popf; "							\
	"pop  %"_sav"; "
/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)		\
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_LO32 _tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "
#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)
/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)
#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
		switch ((_dst).bytes) {					\
		case 1:							\
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						\
		default:						\
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	\
					     _wx, _wy, _lx, _ly, _qx, _qy); \
			break;						\
		}							\
	} while (0)
/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)			\
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)		\
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (_cl).val;				\
		_type _srcv = (_src).val;				\
		_type _dstv = (_dst).val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(_cl).val  = (unsigned long) _clv;			\
		(_src).val = (unsigned long) _srcv;			\
		(_dst).val = (unsigned long) _dstv;			\
	} while (0)
#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
	do {								\
		switch ((_dst).bytes) {					\
		case 2:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "w", unsigned short);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
					 "l", unsigned int);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
					      "q", unsigned long));	\
			break;						\
		}							\
	} while (0)
#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)
/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)					\
	do {								\
		switch ((_dst).bytes) {					\
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)
#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)	\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)
#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)
/* Instruction has only one source operand; destination is implicit
 * (e.g. mul, div, imul, idiv). */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
	do {								\
		switch((_src).bytes) {					\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)
#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex);	\
			break;						\
		case 8:							\
			ON64(__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						      _eflags, "q", _ex)); \
			break;						\
		}							\
	} while (0)
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type) _x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)				\
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}
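/*
 * Illustrative example: with ad_bytes == 2, ad_mask() is 0xffff, so
 * register_address_increment() wraps within the low word only -- e.g.
 * SI == 0xffff incremented by 1 yields 0x0000 while the upper bits of
 * RSI are preserved, matching 16-bit addressing semantics.
 */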
static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}
static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
}

static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       u32 err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, DE_VECTOR, 0, false);
	return X86EMUL_PROPAGATE_FAULT;
}
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
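/*
 * Illustrative example: with highbyte_regs set, modrm_reg == 4 (AH)
 * resolves to byte 1 of the RAX slot, so *(u8 *)p reads bits 8-15 of
 * RAX on this little-endian architecture -- the legacy AH/CH/DH/BH
 * encodings reuse register indices 4-7.
 */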
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   ulong addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
	return rc;
}
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
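/*
 * Illustrative example: for "jne" (opcode 0x75), (0x75 & 15) >> 1 == 2
 * selects the ZF case, and the odd low bit inverts the sense, so
 * test_cc(0x75, flags) is 1 exactly when EFLG_ZF is clear.
 */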
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2;		/* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem = modrm_ea;
done:
	return rc;
}
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
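/*
 * Worked example (illustrative): "bt dword [mem], reg" with reg == 67
 * and a 4-byte operand uses mask ~(32 - 1), so sv = 67 & ~31 = 64; the
 * effective address advances by 64 >> 3 = 8 bytes and the remaining
 * in-dword bit offset is 67 & 31 = 3.
 */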
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
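/*
 * Illustrative example: a descriptor with limit 0xfffff and g == 1
 * scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a flat 4GB
 * segment; with g == 0 the raw byte limit is used unchanged.
 */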
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}
/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;
		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment, or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					c->dst.addr.mem,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					c->dst.addr.mem,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, c->dst.addr.mem, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
					   c->regs[VCPU_REGS_RSP]);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}
static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;
	u32 err;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented. */
		return X86EMUL_UNHANDLEABLE;
	}
}
static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
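/*
 * Illustrative summary of the cmpxchg8b semantics implemented above: if
 * EDX:EAX equals the 64-bit memory operand, ZF is set and ECX:EBX is
 * stored through the normal writeback path; otherwise ZF is cleared and
 * the old memory value is loaded into EDX:EAX.
 */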
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
	int rc;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.val = c->src.val;
	return rc;
}
static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}
static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
	    || is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem = register_address(c, base, c->regs[reg]);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;
	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else
		af = false;
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
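/*
 * Worked example (illustrative): AL = 0x9b with CF = AF = 0. The low
 * nibble 0xb > 9, so 6 is subtracted (AL = 0x95, AF set); the original
 * AL 0x9b > 0x99, so 0x60 is also subtracted (AL = 0x35, CF set) --
 * matching the architected DAS result.
 */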
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	return X86EMUL_CONTINUE;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}
static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}
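/*
 * Illustrative example: for cwd with AX = 0x8000, src.val >> 15 is 1
 * and ~(1 - 1) == ~0 writes 0xffff to DX; for AX = 0x1234 the shift
 * yields 0 and ~(0 - 1) == 0 clears DX. DX:AX thus becomes the
 * sign-extension of AX, with the same expression covering cdq/cqo.
 */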
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}
#define D(_y) { .flags = (_y) }
#define N    D(0)
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM),			\
		D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock),		\
		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)
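/*
 * For illustration: D2bv(_f) emits a byte-op/word-op entry pair, so
 *
 *	D2bv(DstMem | SrcReg | ModRM)
 *
 * expands to
 *
 *	D(DstMem | SrcReg | ModRM | ByteOp), D(DstMem | SrcReg | ModRM)
 *
 * and D6ALU(_f) emits the six classic forms of an x86 ALU opcode row:
 * r/m,r and r,r/m (byte and word widths each) plus the two
 * accumulator-with-immediate forms, with Lock stripped from the forms
 * whose destination cannot be memory.
 */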
static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct group_dual group7 = { {
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv),
	D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
	D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };
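/*
 * How the group tables are reached (sketch): for a Group opcode the
 * decoder peeks at the ModRM byte and uses bits 5:3 as the index.
 * E.g. opcode 0xff with ModRM reg field 3 ("call far indirect")
 * selects group5[3], the I(..., em_call_far) entry above.  A GroupDual
 * entry such as group7 additionally picks the mod3 table when the
 * ModRM mod field is 3 and the mod012 table otherwise.
 */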
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D6ALU(Lock), N, N,
	/* 0x28 - 0x2F */
	D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D6ALU(Lock), N, N,
	/* 0x38 - 0x3F */
	D6ALU(0), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bv(DstDI | Mov | String), /* insb, insw/insd */
	D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	D2bv(DstMem | SrcReg | ModRM | Mov),
	D2bv(DstReg | SrcMem | ModRM | Mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	X8(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	D2bv(DstAcc | SrcMem | Mov | MemAbs),
	D2bv(DstMem | SrcAcc | Mov | MemAbs),
	D2bv(SrcSI | DstDI | Mov | String), D2bv(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	D2bv(SrcAcc | DstDI | Mov | String),
	D2bv(SrcSI | DstAcc | Mov | String),
	D2bv(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(D(ByteOp | DstReg | SrcImm | Mov)),
	/* 0xB8 - 0xBF */
	X8(D(DstReg | SrcImm | Mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	D2bv(DstMem | SrcImm | ModRM | Mov),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
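/*
 * Reading an entry (sketch): opcode 0x68 ("push imm") decodes via
 * I(SrcImm | Mov | Stack, em_push) above, so the generic decoder
 * fetches an immediate source operand and execution dispatches
 * directly to em_push() rather than the big switch in
 * x86_emulate_insn().
 */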
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
	N, D(ImplicitOps), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
	D(ImplicitOps | Priv), N,
	D(ImplicitOps), D(ImplicitOps | Priv), N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I

#undef D2bv
#undef I2bv
#undef D6ALU
static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
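/*
 * Example (illustrative): for an 0x83 group-1 instruction ("add r/m32,
 * imm8") the decoder calls decode_imm(ctxt, &c->src, 1, true), fetching
 * one byte and sign-extending it, so 0xff becomes -1; the SrcImmUByte
 * variant passes sign_extension == false and masks the same byte to
 * 0x000000ff instead.
 */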
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;
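	/*
	 * Note on the override toggles below (illustrative): the operand-
	 * and address-size prefixes flip between the two legal widths, and
	 * XOR against a constant does exactly that: 2 ^ 6 == 4 and
	 * 4 ^ 6 == 2 for the 16/32-bit widths, while 4 ^ 12 == 8 and
	 * 8 ^ 12 == 4 for the 32/64-bit address widths of long mode.
	 */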
	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}
		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;
	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;
	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined)) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
		memop.addr.mem += seg_override_base(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem = (u32)memop.addr.mem;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem += c->eip;
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem =
			register_address(c, seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL, not just bit 3 */
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;
	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->dst.type == OP_MEM && (c->d & BitOp))
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem =
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
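/*
 * Decode order recap (sketch): legacy/REX prefixes, then one or two
 * opcode bytes, then an optional group redirect keyed on ModRM bits
 * 5:3, then ModRM/SIB and displacement, and finally the three operand
 * decodes above.  Everything the execution phase needs is left in
 * ctxt->decode.
 */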
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/*
	 * The second termination condition only applies to REPE and
	 * REPNE.  If the repeat string operation prefix is REPE/REPZ or
	 * REPNE/REPNZ, test the corresponding termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((c->b == 0xa6) || (c->b == 0xa7) ||
	     (c->b == 0xae) || (c->b == 0xaf))
	    && (((c->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((c->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
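/*
 * Example (illustrative): "repe cmpsb" (0xa6 with REPE) stops either
 * when RCX reaches zero (checked before each iteration in
 * x86_emulate_insn()) or when a comparison clears ZF; a plain
 * "rep stosb" only ever stops on the RCX condition.
 */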
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;
	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}
	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = read_emulated(ctxt, ops, c->src.addr.mem,
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
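	/*
	 * From here on, execution proper: opcodes that were given a
	 * .u.execute callback in the decode tables (the em_*() helpers
	 * above) are dispatched through c->execute; everything else falls
	 * through to the big one-byte and two-byte opcode switches below.
	 */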
special_insn:

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
3125 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
3129 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
3133 emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
3137 emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
3139 case 0x40 ... 0x47: /* inc r16/r32 */
3140 emulate_1op("inc", c->dst, ctxt->eflags);
3142 case 0x48 ... 0x4f: /* dec r16/r32 */
3143 emulate_1op("dec", c->dst, ctxt->eflags);
3145 case 0x58 ... 0x5f: /* pop reg */
3147 rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
3148 if (rc != X86EMUL_CONTINUE)
3151 case 0x60: /* pusha */
3152 rc = emulate_pusha(ctxt, ops);
3153 if (rc != X86EMUL_CONTINUE)
3156 case 0x61: /* popa */
3157 rc = emulate_popa(ctxt, ops);
3158 if (rc != X86EMUL_CONTINUE)
3161 case 0x63: /* movsxd */
3162 if (ctxt->mode != X86EMUL_MODE_PROT64)
3163 goto cannot_emulate;
3164 c->dst.val = (s32) c->src.val;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
3201 emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
3203 case 0x86 ... 0x87: /* xchg */
3205 /* Write back the register source. */
3206 c->src.val = c->dst.val;
3207 write_register_operand(&c->src);
3209 * Write back the memory destination with implicit LOCK
3212 c->dst.val = c->src.orig_val;
3215 case 0x88 ... 0x8b: /* mov */
3217 case 0x8c: /* mov r/m, sreg */
3218 if (c->modrm_reg > VCPU_SREG_GS) {
3222 c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
3224 case 0x8d: /* lea r16/r32, m */
3225 c->dst.val = c->src.addr.mem;
3227 case 0x8e: { /* mov seg, r/m16 */
3232 if (c->modrm_reg == VCPU_SREG_CS ||
3233 c->modrm_reg > VCPU_SREG_GS) {
3238 if (c->modrm_reg == VCPU_SREG_SS)
3239 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3241 rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
3243 c->dst.type = OP_NONE; /* Disable writeback. */
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa3:	/* mov */
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		goto cmp;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:	/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6:	/* outb */
	case 0xe7:	/* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->dst.val,
					  c->src.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}
writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache.  Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
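	/*
	 * Note on the REP batching above (illustrative): RCX & 0x3ff
	 * reaching zero means a multiple of 1024 iterations has retired,
	 * so a long "rep movsb" drops back to the guest once per
	 * 1024-iteration batch instead of looping inside the emulator
	 * until RCX is exhausted.
	 */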
twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05:	/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
					     : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
					       (s16) c->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
						  (u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	default:
		goto cannot_emulate;
	}
	goto writeback;
3839 DPRINTF("Cannot emulate %02x\n", c->b);