This source file includes the following definitions:
- reg_read
- reg_write
- reg_rmw
- writeback_registers
- invalidate_registers
- emulator_check_intercept
- assign_masked
- assign_register
- ad_mask
- stack_mask
- stack_size
- address_mask
- register_address
- masked_increment
- register_address_increment
- rsp_increment
- desc_limit_scaled
- seg_base
- emulate_exception
- emulate_db
- emulate_gp
- emulate_ss
- emulate_ud
- emulate_ts
- emulate_de
- emulate_nm
- get_segment_selector
- set_segment_selector
- insn_alignment
- __linearize
- linearize
- assign_eip
- assign_eip_near
- assign_eip_far
- jmp_rel
- linear_read_system
- linear_write_system
- segmented_read_std
- segmented_write_std
- __do_insn_fetch_bytes
- do_insn_fetch_bytes
- decode_register
- read_descriptor
- em_bsf_c
- em_bsr_c
- test_cc
- fetch_register_operand
- emulator_get_fpu
- emulator_put_fpu
- read_sse_reg
- write_sse_reg
- read_mmx_reg
- write_mmx_reg
- em_fninit
- em_fnstcw
- em_fnstsw
- decode_register_operand
- adjust_modrm_seg
- decode_modrm
- decode_abs
- fetch_bit_operand
- read_emulated
- segmented_read
- segmented_write
- segmented_cmpxchg
- pio_in_emulated
- read_interrupt_descriptor
- get_descriptor_table_ptr
- get_descriptor_ptr
- read_segment_descriptor
- write_segment_descriptor
- __load_segment_descriptor
- load_segment_descriptor
- write_register_operand
- writeback
- push
- em_push
- emulate_pop
- em_pop
- emulate_popf
- em_popf
- em_enter
- em_leave
- em_push_sreg
- em_pop_sreg
- em_pusha
- em_pushf
- em_popa
- __emulate_int_real
- emulate_int_real
- emulate_int
- emulate_iret_real
- em_iret
- em_jmp_far
- em_jmp_abs
- em_call_near_abs
- em_cmpxchg8b
- em_ret
- em_ret_far
- em_ret_far_imm
- em_cmpxchg
- em_lseg
- emulator_has_longmode
- rsm_set_desc_flags
- rsm_load_seg_32
- rsm_load_seg_64
- rsm_enter_protected_mode
- rsm_load_state_32
- rsm_load_state_64
- em_rsm
- setup_syscalls_segments
- vendor_intel
- em_syscall_is_enabled
- em_syscall
- em_sysenter
- em_sysexit
- emulator_bad_iopl
- emulator_io_port_access_allowed
- emulator_io_permited
- string_registers_quirk
- save_state_to_tss16
- load_state_from_tss16
- task_switch_16
- save_state_to_tss32
- load_state_from_tss32
- task_switch_32
- emulator_do_task_switch
- emulator_task_switch
- string_addr_inc
- em_das
- em_aam
- em_aad
- em_call
- em_call_far
- em_ret_near_imm
- em_xchg
- em_imul_3op
- em_cwd
- em_rdpid
- em_rdtsc
- em_rdpmc
- em_mov
- em_movbe
- em_cr_write
- em_dr_write
- em_wrmsr
- em_rdmsr
- em_store_sreg
- em_mov_rm_sreg
- em_mov_sreg_rm
- em_sldt
- em_lldt
- em_str
- em_ltr
- em_invlpg
- em_clts
- em_hypercall
- emulate_store_desc_ptr
- em_sgdt
- em_sidt
- em_lgdt_lidt
- em_lgdt
- em_lidt
- em_smsw
- em_lmsw
- em_loop
- em_jcxz
- em_in
- em_out
- em_cli
- em_sti
- em_cpuid
- em_sahf
- em_lahf
- em_bswap
- em_clflush
- em_movsxd
- check_fxsr
- __fxstate_size
- fxstate_size
- em_fxsave
- fxregs_fixup
- em_fxrstor
- em_xsetbv
- valid_cr
- check_cr_read
- check_cr_write
- check_dr7_gd
- check_dr_read
- check_dr_write
- check_svme
- check_svme_pa
- check_rdtsc
- check_rdpmc
- check_perm_in
- check_perm_out
- imm_size
- decode_imm
- decode_operand
- x86_decode_insn
- x86_page_table_writing_insn
- string_insn_completed
- flush_pending_x87_faults
- fetch_possible_mmx_operand
- fastop
- init_decode_cache
- x86_emulate_insn
- emulator_invalidate_register_cache
- emulator_writeback_register_cache
- emulator_can_use_gpa
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 #include <linux/kvm_host.h>
22 #include "kvm_cache_regs.h"
23 #include <asm/kvm_emulate.h>
24 #include <linux/stringify.h>
25 #include <asm/fpu/api.h>
26 #include <asm/debugreg.h>
27 #include <asm/nospec-branch.h>
28
29 #include "x86.h"
30 #include "tss.h"
31 #include "mmu.h"
32 #include "pmu.h"
33
34
35
36
37 #define OpNone 0ull
38 #define OpImplicit 1ull
39 #define OpReg 2ull
40 #define OpMem 3ull
41 #define OpAcc 4ull
42 #define OpDI 5ull
43 #define OpMem64 6ull
44 #define OpImmUByte 7ull
45 #define OpDX 8ull
46 #define OpCL 9ull
47 #define OpImmByte 10ull
48 #define OpOne 11ull
49 #define OpImm 12ull
50 #define OpMem16 13ull
51 #define OpMem32 14ull
52 #define OpImmU 15ull
53 #define OpSI 16ull
54 #define OpImmFAddr 17ull
55 #define OpMemFAddr 18ull
56 #define OpImmU16 19ull
57 #define OpES 20ull
58 #define OpCS 21ull
59 #define OpSS 22ull
60 #define OpDS 23ull
61 #define OpFS 24ull
62 #define OpGS 25ull
63 #define OpMem8 26ull
64 #define OpImm64 27ull
65 #define OpXLat 28ull
66 #define OpAccLo 29ull
67 #define OpAccHi 30ull
68
69 #define OpBits 5
70 #define OpMask ((1ull << OpBits) - 1)
71
72
73
74
75
76
77
78
79
80
81
82 #define ByteOp (1<<0)
83
84 #define DstShift 1
85 #define ImplicitOps (OpImplicit << DstShift)
86 #define DstReg (OpReg << DstShift)
87 #define DstMem (OpMem << DstShift)
88 #define DstAcc (OpAcc << DstShift)
89 #define DstDI (OpDI << DstShift)
90 #define DstMem64 (OpMem64 << DstShift)
91 #define DstMem16 (OpMem16 << DstShift)
92 #define DstImmUByte (OpImmUByte << DstShift)
93 #define DstDX (OpDX << DstShift)
94 #define DstAccLo (OpAccLo << DstShift)
95 #define DstMask (OpMask << DstShift)
96
97 #define SrcShift 6
98 #define SrcNone (OpNone << SrcShift)
99 #define SrcReg (OpReg << SrcShift)
100 #define SrcMem (OpMem << SrcShift)
101 #define SrcMem16 (OpMem16 << SrcShift)
102 #define SrcMem32 (OpMem32 << SrcShift)
103 #define SrcImm (OpImm << SrcShift)
104 #define SrcImmByte (OpImmByte << SrcShift)
105 #define SrcOne (OpOne << SrcShift)
106 #define SrcImmUByte (OpImmUByte << SrcShift)
107 #define SrcImmU (OpImmU << SrcShift)
108 #define SrcSI (OpSI << SrcShift)
109 #define SrcXLat (OpXLat << SrcShift)
110 #define SrcImmFAddr (OpImmFAddr << SrcShift)
111 #define SrcMemFAddr (OpMemFAddr << SrcShift)
112 #define SrcAcc (OpAcc << SrcShift)
113 #define SrcImmU16 (OpImmU16 << SrcShift)
114 #define SrcImm64 (OpImm64 << SrcShift)
115 #define SrcDX (OpDX << SrcShift)
116 #define SrcMem8 (OpMem8 << SrcShift)
117 #define SrcAccHi (OpAccHi << SrcShift)
118 #define SrcMask (OpMask << SrcShift)
119 #define BitOp (1<<11)
120 #define MemAbs (1<<12)
121 #define String (1<<13)
122 #define Stack (1<<14)
123 #define GroupMask (7<<15)
124 #define Group (1<<15)
125 #define GroupDual (2<<15)
126 #define Prefix (3<<15)
127 #define RMExt (4<<15)
128 #define Escape (5<<15)
129 #define InstrDual (6<<15)
130 #define ModeDual (7<<15)
131 #define Sse (1<<18)
132
133 #define ModRM (1<<19)
134
135 #define Mov (1<<20)
136
137 #define Prot (1<<21)
138 #define EmulateOnUD (1<<22)
139 #define NoAccess (1<<23)
140 #define Op3264 (1<<24)
141 #define Undefined (1<<25)
142 #define Lock (1<<26)
143 #define Priv (1<<27)
144 #define No64 (1<<28)
145 #define PageTable (1 << 29)
146 #define NotImpl (1 << 30)
147
148 #define Src2Shift (31)
149 #define Src2None (OpNone << Src2Shift)
150 #define Src2Mem (OpMem << Src2Shift)
151 #define Src2CL (OpCL << Src2Shift)
152 #define Src2ImmByte (OpImmByte << Src2Shift)
153 #define Src2One (OpOne << Src2Shift)
154 #define Src2Imm (OpImm << Src2Shift)
155 #define Src2ES (OpES << Src2Shift)
156 #define Src2CS (OpCS << Src2Shift)
157 #define Src2SS (OpSS << Src2Shift)
158 #define Src2DS (OpDS << Src2Shift)
159 #define Src2FS (OpFS << Src2Shift)
160 #define Src2GS (OpGS << Src2Shift)
161 #define Src2Mask (OpMask << Src2Shift)
162 #define Mmx ((u64)1 << 40)
163 #define AlignMask ((u64)7 << 41)
164 #define Aligned ((u64)1 << 41)
165 #define Unaligned ((u64)2 << 41)
166 #define Avx ((u64)3 << 41)
167 #define Aligned16 ((u64)4 << 41)
168 #define Fastop ((u64)1 << 44)
169 #define NoWrite ((u64)1 << 45)
170 #define SrcWrite ((u64)1 << 46)
171 #define NoMod ((u64)1 << 47)
172 #define Intercept ((u64)1 << 48)
173 #define CheckPerm ((u64)1 << 49)
174 #define PrivUD ((u64)1 << 51)
175 #define NearBranch ((u64)1 << 52)
176 #define No16 ((u64)1 << 53)
177 #define IncSP ((u64)1 << 54)
178 #define TwoMemOp ((u64)1 << 55)
179
180 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
181
182 #define X2(x...) x, x
183 #define X3(x...) X2(x), x
184 #define X4(x...) X2(x), X2(x)
185 #define X5(x...) X4(x), x
186 #define X6(x...) X4(x), X2(x)
187 #define X7(x...) X4(x), X3(x)
188 #define X8(x...) X4(x), X4(x)
189 #define X16(x...) X8(x), X8(x)
190
191 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
192 #define FASTOP_SIZE 8
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211 struct fastop;
212
213 struct opcode {
214 u64 flags : 56;
215 u64 intercept : 8;
216 union {
217 int (*execute)(struct x86_emulate_ctxt *ctxt);
218 const struct opcode *group;
219 const struct group_dual *gdual;
220 const struct gprefix *gprefix;
221 const struct escape *esc;
222 const struct instr_dual *idual;
223 const struct mode_dual *mdual;
224 void (*fastop)(struct fastop *fake);
225 } u;
226 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
227 };
228
229 struct group_dual {
230 struct opcode mod012[8];
231 struct opcode mod3[8];
232 };
233
234 struct gprefix {
235 struct opcode pfx_no;
236 struct opcode pfx_66;
237 struct opcode pfx_f2;
238 struct opcode pfx_f3;
239 };
240
241 struct escape {
242 struct opcode op[8];
243 struct opcode high[64];
244 };
245
246 struct instr_dual {
247 struct opcode mod012;
248 struct opcode mod3;
249 };
250
251 struct mode_dual {
252 struct opcode mode32;
253 struct opcode mode64;
254 };
255
256 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
257
258 enum x86_transfer_type {
259 X86_TRANSFER_NONE,
260 X86_TRANSFER_CALL_JMP,
261 X86_TRANSFER_RET,
262 X86_TRANSFER_TASK_SWITCH,
263 };
264
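/*
 * Lazily-filled GPR cache: reg_read() pulls a register into _regs[] on first
 * use (tracked in regs_valid); reg_write()/reg_rmw() additionally set the
 * regs_dirty bit so writeback_registers() only flushes modified registers
 * back through ->write_gpr().  invalidate_registers() drops both masks.
 */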
265 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
266 {
267 if (!(ctxt->regs_valid & (1 << nr))) {
268 ctxt->regs_valid |= 1 << nr;
269 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
270 }
271 return ctxt->_regs[nr];
272 }
273
274 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 {
276 ctxt->regs_valid |= 1 << nr;
277 ctxt->regs_dirty |= 1 << nr;
278 return &ctxt->_regs[nr];
279 }
280
281 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
282 {
283 reg_read(ctxt, nr);
284 return reg_write(ctxt, nr);
285 }
286
287 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
288 {
289 unsigned reg;
290
291 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
292 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
293 }
294
295 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
296 {
297 ctxt->regs_dirty = 0;
298 ctxt->regs_valid = 0;
299 }
300
301
302
303
304
305 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
306 X86_EFLAGS_PF|X86_EFLAGS_CF)
307
308 #ifdef CONFIG_X86_64
309 #define ON64(x) x
310 #else
311 #define ON64(x)
312 #endif
313
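/*
 * "fastop" helpers: the FOP and FASTOP macros below emit tiny assembly
 * stubs, one per operand width (b/w/l/q), aligned to FASTOP_SIZE so the
 * dispatcher can pick a width variant at a fixed offset from em_<op>.
 * Operands travel in al/ax/eax/rax and dl/dx/edx/rdx (cl/cx/ecx/rcx for
 * shift counts and the single-operand mul/div forms), flags in RFLAGS, and
 * faulting stubs report failure via kvm_fastop_exception, which clears %esi.
 */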
314 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
315
316 #define __FOP_FUNC(name) \
317 ".align " __stringify(FASTOP_SIZE) " \n\t" \
318 ".type " name ", @function \n\t" \
319 name ":\n\t"
320
321 #define FOP_FUNC(name) \
322 __FOP_FUNC(#name)
323
324 #define __FOP_RET(name) \
325 "ret \n\t" \
326 ".size " name ", .-" name "\n\t"
327
328 #define FOP_RET(name) \
329 __FOP_RET(#name)
330
331 #define FOP_START(op) \
332 extern void em_##op(struct fastop *fake); \
333 asm(".pushsection .text, \"ax\" \n\t" \
334 ".global em_" #op " \n\t" \
335 ".align " __stringify(FASTOP_SIZE) " \n\t" \
336 "em_" #op ":\n\t"
337
338 #define FOP_END \
339 ".popsection")
340
341 #define __FOPNOP(name) \
342 __FOP_FUNC(name) \
343 __FOP_RET(name)
344
345 #define FOPNOP() \
346 __FOPNOP(__stringify(__UNIQUE_ID(nop)))
347
348 #define FOP1E(op, dst) \
349 __FOP_FUNC(#op "_" #dst) \
350 "10: " #op " %" #dst " \n\t" \
351 __FOP_RET(#op "_" #dst)
352
353 #define FOP1EEX(op, dst) \
354 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
355
356 #define FASTOP1(op) \
357 FOP_START(op) \
358 FOP1E(op##b, al) \
359 FOP1E(op##w, ax) \
360 FOP1E(op##l, eax) \
361 ON64(FOP1E(op##q, rax)) \
362 FOP_END
363
364
365 #define FASTOP1SRC2(op, name) \
366 FOP_START(name) \
367 FOP1E(op, cl) \
368 FOP1E(op, cx) \
369 FOP1E(op, ecx) \
370 ON64(FOP1E(op, rcx)) \
371 FOP_END
372
373
374 #define FASTOP1SRC2EX(op, name) \
375 FOP_START(name) \
376 FOP1EEX(op, cl) \
377 FOP1EEX(op, cx) \
378 FOP1EEX(op, ecx) \
379 ON64(FOP1EEX(op, rcx)) \
380 FOP_END
381
382 #define FOP2E(op, dst, src) \
383 __FOP_FUNC(#op "_" #dst "_" #src) \
384 #op " %" #src ", %" #dst " \n\t" \
385 __FOP_RET(#op "_" #dst "_" #src)
386
387 #define FASTOP2(op) \
388 FOP_START(op) \
389 FOP2E(op##b, al, dl) \
390 FOP2E(op##w, ax, dx) \
391 FOP2E(op##l, eax, edx) \
392 ON64(FOP2E(op##q, rax, rdx)) \
393 FOP_END
394
395
396 #define FASTOP2W(op) \
397 FOP_START(op) \
398 FOPNOP() \
399 FOP2E(op##w, ax, dx) \
400 FOP2E(op##l, eax, edx) \
401 ON64(FOP2E(op##q, rax, rdx)) \
402 FOP_END
403
404
405 #define FASTOP2CL(op) \
406 FOP_START(op) \
407 FOP2E(op##b, al, cl) \
408 FOP2E(op##w, ax, cl) \
409 FOP2E(op##l, eax, cl) \
410 ON64(FOP2E(op##q, rax, cl)) \
411 FOP_END
412
413
414 #define FASTOP2R(op, name) \
415 FOP_START(name) \
416 FOP2E(op##b, dl, al) \
417 FOP2E(op##w, dx, ax) \
418 FOP2E(op##l, edx, eax) \
419 ON64(FOP2E(op##q, rdx, rax)) \
420 FOP_END
421
422 #define FOP3E(op, dst, src, src2) \
423 __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
424 #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
425 __FOP_RET(#op "_" #dst "_" #src "_" #src2)
426
427
428 #define FASTOP3WCL(op) \
429 FOP_START(op) \
430 FOPNOP() \
431 FOP3E(op##w, ax, dx, cl) \
432 FOP3E(op##l, eax, edx, cl) \
433 ON64(FOP3E(op##q, rax, rdx, cl)) \
434 FOP_END
435
436
437 #define FOP_SETCC(op) \
438 ".align 4 \n\t" \
439 ".type " #op ", @function \n\t" \
440 #op ": \n\t" \
441 #op " %al \n\t" \
442 __FOP_RET(#op)
443
444 asm(".pushsection .fixup, \"ax\"\n"
445 ".global kvm_fastop_exception \n"
446 "kvm_fastop_exception: xor %esi, %esi; ret\n"
447 ".popsection");
448
449 FOP_START(setcc)
450 FOP_SETCC(seto)
451 FOP_SETCC(setno)
452 FOP_SETCC(setc)
453 FOP_SETCC(setnc)
454 FOP_SETCC(setz)
455 FOP_SETCC(setnz)
456 FOP_SETCC(setbe)
457 FOP_SETCC(setnbe)
458 FOP_SETCC(sets)
459 FOP_SETCC(setns)
460 FOP_SETCC(setp)
461 FOP_SETCC(setnp)
462 FOP_SETCC(setl)
463 FOP_SETCC(setnl)
464 FOP_SETCC(setle)
465 FOP_SETCC(setnle)
466 FOP_END;
467
468 FOP_START(salc)
469 FOP_FUNC(salc)
470 "pushf; sbb %al, %al; popf \n\t"
471 FOP_RET(salc)
472 FOP_END;
473
474
475
476
477
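/*
 * asm_safe() runs a single instruction under an exception-table fixup: if
 * the instruction faults, _fault is set and X86EMUL_UNHANDLEABLE is
 * returned, otherwise X86EMUL_CONTINUE.
 */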
478 #define asm_safe(insn, inoutclob...) \
479 ({ \
480 int _fault = 0; \
481 \
482 asm volatile("1:" insn "\n" \
483 "2:\n" \
484 ".pushsection .fixup, \"ax\"\n" \
485 "3: movl $1, %[_fault]\n" \
486 " jmp 2b\n" \
487 ".popsection\n" \
488 _ASM_EXTABLE(1b, 3b) \
489 : [_fault] "+qm"(_fault) inoutclob ); \
490 \
491 _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
492 })
493
494 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
495 enum x86_intercept intercept,
496 enum x86_intercept_stage stage)
497 {
498 struct x86_instruction_info info = {
499 .intercept = intercept,
500 .rep_prefix = ctxt->rep_prefix,
501 .modrm_mod = ctxt->modrm_mod,
502 .modrm_reg = ctxt->modrm_reg,
503 .modrm_rm = ctxt->modrm_rm,
504 .src_val = ctxt->src.val64,
505 .dst_val = ctxt->dst.val64,
506 .src_bytes = ctxt->src.bytes,
507 .dst_bytes = ctxt->dst.bytes,
508 .ad_bytes = ctxt->ad_bytes,
509 .next_rip = ctxt->eip,
510 };
511
512 return ctxt->ops->intercept(ctxt, &info, stage);
513 }
514
515 static void assign_masked(ulong *dest, ulong src, ulong mask)
516 {
517 *dest = (*dest & ~mask) | (src & mask);
518 }
519
520 static void assign_register(unsigned long *reg, u64 val, int bytes)
521 {
522
523 switch (bytes) {
524 case 1:
525 *(u8 *)reg = (u8)val;
526 break;
527 case 2:
528 *(u16 *)reg = (u16)val;
529 break;
530 case 4:
531 *reg = (u32)val;
532 break;
533 case 8:
534 *reg = val;
535 break;
536 }
537 }
538
539 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
540 {
541 return (1UL << (ctxt->ad_bytes << 3)) - 1;
542 }
543
544 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
545 {
546 u16 sel;
547 struct desc_struct ss;
548
549 if (ctxt->mode == X86EMUL_MODE_PROT64)
550 return ~0UL;
551 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
552 return ~0U >> ((ss.d ^ 1) * 16);
553 }
554
555 static int stack_size(struct x86_emulate_ctxt *ctxt)
556 {
557 return (__fls(stack_mask(ctxt)) + 1) >> 3;
558 }
559
560
561 static inline unsigned long
562 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
563 {
564 if (ctxt->ad_bytes == sizeof(unsigned long))
565 return reg;
566 else
567 return reg & ad_mask(ctxt);
568 }
569
570 static inline unsigned long
571 register_address(struct x86_emulate_ctxt *ctxt, int reg)
572 {
573 return address_mask(ctxt, reg_read(ctxt, reg));
574 }
575
576 static void masked_increment(ulong *reg, ulong mask, int inc)
577 {
578 assign_masked(reg, *reg + inc, mask);
579 }
580
581 static inline void
582 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
583 {
584 ulong *preg = reg_rmw(ctxt, reg);
585
586 assign_register(preg, *preg + inc, ctxt->ad_bytes);
587 }
588
589 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
590 {
591 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
592 }
593
594 static u32 desc_limit_scaled(struct desc_struct *desc)
595 {
596 u32 limit = get_desc_limit(desc);
597
598 return desc->g ? (limit << 12) | 0xfff : limit;
599 }
600
601 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
602 {
603 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
604 return 0;
605
606 return ctxt->ops->get_cached_segment_base(ctxt, seg);
607 }
608
609 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
610 u32 error, bool valid)
611 {
612 WARN_ON(vec > 0x1f);
613 ctxt->exception.vector = vec;
614 ctxt->exception.error_code = error;
615 ctxt->exception.error_code_valid = valid;
616 return X86EMUL_PROPAGATE_FAULT;
617 }
618
619 static int emulate_db(struct x86_emulate_ctxt *ctxt)
620 {
621 return emulate_exception(ctxt, DB_VECTOR, 0, false);
622 }
623
624 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
625 {
626 return emulate_exception(ctxt, GP_VECTOR, err, true);
627 }
628
629 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
630 {
631 return emulate_exception(ctxt, SS_VECTOR, err, true);
632 }
633
634 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
635 {
636 return emulate_exception(ctxt, UD_VECTOR, 0, false);
637 }
638
639 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
640 {
641 return emulate_exception(ctxt, TS_VECTOR, err, true);
642 }
643
644 static int emulate_de(struct x86_emulate_ctxt *ctxt)
645 {
646 return emulate_exception(ctxt, DE_VECTOR, 0, false);
647 }
648
649 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
650 {
651 return emulate_exception(ctxt, NM_VECTOR, 0, false);
652 }
653
654 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
655 {
656 u16 selector;
657 struct desc_struct desc;
658
659 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
660 return selector;
661 }
662
663 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
664 unsigned seg)
665 {
666 u16 dummy;
667 u32 base3;
668 struct desc_struct desc;
669
670 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
671 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
672 }
673
674
675
676
677
678
679
680
681
682
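/*
 * Required alignment for a memory operand of the current instruction:
 * operands smaller than 16 bytes are never alignment-checked; for 16 bytes
 * and up, Unaligned/Avx instructions skip the check, Aligned16 forces
 * 16-byte alignment, and everything else requires natural (size) alignment.
 */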
683 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
684 {
685 u64 alignment = ctxt->d & AlignMask;
686
687 if (likely(size < 16))
688 return 1;
689
690 switch (alignment) {
691 case Unaligned:
692 case Avx:
693 return 1;
694 case Aligned16:
695 return 16;
696 case Aligned:
697 default:
698 return size;
699 }
700 }
701
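/*
 * Translate a segmented address into a linear address, enforcing canonical
 * checks in 64-bit mode and segment usability/type/limit checks otherwise,
 * plus operand alignment.  *max_size reports how many bytes are accessible
 * at the address; faults raise #SS(0) for SS-relative accesses and #GP(0)
 * otherwise.
 */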
702 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
703 struct segmented_address addr,
704 unsigned *max_size, unsigned size,
705 bool write, bool fetch,
706 enum x86emul_mode mode, ulong *linear)
707 {
708 struct desc_struct desc;
709 bool usable;
710 ulong la;
711 u32 lim;
712 u16 sel;
713 u8 va_bits;
714
715 la = seg_base(ctxt, addr.seg) + addr.ea;
716 *max_size = 0;
717 switch (mode) {
718 case X86EMUL_MODE_PROT64:
719 *linear = la;
720 va_bits = ctxt_virt_addr_bits(ctxt);
721 if (get_canonical(la, va_bits) != la)
722 goto bad;
723
724 *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
725 if (size > *max_size)
726 goto bad;
727 break;
728 default:
729 *linear = la = (u32)la;
730 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
731 addr.seg);
732 if (!usable)
733 goto bad;
734
735 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
736 || !(desc.type & 2)) && write)
737 goto bad;
738
739 if (!fetch && (desc.type & 8) && !(desc.type & 2))
740 goto bad;
741 lim = desc_limit_scaled(&desc);
742 if (!(desc.type & 8) && (desc.type & 4)) {
743
744 if (addr.ea <= lim)
745 goto bad;
746 lim = desc.d ? 0xffffffff : 0xffff;
747 }
748 if (addr.ea > lim)
749 goto bad;
750 if (lim == 0xffffffff)
751 *max_size = ~0u;
752 else {
753 *max_size = (u64)lim + 1 - addr.ea;
754 if (size > *max_size)
755 goto bad;
756 }
757 break;
758 }
759 if (la & (insn_alignment(ctxt, size) - 1))
760 return emulate_gp(ctxt, 0);
761 return X86EMUL_CONTINUE;
762 bad:
763 if (addr.seg == VCPU_SREG_SS)
764 return emulate_ss(ctxt, 0);
765 else
766 return emulate_gp(ctxt, 0);
767 }
768
769 static int linearize(struct x86_emulate_ctxt *ctxt,
770 struct segmented_address addr,
771 unsigned size, bool write,
772 ulong *linear)
773 {
774 unsigned max_size;
775 return __linearize(ctxt, addr, &max_size, size, write, false,
776 ctxt->mode, linear);
777 }
778
779 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
780 enum x86emul_mode mode)
781 {
782 ulong linear;
783 int rc;
784 unsigned max_size;
785 struct segmented_address addr = { .seg = VCPU_SREG_CS,
786 .ea = dst };
787
788 if (ctxt->op_bytes != sizeof(unsigned long))
789 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
790 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
791 if (rc == X86EMUL_CONTINUE)
792 ctxt->_eip = addr.ea;
793 return rc;
794 }
795
796 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
797 {
798 return assign_eip(ctxt, dst, ctxt->mode);
799 }
800
801 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
802 const struct desc_struct *cs_desc)
803 {
804 enum x86emul_mode mode = ctxt->mode;
805 int rc;
806
807 #ifdef CONFIG_X86_64
808 if (ctxt->mode >= X86EMUL_MODE_PROT16) {
809 if (cs_desc->l) {
810 u64 efer = 0;
811
812 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
813 if (efer & EFER_LMA)
814 mode = X86EMUL_MODE_PROT64;
815 } else
816 mode = X86EMUL_MODE_PROT32;
817 }
818 #endif
819 if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
820 mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
821 rc = assign_eip(ctxt, dst, mode);
822 if (rc == X86EMUL_CONTINUE)
823 ctxt->mode = mode;
824 return rc;
825 }
826
827 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
828 {
829 return assign_eip_near(ctxt, ctxt->_eip + rel);
830 }
831
832 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
833 void *data, unsigned size)
834 {
835 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
836 }
837
838 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
839 ulong linear, void *data,
840 unsigned int size)
841 {
842 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
843 }
844
845 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
846 struct segmented_address addr,
847 void *data,
848 unsigned size)
849 {
850 int rc;
851 ulong linear;
852
853 rc = linearize(ctxt, addr, size, false, &linear);
854 if (rc != X86EMUL_CONTINUE)
855 return rc;
856 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
857 }
858
859 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
860 struct segmented_address addr,
861 void *data,
862 unsigned int size)
863 {
864 int rc;
865 ulong linear;
866
867 rc = linearize(ctxt, addr, size, true, &linear);
868 if (rc != X86EMUL_CONTINUE)
869 return rc;
870 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
871 }
872
873
874
875
876
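/*
 * Refill the instruction fetch cache with at least op_size more bytes,
 * without crossing a page boundary or the 15-byte instruction length limit;
 * fails with #GP(0) if that many bytes cannot be fetched.
 */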
877 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
878 {
879 int rc;
880 unsigned size, max_size;
881 unsigned long linear;
882 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
883 struct segmented_address addr = { .seg = VCPU_SREG_CS,
884 .ea = ctxt->eip + cur_size };
885
886
887
888
889
890
891
892
893
894
895
896 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
897 &linear);
898 if (unlikely(rc != X86EMUL_CONTINUE))
899 return rc;
900
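/*
 * cur_size is at most 15, so "15UL ^ cur_size" equals 15 - cur_size:
 * never fetch beyond the 15-byte instruction length limit.
 */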
901 size = min_t(unsigned, 15UL ^ cur_size, max_size);
902 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
903
904
905
906
907
908
909
910 if (unlikely(size < op_size))
911 return emulate_gp(ctxt, 0);
912
913 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
914 size, &ctxt->exception);
915 if (unlikely(rc != X86EMUL_CONTINUE))
916 return rc;
917 ctxt->fetch.end += size;
918 return X86EMUL_CONTINUE;
919 }
920
921 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
922 unsigned size)
923 {
924 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
925
926 if (unlikely(done_size < size))
927 return __do_insn_fetch_bytes(ctxt, size - done_size);
928 else
929 return X86EMUL_CONTINUE;
930 }
931
932
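/*
 * insn_fetch() pulls the next sizeof(_type) instruction bytes out of the
 * fetch cache (refilling it via do_insn_fetch_bytes() as needed), advances
 * _eip, and yields the fetched value; on failure it jumps to the caller's
 * "done" label with rc set.
 */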
933 #define insn_fetch(_type, _ctxt) \
934 ({ _type _x; \
935 \
936 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
937 if (rc != X86EMUL_CONTINUE) \
938 goto done; \
939 ctxt->_eip += sizeof(_type); \
940 memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
941 ctxt->fetch.ptr += sizeof(_type); \
942 _x; \
943 })
944
945 #define insn_fetch_arr(_arr, _size, _ctxt) \
946 ({ \
947 rc = do_insn_fetch_bytes(_ctxt, _size); \
948 if (rc != X86EMUL_CONTINUE) \
949 goto done; \
950 ctxt->_eip += (_size); \
951 memcpy(_arr, ctxt->fetch.ptr, _size); \
952 ctxt->fetch.ptr += (_size); \
953 })
954
955
956
957
958
959
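/*
 * Map a ModRM/REX register number to a pointer into the GPR cache.  Without
 * a REX prefix, byte operands 4-7 select the legacy high-byte registers
 * AH/CH/DH/BH, i.e. byte 1 of registers 0-3.
 */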
960 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
961 int byteop)
962 {
963 void *p;
964 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
965
966 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
967 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
968 else
969 p = reg_rmw(ctxt, modrm_reg);
970 return p;
971 }
972
973 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
974 struct segmented_address addr,
975 u16 *size, unsigned long *address, int op_bytes)
976 {
977 int rc;
978
979 if (op_bytes == 2)
980 op_bytes = 3;
981 *address = 0;
982 rc = segmented_read_std(ctxt, addr, size, 2);
983 if (rc != X86EMUL_CONTINUE)
984 return rc;
985 addr.ea += 2;
986 rc = segmented_read_std(ctxt, addr, address, op_bytes);
987 return rc;
988 }
989
990 FASTOP2(add);
991 FASTOP2(or);
992 FASTOP2(adc);
993 FASTOP2(sbb);
994 FASTOP2(and);
995 FASTOP2(sub);
996 FASTOP2(xor);
997 FASTOP2(cmp);
998 FASTOP2(test);
999
1000 FASTOP1SRC2(mul, mul_ex);
1001 FASTOP1SRC2(imul, imul_ex);
1002 FASTOP1SRC2EX(div, div_ex);
1003 FASTOP1SRC2EX(idiv, idiv_ex);
1004
1005 FASTOP3WCL(shld);
1006 FASTOP3WCL(shrd);
1007
1008 FASTOP2W(imul);
1009
1010 FASTOP1(not);
1011 FASTOP1(neg);
1012 FASTOP1(inc);
1013 FASTOP1(dec);
1014
1015 FASTOP2CL(rol);
1016 FASTOP2CL(ror);
1017 FASTOP2CL(rcl);
1018 FASTOP2CL(rcr);
1019 FASTOP2CL(shl);
1020 FASTOP2CL(shr);
1021 FASTOP2CL(sar);
1022
1023 FASTOP2W(bsf);
1024 FASTOP2W(bsr);
1025 FASTOP2W(bt);
1026 FASTOP2W(bts);
1027 FASTOP2W(btr);
1028 FASTOP2W(btc);
1029
1030 FASTOP2(xadd);
1031
1032 FASTOP2R(cmp, cmp_r);
1033
1034 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1035 {
1036
1037 if (ctxt->src.val == 0)
1038 ctxt->dst.type = OP_NONE;
1039 return fastop(ctxt, em_bsf);
1040 }
1041
1042 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1043 {
1044
1045 if (ctxt->src.val == 0)
1046 ctxt->dst.type = OP_NONE;
1047 return fastop(ctxt, em_bsr);
1048 }
1049
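/*
 * Evaluate a condition code against the given flags by loading them with
 * popf and calling the matching stub in the em_setcc table; each SETcc stub
 * is 4 bytes, hence the 4 * (condition & 0xf) offset.
 */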
1050 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1051 {
1052 u8 rc;
1053 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1054
1055 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1056 asm("push %[flags]; popf; " CALL_NOSPEC
1057 : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1058 return rc;
1059 }
1060
1061 static void fetch_register_operand(struct operand *op)
1062 {
1063 switch (op->bytes) {
1064 case 1:
1065 op->val = *(u8 *)op->addr.reg;
1066 break;
1067 case 2:
1068 op->val = *(u16 *)op->addr.reg;
1069 break;
1070 case 4:
1071 op->val = *(u32 *)op->addr.reg;
1072 break;
1073 case 8:
1074 op->val = *(u64 *)op->addr.reg;
1075 break;
1076 }
1077 }
1078
1079 static void emulator_get_fpu(void)
1080 {
1081 fpregs_lock();
1082
1083 fpregs_assert_state_consistent();
1084 if (test_thread_flag(TIF_NEED_FPU_LOAD))
1085 switch_fpu_return();
1086 }
1087
1088 static void emulator_put_fpu(void)
1089 {
1090 fpregs_unlock();
1091 }
1092
1093 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1094 {
1095 emulator_get_fpu();
1096 switch (reg) {
1097 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1098 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
1099 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
1100 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
1101 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
1102 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
1103 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
1104 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
1105 #ifdef CONFIG_X86_64
1106 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
1107 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
1108 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
1109 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
1110 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
1111 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
1112 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
1113 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
1114 #endif
1115 default: BUG();
1116 }
1117 emulator_put_fpu();
1118 }
1119
1120 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1121 int reg)
1122 {
1123 emulator_get_fpu();
1124 switch (reg) {
1125 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1126 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
1127 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
1128 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
1129 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
1130 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
1131 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
1132 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
1133 #ifdef CONFIG_X86_64
1134 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
1135 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
1136 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
1137 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
1138 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
1139 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
1140 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
1141 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
1142 #endif
1143 default: BUG();
1144 }
1145 emulator_put_fpu();
1146 }
1147
1148 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1149 {
1150 emulator_get_fpu();
1151 switch (reg) {
1152 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1153 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1154 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1155 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1156 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1157 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1158 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1159 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1160 default: BUG();
1161 }
1162 emulator_put_fpu();
1163 }
1164
1165 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1166 {
1167 emulator_get_fpu();
1168 switch (reg) {
1169 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1170 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1171 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1172 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1173 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1174 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1175 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1176 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1177 default: BUG();
1178 }
1179 emulator_put_fpu();
1180 }
1181
1182 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1183 {
1184 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1185 return emulate_nm(ctxt);
1186
1187 emulator_get_fpu();
1188 asm volatile("fninit");
1189 emulator_put_fpu();
1190 return X86EMUL_CONTINUE;
1191 }
1192
1193 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1194 {
1195 u16 fcw;
1196
1197 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1198 return emulate_nm(ctxt);
1199
1200 emulator_get_fpu();
1201 asm volatile("fnstcw %0": "+m"(fcw));
1202 emulator_put_fpu();
1203
1204 ctxt->dst.val = fcw;
1205
1206 return X86EMUL_CONTINUE;
1207 }
1208
1209 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1210 {
1211 u16 fsw;
1212
1213 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1214 return emulate_nm(ctxt);
1215
1216 emulator_get_fpu();
1217 asm volatile("fnstsw %0": "+m"(fsw));
1218 emulator_put_fpu();
1219
1220 ctxt->dst.val = fsw;
1221
1222 return X86EMUL_CONTINUE;
1223 }
1224
1225 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1226 struct operand *op)
1227 {
1228 unsigned reg = ctxt->modrm_reg;
1229
1230 if (!(ctxt->d & ModRM))
1231 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1232
1233 if (ctxt->d & Sse) {
1234 op->type = OP_XMM;
1235 op->bytes = 16;
1236 op->addr.xmm = reg;
1237 read_sse_reg(ctxt, &op->vec_val, reg);
1238 return;
1239 }
1240 if (ctxt->d & Mmx) {
1241 reg &= 7;
1242 op->type = OP_MM;
1243 op->bytes = 8;
1244 op->addr.mm = reg;
1245 return;
1246 }
1247
1248 op->type = OP_REG;
1249 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1250 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1251
1252 fetch_register_operand(op);
1253 op->orig_val = op->val;
1254 }
1255
1256 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1257 {
1258 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1259 ctxt->modrm_seg = VCPU_SREG_SS;
1260 }
1261
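/*
 * Decode the ModRM byte (and any SIB byte/displacement) into an operand:
 * a register operand when mod == 3 (or the insn is tagged NoMod), otherwise
 * a memory operand whose 16-bit or 32/64-bit effective address is computed
 * here, with SS selected as the default segment for BP/SP-based addressing.
 */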
1262 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1263 struct operand *op)
1264 {
1265 u8 sib;
1266 int index_reg, base_reg, scale;
1267 int rc = X86EMUL_CONTINUE;
1268 ulong modrm_ea = 0;
1269
1270 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8);
1271 index_reg = (ctxt->rex_prefix << 2) & 8;
1272 base_reg = (ctxt->rex_prefix << 3) & 8;
1273
1274 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1275 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1276 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1277 ctxt->modrm_seg = VCPU_SREG_DS;
1278
1279 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1280 op->type = OP_REG;
1281 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1282 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1283 ctxt->d & ByteOp);
1284 if (ctxt->d & Sse) {
1285 op->type = OP_XMM;
1286 op->bytes = 16;
1287 op->addr.xmm = ctxt->modrm_rm;
1288 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1289 return rc;
1290 }
1291 if (ctxt->d & Mmx) {
1292 op->type = OP_MM;
1293 op->bytes = 8;
1294 op->addr.mm = ctxt->modrm_rm & 7;
1295 return rc;
1296 }
1297 fetch_register_operand(op);
1298 return rc;
1299 }
1300
1301 op->type = OP_MEM;
1302
1303 if (ctxt->ad_bytes == 2) {
1304 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1305 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1306 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1307 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1308
1309
1310 switch (ctxt->modrm_mod) {
1311 case 0:
1312 if (ctxt->modrm_rm == 6)
1313 modrm_ea += insn_fetch(u16, ctxt);
1314 break;
1315 case 1:
1316 modrm_ea += insn_fetch(s8, ctxt);
1317 break;
1318 case 2:
1319 modrm_ea += insn_fetch(u16, ctxt);
1320 break;
1321 }
1322 switch (ctxt->modrm_rm) {
1323 case 0:
1324 modrm_ea += bx + si;
1325 break;
1326 case 1:
1327 modrm_ea += bx + di;
1328 break;
1329 case 2:
1330 modrm_ea += bp + si;
1331 break;
1332 case 3:
1333 modrm_ea += bp + di;
1334 break;
1335 case 4:
1336 modrm_ea += si;
1337 break;
1338 case 5:
1339 modrm_ea += di;
1340 break;
1341 case 6:
1342 if (ctxt->modrm_mod != 0)
1343 modrm_ea += bp;
1344 break;
1345 case 7:
1346 modrm_ea += bx;
1347 break;
1348 }
1349 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1350 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1351 ctxt->modrm_seg = VCPU_SREG_SS;
1352 modrm_ea = (u16)modrm_ea;
1353 } else {
1354
1355 if ((ctxt->modrm_rm & 7) == 4) {
1356 sib = insn_fetch(u8, ctxt);
1357 index_reg |= (sib >> 3) & 7;
1358 base_reg |= sib & 7;
1359 scale = sib >> 6;
1360
1361 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1362 modrm_ea += insn_fetch(s32, ctxt);
1363 else {
1364 modrm_ea += reg_read(ctxt, base_reg);
1365 adjust_modrm_seg(ctxt, base_reg);
1366
1367 if ((ctxt->d & IncSP) &&
1368 base_reg == VCPU_REGS_RSP)
1369 modrm_ea += ctxt->op_bytes;
1370 }
1371 if (index_reg != 4)
1372 modrm_ea += reg_read(ctxt, index_reg) << scale;
1373 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1374 modrm_ea += insn_fetch(s32, ctxt);
1375 if (ctxt->mode == X86EMUL_MODE_PROT64)
1376 ctxt->rip_relative = 1;
1377 } else {
1378 base_reg = ctxt->modrm_rm;
1379 modrm_ea += reg_read(ctxt, base_reg);
1380 adjust_modrm_seg(ctxt, base_reg);
1381 }
1382 switch (ctxt->modrm_mod) {
1383 case 1:
1384 modrm_ea += insn_fetch(s8, ctxt);
1385 break;
1386 case 2:
1387 modrm_ea += insn_fetch(s32, ctxt);
1388 break;
1389 }
1390 }
1391 op->addr.mem.ea = modrm_ea;
1392 if (ctxt->ad_bytes != 8)
1393 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1394
1395 done:
1396 return rc;
1397 }
1398
1399 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1400 struct operand *op)
1401 {
1402 int rc = X86EMUL_CONTINUE;
1403
1404 op->type = OP_MEM;
1405 switch (ctxt->ad_bytes) {
1406 case 2:
1407 op->addr.mem.ea = insn_fetch(u16, ctxt);
1408 break;
1409 case 4:
1410 op->addr.mem.ea = insn_fetch(u32, ctxt);
1411 break;
1412 case 8:
1413 op->addr.mem.ea = insn_fetch(u64, ctxt);
1414 break;
1415 }
1416 done:
1417 return rc;
1418 }
1419
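/*
 * For bit instructions with a memory destination and a register bit-offset
 * source, the offset may address bits outside the operand: fold the byte
 * portion (sv >> 3) of the sign-extended offset into the effective address
 * and reduce the bit offset modulo the operand width.
 */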
1420 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1421 {
1422 long sv = 0, mask;
1423
1424 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1425 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1426
1427 if (ctxt->src.bytes == 2)
1428 sv = (s16)ctxt->src.val & (s16)mask;
1429 else if (ctxt->src.bytes == 4)
1430 sv = (s32)ctxt->src.val & (s32)mask;
1431 else
1432 sv = (s64)ctxt->src.val & (s64)mask;
1433
1434 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1435 ctxt->dst.addr.mem.ea + (sv >> 3));
1436 }
1437
1438
1439 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1440 }
1441
1442 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1443 unsigned long addr, void *dest, unsigned size)
1444 {
1445 int rc;
1446 struct read_cache *mc = &ctxt->mem_read;
1447
1448 if (mc->pos < mc->end)
1449 goto read_cached;
1450
1451 WARN_ON((mc->end + size) >= sizeof(mc->data));
1452
1453 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1454 &ctxt->exception);
1455 if (rc != X86EMUL_CONTINUE)
1456 return rc;
1457
1458 mc->end += size;
1459
1460 read_cached:
1461 memcpy(dest, mc->data + mc->pos, size);
1462 mc->pos += size;
1463 return X86EMUL_CONTINUE;
1464 }
1465
1466 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1467 struct segmented_address addr,
1468 void *data,
1469 unsigned size)
1470 {
1471 int rc;
1472 ulong linear;
1473
1474 rc = linearize(ctxt, addr, size, false, &linear);
1475 if (rc != X86EMUL_CONTINUE)
1476 return rc;
1477 return read_emulated(ctxt, linear, data, size);
1478 }
1479
1480 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1481 struct segmented_address addr,
1482 const void *data,
1483 unsigned size)
1484 {
1485 int rc;
1486 ulong linear;
1487
1488 rc = linearize(ctxt, addr, size, true, &linear);
1489 if (rc != X86EMUL_CONTINUE)
1490 return rc;
1491 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1492 &ctxt->exception);
1493 }
1494
1495 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1496 struct segmented_address addr,
1497 const void *orig_data, const void *data,
1498 unsigned size)
1499 {
1500 int rc;
1501 ulong linear;
1502
1503 rc = linearize(ctxt, addr, size, true, &linear);
1504 if (rc != X86EMUL_CONTINUE)
1505 return rc;
1506 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1507 size, &ctxt->exception);
1508 }
1509
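/*
 * Emulated port input with REP batching: when the io_read cache is empty,
 * request up to a page-bounded, RCX-limited number of elements from
 * ->pio_in_emulated() in one call (returning 0 if the host-side read does
 * not complete immediately); forward string reads hand the cached data to
 * the OP_MEM_STR writeback path, otherwise one element is copied to 'dest'.
 */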
1510 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1511 unsigned int size, unsigned short port,
1512 void *dest)
1513 {
1514 struct read_cache *rc = &ctxt->io_read;
1515
1516 if (rc->pos == rc->end) {
1517 unsigned int in_page, n;
1518 unsigned int count = ctxt->rep_prefix ?
1519 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1520 in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1521 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1522 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1523 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1524 if (n == 0)
1525 n = 1;
1526 rc->pos = rc->end = 0;
1527 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1528 return 0;
1529 rc->end = n * size;
1530 }
1531
1532 if (ctxt->rep_prefix && (ctxt->d & String) &&
1533 !(ctxt->eflags & X86_EFLAGS_DF)) {
1534 ctxt->dst.data = rc->data + rc->pos;
1535 ctxt->dst.type = OP_MEM_STR;
1536 ctxt->dst.count = (rc->end - rc->pos) / size;
1537 rc->pos = rc->end;
1538 } else {
1539 memcpy(dest, rc->data + rc->pos, size);
1540 rc->pos += size;
1541 }
1542 return 1;
1543 }
1544
1545 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1546 u16 index, struct desc_struct *desc)
1547 {
1548 struct desc_ptr dt;
1549 ulong addr;
1550
1551 ctxt->ops->get_idt(ctxt, &dt);
1552
1553 if (dt.size < index * 8 + 7)
1554 return emulate_gp(ctxt, index << 3 | 0x2);
1555
1556 addr = dt.address + index * 8;
1557 return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1558 }
1559
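/*
 * Return the descriptor table that 'selector' refers to: the LDT when the
 * TI bit (bit 2) is set, otherwise the GDT.
 */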
1560 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1561 u16 selector, struct desc_ptr *dt)
1562 {
1563 const struct x86_emulate_ops *ops = ctxt->ops;
1564 u32 base3 = 0;
1565
1566 if (selector & 1 << 2) {
1567 struct desc_struct desc;
1568 u16 sel;
1569
1570 memset(dt, 0, sizeof(*dt));
1571 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1572 VCPU_SREG_LDTR))
1573 return;
1574
1575 dt->size = desc_limit_scaled(&desc);
1576 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1577 } else
1578 ops->get_gdt(ctxt, dt);
1579 }
1580
1581 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1582 u16 selector, ulong *desc_addr_p)
1583 {
1584 struct desc_ptr dt;
1585 u16 index = selector >> 3;
1586 ulong addr;
1587
1588 get_descriptor_table_ptr(ctxt, selector, &dt);
1589
1590 if (dt.size < index * 8 + 7)
1591 return emulate_gp(ctxt, selector & 0xfffc);
1592
1593 addr = dt.address + index * 8;
1594
1595 #ifdef CONFIG_X86_64
1596 if (addr >> 32 != 0) {
1597 u64 efer = 0;
1598
1599 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1600 if (!(efer & EFER_LMA))
1601 addr &= (u32)-1;
1602 }
1603 #endif
1604
1605 *desc_addr_p = addr;
1606 return X86EMUL_CONTINUE;
1607 }
1608
1609
1610 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1611 u16 selector, struct desc_struct *desc,
1612 ulong *desc_addr_p)
1613 {
1614 int rc;
1615
1616 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1617 if (rc != X86EMUL_CONTINUE)
1618 return rc;
1619
1620 return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1621 }
1622
1623
1624 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1625 u16 selector, struct desc_struct *desc)
1626 {
1627 int rc;
1628 ulong addr;
1629
1630 rc = get_descriptor_ptr(ctxt, selector, &addr);
1631 if (rc != X86EMUL_CONTINUE)
1632 return rc;
1633
1634 return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1635 }
1636
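/*
 * Protected-mode segment load: handles the real-mode and VM86 shortcuts and
 * null selectors, reads the descriptor, applies the per-segment
 * S/type/DPL/RPL/CPL and present checks, marks the descriptor accessed (or
 * the TSS busy), and installs it via ->set_segment().  The error vector
 * (#GP, #TS, #SS or #NP) depends on the segment and the transfer type.
 */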
1637 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1638 u16 selector, int seg, u8 cpl,
1639 enum x86_transfer_type transfer,
1640 struct desc_struct *desc)
1641 {
1642 struct desc_struct seg_desc, old_desc;
1643 u8 dpl, rpl;
1644 unsigned err_vec = GP_VECTOR;
1645 u32 err_code = 0;
1646 bool null_selector = !(selector & ~0x3);
1647 ulong desc_addr;
1648 int ret;
1649 u16 dummy;
1650 u32 base3 = 0;
1651
1652 memset(&seg_desc, 0, sizeof(seg_desc));
1653
1654 if (ctxt->mode == X86EMUL_MODE_REAL) {
1655
1656
1657 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1658 set_desc_base(&seg_desc, selector << 4);
1659 goto load;
1660 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1661
1662 set_desc_base(&seg_desc, selector << 4);
1663 set_desc_limit(&seg_desc, 0xffff);
1664 seg_desc.type = 3;
1665 seg_desc.p = 1;
1666 seg_desc.s = 1;
1667 seg_desc.dpl = 3;
1668 goto load;
1669 }
1670
1671 rpl = selector & 3;
1672
1673
1674 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1675 goto exception;
1676
1677
1678 if (null_selector) {
1679 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1680 goto exception;
1681
1682 if (seg == VCPU_SREG_SS) {
1683 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1684 goto exception;
1685
1686
1687
1688
1689
1690 seg_desc.type = 3;
1691 seg_desc.p = 1;
1692 seg_desc.s = 1;
1693 seg_desc.dpl = cpl;
1694 seg_desc.d = 1;
1695 seg_desc.g = 1;
1696 }
1697
1698
1699 goto load;
1700 }
1701
1702 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1703 if (ret != X86EMUL_CONTINUE)
1704 return ret;
1705
1706 err_code = selector & 0xfffc;
1707 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1708 GP_VECTOR;
1709
1710
1711 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1712 if (transfer == X86_TRANSFER_CALL_JMP)
1713 return X86EMUL_UNHANDLEABLE;
1714 goto exception;
1715 }
1716
1717 if (!seg_desc.p) {
1718 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1719 goto exception;
1720 }
1721
1722 dpl = seg_desc.dpl;
1723
1724 switch (seg) {
1725 case VCPU_SREG_SS:
1726
1727
1728
1729
1730 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1731 goto exception;
1732 break;
1733 case VCPU_SREG_CS:
1734 if (!(seg_desc.type & 8))
1735 goto exception;
1736
1737 if (seg_desc.type & 4) {
1738
1739 if (dpl > cpl)
1740 goto exception;
1741 } else {
1742
1743 if (rpl > cpl || dpl != cpl)
1744 goto exception;
1745 }
1746
1747 if (seg_desc.d && seg_desc.l) {
1748 u64 efer = 0;
1749
1750 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1751 if (efer & EFER_LMA)
1752 goto exception;
1753 }
1754
1755
1756 selector = (selector & 0xfffc) | cpl;
1757 break;
1758 case VCPU_SREG_TR:
1759 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1760 goto exception;
1761 old_desc = seg_desc;
1762 seg_desc.type |= 2;
1763 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1764 sizeof(seg_desc), &ctxt->exception);
1765 if (ret != X86EMUL_CONTINUE)
1766 return ret;
1767 break;
1768 case VCPU_SREG_LDTR:
1769 if (seg_desc.s || seg_desc.type != 2)
1770 goto exception;
1771 break;
1772 default:
1773
1774
1775
1776
1777
1778 if ((seg_desc.type & 0xa) == 0x8 ||
1779 (((seg_desc.type & 0xc) != 0xc) &&
1780 (rpl > dpl && cpl > dpl)))
1781 goto exception;
1782 break;
1783 }
1784
1785 if (seg_desc.s) {
1786
1787 if (!(seg_desc.type & 1)) {
1788 seg_desc.type |= 1;
1789 ret = write_segment_descriptor(ctxt, selector,
1790 &seg_desc);
1791 if (ret != X86EMUL_CONTINUE)
1792 return ret;
1793 }
1794 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1795 ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1796 if (ret != X86EMUL_CONTINUE)
1797 return ret;
1798 if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1799 ((u64)base3 << 32), ctxt))
1800 return emulate_gp(ctxt, 0);
1801 }
1802 load:
1803 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1804 if (desc)
1805 *desc = seg_desc;
1806 return X86EMUL_CONTINUE;
1807 exception:
1808 return emulate_exception(ctxt, err_vec, err_code, true);
1809 }
1810
1811 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1812 u16 selector, int seg)
1813 {
1814 u8 cpl = ctxt->ops->cpl(ctxt);
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826 if (seg == VCPU_SREG_SS && selector == 3 &&
1827 ctxt->mode == X86EMUL_MODE_PROT64)
1828 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1829
1830 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1831 X86_TRANSFER_NONE, NULL);
1832 }
1833
1834 static void write_register_operand(struct operand *op)
1835 {
1836 return assign_register(op->addr.reg, op->val, op->bytes);
1837 }
1838
1839 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1840 {
1841 switch (op->type) {
1842 case OP_REG:
1843 write_register_operand(op);
1844 break;
1845 case OP_MEM:
1846 if (ctxt->lock_prefix)
1847 return segmented_cmpxchg(ctxt,
1848 op->addr.mem,
1849 &op->orig_val,
1850 &op->val,
1851 op->bytes);
1852 else
1853 return segmented_write(ctxt,
1854 op->addr.mem,
1855 &op->val,
1856 op->bytes);
1857 break;
1858 case OP_MEM_STR:
1859 return segmented_write(ctxt,
1860 op->addr.mem,
1861 op->data,
1862 op->bytes * op->count);
1863 break;
1864 case OP_XMM:
1865 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1866 break;
1867 case OP_MM:
1868 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1869 break;
1870 case OP_NONE:
1871
1872 break;
1873 default:
1874 break;
1875 }
1876 return X86EMUL_CONTINUE;
1877 }
1878
1879 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1880 {
1881 struct segmented_address addr;
1882
1883 rsp_increment(ctxt, -bytes);
1884 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1885 addr.seg = VCPU_SREG_SS;
1886
1887 return segmented_write(ctxt, addr, data, bytes);
1888 }
1889
1890 static int em_push(struct x86_emulate_ctxt *ctxt)
1891 {
1892
1893 ctxt->dst.type = OP_NONE;
1894 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1895 }
1896
1897 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1898 void *dest, int len)
1899 {
1900 int rc;
1901 struct segmented_address addr;
1902
1903 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1904 addr.seg = VCPU_SREG_SS;
1905 rc = segmented_read(ctxt, addr, dest, len);
1906 if (rc != X86EMUL_CONTINUE)
1907 return rc;
1908
1909 rsp_increment(ctxt, len);
1910 return rc;
1911 }
1912
1913 static int em_pop(struct x86_emulate_ctxt *ctxt)
1914 {
1915 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1916 }
1917
1918 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1919 void *dest, int len)
1920 {
1921 int rc;
1922 unsigned long val, change_mask;
1923 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1924 int cpl = ctxt->ops->cpl(ctxt);
1925
1926 rc = emulate_pop(ctxt, &val, len);
1927 if (rc != X86EMUL_CONTINUE)
1928 return rc;
1929
1930 change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1931 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1932 X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1933 X86_EFLAGS_AC | X86_EFLAGS_ID;
1934
1935 switch(ctxt->mode) {
1936 case X86EMUL_MODE_PROT64:
1937 case X86EMUL_MODE_PROT32:
1938 case X86EMUL_MODE_PROT16:
1939 if (cpl == 0)
1940 change_mask |= X86_EFLAGS_IOPL;
1941 if (cpl <= iopl)
1942 change_mask |= X86_EFLAGS_IF;
1943 break;
1944 case X86EMUL_MODE_VM86:
1945 if (iopl < 3)
1946 return emulate_gp(ctxt, 0);
1947 change_mask |= X86_EFLAGS_IF;
1948 break;
1949 default:
1950 change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1951 break;
1952 }
1953
1954 *(unsigned long *)dest =
1955 (ctxt->eflags & ~change_mask) | (val & change_mask);
1956
1957 return rc;
1958 }
1959
1960 static int em_popf(struct x86_emulate_ctxt *ctxt)
1961 {
1962 ctxt->dst.type = OP_REG;
1963 ctxt->dst.addr.reg = &ctxt->eflags;
1964 ctxt->dst.bytes = ctxt->op_bytes;
1965 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1966 }
1967
1968 static int em_enter(struct x86_emulate_ctxt *ctxt)
1969 {
1970 int rc;
1971 unsigned frame_size = ctxt->src.val;
1972 unsigned nesting_level = ctxt->src2.val & 31;
1973 ulong rbp;
1974
1975 if (nesting_level)
1976 return X86EMUL_UNHANDLEABLE;
1977
1978 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1979 rc = push(ctxt, &rbp, stack_size(ctxt));
1980 if (rc != X86EMUL_CONTINUE)
1981 return rc;
1982 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1983 stack_mask(ctxt));
1984 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1985 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1986 stack_mask(ctxt));
1987 return X86EMUL_CONTINUE;
1988 }
1989
1990 static int em_leave(struct x86_emulate_ctxt *ctxt)
1991 {
1992 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1993 stack_mask(ctxt));
1994 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1995 }
1996
1997 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1998 {
1999 int seg = ctxt->src2.val;
2000
2001 ctxt->src.val = get_segment_selector(ctxt, seg);
2002 if (ctxt->op_bytes == 4) {
2003 rsp_increment(ctxt, -2);
2004 ctxt->op_bytes = 2;
2005 }
2006
2007 return em_push(ctxt);
2008 }
2009
2010 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
2011 {
2012 int seg = ctxt->src2.val;
2013 unsigned long selector;
2014 int rc;
2015
2016 rc = emulate_pop(ctxt, &selector, 2);
2017 if (rc != X86EMUL_CONTINUE)
2018 return rc;
2019
2020 if (ctxt->modrm_reg == VCPU_SREG_SS)
2021 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2022 if (ctxt->op_bytes > 2)
2023 rsp_increment(ctxt, ctxt->op_bytes - 2);
2024
2025 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
2026 return rc;
2027 }
2028
2029 static int em_pusha(struct x86_emulate_ctxt *ctxt)
2030 {
2031 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2032 int rc = X86EMUL_CONTINUE;
2033 int reg = VCPU_REGS_RAX;
2034
2035 while (reg <= VCPU_REGS_RDI) {
2036 (reg == VCPU_REGS_RSP) ?
2037 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2038
2039 rc = em_push(ctxt);
2040 if (rc != X86EMUL_CONTINUE)
2041 return rc;
2042
2043 ++reg;
2044 }
2045
2046 return rc;
2047 }
2048
2049 static int em_pushf(struct x86_emulate_ctxt *ctxt)
2050 {
2051 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2052 return em_push(ctxt);
2053 }
2054
2055 static int em_popa(struct x86_emulate_ctxt *ctxt)
2056 {
2057 int rc = X86EMUL_CONTINUE;
2058 int reg = VCPU_REGS_RDI;
2059 u32 val;
2060
2061 while (reg >= VCPU_REGS_RAX) {
2062 if (reg == VCPU_REGS_RSP) {
2063 rsp_increment(ctxt, ctxt->op_bytes);
2064 --reg;
2065 }
2066
2067 rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2068 if (rc != X86EMUL_CONTINUE)
2069 break;
2070 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2071 --reg;
2072 }
2073 return rc;
2074 }
2075
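/*
 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then load
 * CS:IP from the 4-byte IVT entry for 'irq' (offset at irq*4, selector at
 * irq*4 + 2).
 */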
2076 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2077 {
2078 const struct x86_emulate_ops *ops = ctxt->ops;
2079 int rc;
2080 struct desc_ptr dt;
2081 gva_t cs_addr;
2082 gva_t eip_addr;
2083 u16 cs, eip;
2084
2085
2086 ctxt->src.val = ctxt->eflags;
2087 rc = em_push(ctxt);
2088 if (rc != X86EMUL_CONTINUE)
2089 return rc;
2090
2091 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2092
2093 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2094 rc = em_push(ctxt);
2095 if (rc != X86EMUL_CONTINUE)
2096 return rc;
2097
2098 ctxt->src.val = ctxt->_eip;
2099 rc = em_push(ctxt);
2100 if (rc != X86EMUL_CONTINUE)
2101 return rc;
2102
2103 ops->get_idt(ctxt, &dt);
2104
2105 eip_addr = dt.address + (irq << 2);
2106 cs_addr = dt.address + (irq << 2) + 2;
2107
2108 rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2109 if (rc != X86EMUL_CONTINUE)
2110 return rc;
2111
2112 rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2113 if (rc != X86EMUL_CONTINUE)
2114 return rc;
2115
2116 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2117 if (rc != X86EMUL_CONTINUE)
2118 return rc;
2119
2120 ctxt->_eip = eip;
2121
2122 return rc;
2123 }
2124
2125 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2126 {
2127 int rc;
2128
2129 invalidate_registers(ctxt);
2130 rc = __emulate_int_real(ctxt, irq);
2131 if (rc == X86EMUL_CONTINUE)
2132 writeback_registers(ctxt);
2133 return rc;
2134 }
2135
2136 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2137 {
2138 switch(ctxt->mode) {
2139 case X86EMUL_MODE_REAL:
2140 return __emulate_int_real(ctxt, irq);
2141 case X86EMUL_MODE_VM86:
2142 case X86EMUL_MODE_PROT16:
2143 case X86EMUL_MODE_PROT32:
2144 case X86EMUL_MODE_PROT64:
2145 default:
2146 /* Protected mode interrupts are not implemented yet. */
2147 return X86EMUL_UNHANDLEABLE;
2148 }
2149 }
2150
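/*
 * Real-mode IRET: pop IP, CS and FLAGS.  Reserved flag bits are forced to
 * their fixed values, the VM86-related bits are kept from the current
 * EFLAGS rather than taken from the stack, and NMIs are unblocked.
 */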
2151 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2152 {
2153 int rc = X86EMUL_CONTINUE;
2154 unsigned long temp_eip = 0;
2155 unsigned long temp_eflags = 0;
2156 unsigned long cs = 0;
2157 unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2158 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2159 X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2160 X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2161 X86_EFLAGS_AC | X86_EFLAGS_ID |
2162 X86_EFLAGS_FIXED;
2163 unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2164 X86_EFLAGS_VIP;
2165
2166
2167
2168 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2169
2170 if (rc != X86EMUL_CONTINUE)
2171 return rc;
2172
2173 if (temp_eip & ~0xffff)
2174 return emulate_gp(ctxt, 0);
2175
2176 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2177
2178 if (rc != X86EMUL_CONTINUE)
2179 return rc;
2180
2181 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2182
2183 if (rc != X86EMUL_CONTINUE)
2184 return rc;
2185
2186 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2187
2188 if (rc != X86EMUL_CONTINUE)
2189 return rc;
2190
2191 ctxt->_eip = temp_eip;
2192
2193 if (ctxt->op_bytes == 4)
2194 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2195 else if (ctxt->op_bytes == 2) {
2196 ctxt->eflags &= ~0xffff;
2197 ctxt->eflags |= temp_eflags;
2198 }
2199
2200 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK;
2201 ctxt->eflags |= X86_EFLAGS_FIXED;
2202 ctxt->ops->set_nmi_mask(ctxt, false);
2203
2204 return rc;
2205 }
2206
2207 static int em_iret(struct x86_emulate_ctxt *ctxt)
2208 {
2209 switch(ctxt->mode) {
2210 case X86EMUL_MODE_REAL:
2211 return emulate_iret_real(ctxt);
2212 case X86EMUL_MODE_VM86:
2213 case X86EMUL_MODE_PROT16:
2214 case X86EMUL_MODE_PROT32:
2215 case X86EMUL_MODE_PROT64:
2216 default:
2217 /* IRET from protected mode is not implemented yet. */
2218 return X86EMUL_UNHANDLEABLE;
2219 }
2220 }
2221
2222 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2223 {
2224 int rc;
2225 unsigned short sel;
2226 struct desc_struct new_desc;
2227 u8 cpl = ctxt->ops->cpl(ctxt);
2228
2229 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2230
2231 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2232 X86_TRANSFER_CALL_JMP,
2233 &new_desc);
2234 if (rc != X86EMUL_CONTINUE)
2235 return rc;
2236
2237 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2238 /* Error handling is not implemented. */
2239 if (rc != X86EMUL_CONTINUE)
2240 return X86EMUL_UNHANDLEABLE;
2241
2242 return rc;
2243 }
2244
2245 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2246 {
2247 return assign_eip_near(ctxt, ctxt->src.val);
2248 }
2249
2250 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2251 {
2252 int rc;
2253 long int old_eip;
2254
2255 old_eip = ctxt->_eip;
2256 rc = assign_eip_near(ctxt, ctxt->src.val);
2257 if (rc != X86EMUL_CONTINUE)
2258 return rc;
2259 ctxt->src.val = old_eip;
2260 rc = em_push(ctxt);
2261 return rc;
2262 }
2263
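/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination.  On mismatch the
 * old value is loaded into EDX:EAX and ZF is cleared; on match ECX:EBX is
 * written back and ZF is set.  CMPXCHG16B (dst.bytes == 16) is not handled.
 */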
2264 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2265 {
2266 u64 old = ctxt->dst.orig_val64;
2267
2268 if (ctxt->dst.bytes == 16)
2269 return X86EMUL_UNHANDLEABLE;
2270
2271 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2272 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2273 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2274 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2275 ctxt->eflags &= ~X86_EFLAGS_ZF;
2276 } else {
2277 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2278 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2279
2280 ctxt->eflags |= X86_EFLAGS_ZF;
2281 }
2282 return X86EMUL_CONTINUE;
2283 }
2284
2285 static int em_ret(struct x86_emulate_ctxt *ctxt)
2286 {
2287 int rc;
2288 unsigned long eip;
2289
2290 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2291 if (rc != X86EMUL_CONTINUE)
2292 return rc;
2293
2294 return assign_eip_near(ctxt, eip);
2295 }
2296
2297 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2298 {
2299 int rc;
2300 unsigned long eip, cs;
2301 int cpl = ctxt->ops->cpl(ctxt);
2302 struct desc_struct new_desc;
2303
2304 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2305 if (rc != X86EMUL_CONTINUE)
2306 return rc;
2307 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2308 if (rc != X86EMUL_CONTINUE)
2309 return rc;
2310
2311 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2312 return X86EMUL_UNHANDLEABLE;
2313 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2314 X86_TRANSFER_RET,
2315 &new_desc);
2316 if (rc != X86EMUL_CONTINUE)
2317 return rc;
2318 rc = assign_eip_far(ctxt, eip, &new_desc);
2319 /* Error handling is not implemented. */
2320 if (rc != X86EMUL_CONTINUE)
2321 return X86EMUL_UNHANDLEABLE;
2322
2323 return rc;
2324 }
2325
2326 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2327 {
2328 int rc;
2329
2330 rc = em_ret_far(ctxt);
2331 if (rc != X86EMUL_CONTINUE)
2332 return rc;
2333 rsp_increment(ctxt, ctxt->src.val);
2334 return X86EMUL_CONTINUE;
2335 }
2336
2337 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2338 {
2339 /* Save the real source value, then compare EAX against the destination. */
2340 ctxt->dst.orig_val = ctxt->dst.val;
2341 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2342 ctxt->src.orig_val = ctxt->src.val;
2343 ctxt->src.val = ctxt->dst.orig_val;
2344 fastop(ctxt, em_cmp);
2345
2346 if (ctxt->eflags & X86_EFLAGS_ZF) {
2347 /* Success: write back to memory; no update of EAX. */
2348 ctxt->src.type = OP_NONE;
2349 ctxt->dst.val = ctxt->src.orig_val;
2350 } else {
2351 /* Failure: write the value we saw to EAX. */
2352 ctxt->src.type = OP_REG;
2353 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2354 ctxt->src.val = ctxt->dst.orig_val;
2355 /* Create a write cycle to the destination by writing the same value. */
2356 ctxt->dst.val = ctxt->dst.orig_val;
2357 }
2358 return X86EMUL_CONTINUE;
2359 }
2360
2361 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2362 {
2363 int seg = ctxt->src2.val;
2364 unsigned short sel;
2365 int rc;
2366
2367 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2368
2369 rc = load_segment_descriptor(ctxt, sel, seg);
2370 if (rc != X86EMUL_CONTINUE)
2371 return rc;
2372
2373 ctxt->dst.val = ctxt->src.val;
2374 return rc;
2375 }
2376
2377 static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
2378 {
2379 #ifdef CONFIG_X86_64
2380 u32 eax, ebx, ecx, edx;
2381
2382 eax = 0x80000001;
2383 ecx = 0;
2384 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2385 return edx & bit(X86_FEATURE_LM);
2386 #else
2387 return false;
2388 #endif
2389 }
2390
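/*
 * Unpack the segment attribute dword from the SMM state-save area into a
 * descriptor (G/D/L/AVL/P/DPL/S/type), mirroring the layout of the upper
 * dword of a GDT entry.
 */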
2391 static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
2392 {
2393 desc->g = (flags >> 23) & 1;
2394 desc->d = (flags >> 22) & 1;
2395 desc->l = (flags >> 21) & 1;
2396 desc->avl = (flags >> 20) & 1;
2397 desc->p = (flags >> 15) & 1;
2398 desc->dpl = (flags >> 13) & 3;
2399 desc->s = (flags >> 12) & 1;
2400 desc->type = (flags >> 8) & 15;
2401 }
2402
2403 static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
2404 int n)
2405 {
2406 struct desc_struct desc;
2407 int offset;
2408 u16 selector;
2409
2410 selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2411
2412 if (n < 3)
2413 offset = 0x7f84 + n * 12;
2414 else
2415 offset = 0x7f2c + (n - 3) * 12;
2416
2417 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2418 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2419 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2420 ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
2421 return X86EMUL_CONTINUE;
2422 }
2423
2424 #ifdef CONFIG_X86_64
2425 static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
2426 int n)
2427 {
2428 struct desc_struct desc;
2429 int offset;
2430 u16 selector;
2431 u32 base3;
2432
2433 offset = 0x7e00 + n * 16;
2434
2435 selector = GET_SMSTATE(u16, smstate, offset);
2436 rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
2437 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
2438 set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
2439 base3 = GET_SMSTATE(u32, smstate, offset + 12);
2440
2441 ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
2442 return X86EMUL_CONTINUE;
2443 }
2444 #endif
2445
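/*
 * Restore CR0/CR3/CR4 on RSM.  The writes must be ordered carefully: CR3 is
 * loaded first (with the PCID bits masked off), then CR4 without PCIDE, then
 * CR0, and only afterwards are PCIDE and the saved PCID re-applied.
 */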
2446 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2447 u64 cr0, u64 cr3, u64 cr4)
2448 {
2449 int bad;
2450 u64 pcid;
2451
2452 /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
2453 pcid = 0;
2454 if (cr4 & X86_CR4_PCIDE) {
2455 pcid = cr3 & 0xfff;
2456 cr3 &= ~0xfff;
2457 }
2458
2459 bad = ctxt->ops->set_cr(ctxt, 3, cr3);
2460 if (bad)
2461 return X86EMUL_UNHANDLEABLE;
2462
2463 /*
2464  * First enable PAE, long mode needs it before CR0.PG = 1 is set.
2465  * Then enable protected mode.  However, PCID cannot be enabled
2466  * if EFER.LMA = 0, so set it separately, after CR0.PG = 1.
2467  */
2468 bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2469 if (bad)
2470 return X86EMUL_UNHANDLEABLE;
2471
2472 bad = ctxt->ops->set_cr(ctxt, 0, cr0);
2473 if (bad)
2474 return X86EMUL_UNHANDLEABLE;
2475
2476 if (cr4 & X86_CR4_PCIDE) {
2477 bad = ctxt->ops->set_cr(ctxt, 4, cr4);
2478 if (bad)
2479 return X86EMUL_UNHANDLEABLE;
2480 if (pcid) {
2481 bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
2482 if (bad)
2483 return X86EMUL_UNHANDLEABLE;
2484 }
2485
2486 }
2487
2488 return X86EMUL_CONTINUE;
2489 }
2490
2491 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
2492 const char *smstate)
2493 {
2494 struct desc_struct desc;
2495 struct desc_ptr dt;
2496 u16 selector;
2497 u32 val, cr0, cr3, cr4;
2498 int i;
2499
2500 cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
2501 cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
2502 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
2503 ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
2504
2505 for (i = 0; i < 8; i++)
2506 *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2507
2508 val = GET_SMSTATE(u32, smstate, 0x7fcc);
2509 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2510 val = GET_SMSTATE(u32, smstate, 0x7fc8);
2511 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2512
2513 selector = GET_SMSTATE(u32, smstate, 0x7fc4);
2514 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
2515 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
2516 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
2517 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
2518
2519 selector = GET_SMSTATE(u32, smstate, 0x7fc0);
2520 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
2521 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
2522 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
2523 ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
2524
2525 dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
2526 dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
2527 ctxt->ops->set_gdt(ctxt, &dt);
2528
2529 dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
2530 dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
2531 ctxt->ops->set_idt(ctxt, &dt);
2532
2533 for (i = 0; i < 6; i++) {
2534 int r = rsm_load_seg_32(ctxt, smstate, i);
2535 if (r != X86EMUL_CONTINUE)
2536 return r;
2537 }
2538
2539 cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2540
2541 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2542
2543 return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2544 }
2545
2546 #ifdef CONFIG_X86_64
2547 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2548 const char *smstate)
2549 {
2550 struct desc_struct desc;
2551 struct desc_ptr dt;
2552 u64 val, cr0, cr3, cr4;
2553 u32 base3;
2554 u16 selector;
2555 int i, r;
2556
2557 for (i = 0; i < 16; i++)
2558 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2559
2560 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2561 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2562
2563 val = GET_SMSTATE(u32, smstate, 0x7f68);
2564 ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2565 val = GET_SMSTATE(u32, smstate, 0x7f60);
2566 ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
2567
2568 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2569 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2570 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2571 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2572 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2573 ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
2574
2575 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2576 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2577 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2578 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2579 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2580 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2581
2582 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2583 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2584 ctxt->ops->set_idt(ctxt, &dt);
2585
2586 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2587 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2588 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2589 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2590 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2591 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2592
2593 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2594 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2595 ctxt->ops->set_gdt(ctxt, &dt);
2596
2597 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2598 if (r != X86EMUL_CONTINUE)
2599 return r;
2600
2601 for (i = 0; i < 6; i++) {
2602 r = rsm_load_seg_64(ctxt, smstate, i);
2603 if (r != X86EMUL_CONTINUE)
2604 return r;
2605 }
2606
2607 return X86EMUL_CONTINUE;
2608 }
2609 #endif
2610
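/*
 * RSM: leave System Management Mode.  The 512-byte state-save area is read
 * from SMBASE + 0xFE00, the CPU is first put back into a minimal real-mode
 * state, and the saved register state is then reloaded in either the 32-bit
 * or the 64-bit format depending on long-mode support.
 */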
2611 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2612 {
2613 unsigned long cr0, cr4, efer;
2614 char buf[512];
2615 u64 smbase;
2616 int ret;
2617
2618 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2619 return emulate_ud(ctxt);
2620
2621 smbase = ctxt->ops->get_smbase(ctxt);
2622
2623 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2624 if (ret != X86EMUL_CONTINUE)
2625 return X86EMUL_UNHANDLEABLE;
2626
2627 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2628 ctxt->ops->set_nmi_mask(ctxt, false);
2629
2630 ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
2631 ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
2632
2633 /*
2634  * Get back to real mode, to prepare a safe state in which to load
2635  * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
2636  * supports long mode.
2637  */
2638 if (emulator_has_longmode(ctxt)) {
2639 struct desc_struct cs_desc;
2640
2641 /* Zero CR4.PCIDE before CR0.PG. */
2642 cr4 = ctxt->ops->get_cr(ctxt, 4);
2643 if (cr4 & X86_CR4_PCIDE)
2644 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2645
2646 /* Install a flat 32-bit code segment before leaving protected mode. */
2647 memset(&cs_desc, 0, sizeof(cs_desc));
2648 cs_desc.type = 0xb;
2649 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2650 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2651 }
2652
2653 /* For the 64-bit case, this will also clear EFER.LMA. */
2654 cr0 = ctxt->ops->get_cr(ctxt, 0);
2655 if (cr0 & X86_CR0_PE)
2656 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2657
2658 if (emulator_has_longmode(ctxt)) {
2659 /* Clear CR4.PAE before clearing EFER.LME. */
2660 cr4 = ctxt->ops->get_cr(ctxt, 4);
2661 if (cr4 & X86_CR4_PAE)
2662 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2663
2664 /* And finally go back to 32-bit mode. */
2665 efer = 0;
2666 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2667 }
2668
2669 /*
2670  * Give pre_leave_smm() a chance to make ISA-specific changes to the
2671  * vCPU state (e.g. enter guest mode) before loading state from the
2672  * SMM state-save area.
2673  */
2674 if (ctxt->ops->pre_leave_smm(ctxt, buf))
2675 return X86EMUL_UNHANDLEABLE;
2676
2677 #ifdef CONFIG_X86_64
2678 if (emulator_has_longmode(ctxt))
2679 ret = rsm_load_state_64(ctxt, buf);
2680 else
2681 #endif
2682 ret = rsm_load_state_32(ctxt, buf);
2683
2684 if (ret != X86EMUL_CONTINUE) {
2685 /* FIXME: should triple fault */
2686 return X86EMUL_UNHANDLEABLE;
2687 }
2688
2689 ctxt->ops->post_leave_smm(ctxt);
2690
2691 return X86EMUL_CONTINUE;
2692 }
2693
2694 static void
2695 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2696 struct desc_struct *cs, struct desc_struct *ss)
2697 {
2698 cs->l = 0;
2699 set_desc_base(cs, 0);
2700 cs->g = 1;
2701 set_desc_limit(cs, 0xfffff);
2702 cs->type = 0x0b;
2703 cs->s = 1;
2704 cs->dpl = 0;
2705 cs->p = 1;
2706 cs->d = 1;
2707 cs->avl = 0;
2708
2709 set_desc_base(ss, 0);
2710 set_desc_limit(ss, 0xfffff);
2711 ss->g = 1;
2712 ss->s = 1;
2713 ss->type = 0x03;
2714 ss->d = 1;
2715 ss->dpl = 0;
2716 ss->p = 1;
2717 ss->l = 0;
2718 ss->avl = 0;
2719 }
2720
2721 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2722 {
2723 u32 eax, ebx, ecx, edx;
2724
2725 eax = ecx = 0;
2726 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2727 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2728 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2729 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2730 }
2731
2732 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2733 {
2734 const struct x86_emulate_ops *ops = ctxt->ops;
2735 u32 eax, ebx, ecx, edx;
2736
2737 /*
2738  * SYSCALL is always enabled in long mode, so the check only needs to
2739  * become vendor specific (via CPUID) when other modes are active.
2740  */
2741 if (ctxt->mode == X86EMUL_MODE_PROT64)
2742 return true;
2743
2744 eax = 0x00000000;
2745 ecx = 0x00000000;
2746 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2747
2748 /*
2749  * Intel ("GenuineIntel"): Intel CPUs only support SYSCALL in 64-bit
2750  * long mode; a 64-bit guest running a 32-bit compat application
2751  * would get #UD on real hardware.  That behaviour could be emulated
2752  * with the AMD response, but AMD CPUs cannot behave like Intel, so
2753  * report SYSCALL as unsupported here.
2754  */
2755 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2756 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2757 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2758 return false;
2759
2760 /* AMD ("AuthenticAMD") */
2761 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2762 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2763 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2764 return true;
2765
2766 /* AMD ("AMDisbetter!") */
2767 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2768 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2769 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2770 return true;
2771
2772 /* Hygon ("HygonGenuine") */
2773 if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
2774 ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
2775 edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
2776 return true;
2777
2778 /*
2779  * default: not Intel, not AMD and not Hygon -- apply Intel's
2780  * stricter rules and report SYSCALL as unsupported.
2781  */
2782 return false;
2783 }
2784
2785 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2786 {
2787 const struct x86_emulate_ops *ops = ctxt->ops;
2788 struct desc_struct cs, ss;
2789 u64 msr_data;
2790 u16 cs_sel, ss_sel;
2791 u64 efer = 0;
2792
2793 /* SYSCALL is not available in real mode or VM86 mode. */
2794 if (ctxt->mode == X86EMUL_MODE_REAL ||
2795 ctxt->mode == X86EMUL_MODE_VM86)
2796 return emulate_ud(ctxt);
2797
2798 if (!(em_syscall_is_enabled(ctxt)))
2799 return emulate_ud(ctxt);
2800
2801 ops->get_msr(ctxt, MSR_EFER, &efer);
2802 setup_syscalls_segments(ctxt, &cs, &ss);
2803
2804 if (!(efer & EFER_SCE))
2805 return emulate_ud(ctxt);
2806
2807 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2808 msr_data >>= 32;
2809 cs_sel = (u16)(msr_data & 0xfffc);
2810 ss_sel = (u16)(msr_data + 8);
2811
2812 if (efer & EFER_LMA) {
2813 cs.d = 0;
2814 cs.l = 1;
2815 }
2816 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2817 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2818
2819 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2820 if (efer & EFER_LMA) {
2821 #ifdef CONFIG_X86_64
2822 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2823
2824 ops->get_msr(ctxt,
2825 ctxt->mode == X86EMUL_MODE_PROT64 ?
2826 MSR_LSTAR : MSR_CSTAR, &msr_data);
2827 ctxt->_eip = msr_data;
2828
2829 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2830 ctxt->eflags &= ~msr_data;
2831 ctxt->eflags |= X86_EFLAGS_FIXED;
2832 #endif
2833 } else {
2834 /* legacy (32-bit) mode */
2835 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2836 ctxt->_eip = (u32)msr_data;
2837
2838 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2839 }
2840
2841 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2842 return X86EMUL_CONTINUE;
2843 }
2844
2845 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2846 {
2847 const struct x86_emulate_ops *ops = ctxt->ops;
2848 struct desc_struct cs, ss;
2849 u64 msr_data;
2850 u16 cs_sel, ss_sel;
2851 u64 efer = 0;
2852
2853 ops->get_msr(ctxt, MSR_EFER, &efer);
2854
2855 if (ctxt->mode == X86EMUL_MODE_REAL)
2856 return emulate_gp(ctxt, 0);
2857
2858 /*
2859  * SYSENTER is not recognized on AMD in compat mode (but it is
2860  * recognized in legacy 32-bit mode).
2861  */
2862 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2863 && !vendor_intel(ctxt))
2864 return emulate_ud(ctxt);
2865
2866 /* SYSENTER/SYSEXIT are not emulated in 64-bit mode. */
2867 if (ctxt->mode == X86EMUL_MODE_PROT64)
2868 return X86EMUL_UNHANDLEABLE;
2869
2870 setup_syscalls_segments(ctxt, &cs, &ss);
2871
2872 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2873 if ((msr_data & 0xfffc) == 0x0)
2874 return emulate_gp(ctxt, 0);
2875
2876 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2877 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2878 ss_sel = cs_sel + 8;
2879 if (efer & EFER_LMA) {
2880 cs.d = 0;
2881 cs.l = 1;
2882 }
2883
2884 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2885 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2886
2887 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2888 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2889
2890 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2891 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2892 (u32)msr_data;
2893
2894 return X86EMUL_CONTINUE;
2895 }
2896
2897 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2898 {
2899 const struct x86_emulate_ops *ops = ctxt->ops;
2900 struct desc_struct cs, ss;
2901 u64 msr_data, rcx, rdx;
2902 int usermode;
2903 u16 cs_sel = 0, ss_sel = 0;
2904
2905 /* Inject #GP if in real mode or virtual-8086 mode. */
2906 if (ctxt->mode == X86EMUL_MODE_REAL ||
2907 ctxt->mode == X86EMUL_MODE_VM86)
2908 return emulate_gp(ctxt, 0);
2909
2910 setup_syscalls_segments(ctxt, &cs, &ss);
2911
2912 if ((ctxt->rex_prefix & 0x8) != 0x0)
2913 usermode = X86EMUL_MODE_PROT64;
2914 else
2915 usermode = X86EMUL_MODE_PROT32;
2916
2917 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2918 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2919
2920 cs.dpl = 3;
2921 ss.dpl = 3;
2922 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2923 switch (usermode) {
2924 case X86EMUL_MODE_PROT32:
2925 cs_sel = (u16)(msr_data + 16);
2926 if ((msr_data & 0xfffc) == 0x0)
2927 return emulate_gp(ctxt, 0);
2928 ss_sel = (u16)(msr_data + 24);
2929 rcx = (u32)rcx;
2930 rdx = (u32)rdx;
2931 break;
2932 case X86EMUL_MODE_PROT64:
2933 cs_sel = (u16)(msr_data + 32);
2934 if (msr_data == 0x0)
2935 return emulate_gp(ctxt, 0);
2936 ss_sel = cs_sel + 8;
2937 cs.d = 0;
2938 cs.l = 1;
2939 if (emul_is_noncanonical_address(rcx, ctxt) ||
2940 emul_is_noncanonical_address(rdx, ctxt))
2941 return emulate_gp(ctxt, 0);
2942 break;
2943 }
2944 cs_sel |= SEGMENT_RPL_MASK;
2945 ss_sel |= SEGMENT_RPL_MASK;
2946
2947 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2948 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2949
2950 ctxt->_eip = rdx;
2951 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2952
2953 return X86EMUL_CONTINUE;
2954 }
2955
2956 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2957 {
2958 int iopl;
2959 if (ctxt->mode == X86EMUL_MODE_REAL)
2960 return false;
2961 if (ctxt->mode == X86EMUL_MODE_VM86)
2962 return true;
2963 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2964 return ctxt->ops->cpl(ctxt) > iopl;
2965 }
2966
2967 #define VMWARE_PORT_VMPORT (0x5658)
2968 #define VMWARE_PORT_VMRPC (0x5659)
2969
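/*
 * Check the TSS I/O permission bitmap for an IN/OUT of 'len' bytes at
 * 'port'.  Access is allowed only if every bit covering the port range is
 * clear in the bitmap referenced by the current TSS.
 */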
2970 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2971 u16 port, u16 len)
2972 {
2973 const struct x86_emulate_ops *ops = ctxt->ops;
2974 struct desc_struct tr_seg;
2975 u32 base3;
2976 int r;
2977 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2978 unsigned mask = (1 << len) - 1;
2979 unsigned long base;
2980
2981 /*
2982  * VMware allows access to these ports even if denied
2983  * by the TSS I/O permission bitmap.  Mimic that behavior.
2984  */
2985 if (enable_vmware_backdoor &&
2986 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2987 return true;
2988
2989 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2990 if (!tr_seg.p)
2991 return false;
2992 if (desc_limit_scaled(&tr_seg) < 103)
2993 return false;
2994 base = get_desc_base(&tr_seg);
2995 #ifdef CONFIG_X86_64
2996 base |= ((u64)base3) << 32;
2997 #endif
2998 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2999 if (r != X86EMUL_CONTINUE)
3000 return false;
3001 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
3002 return false;
3003 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
3004 if (r != X86EMUL_CONTINUE)
3005 return false;
3006 if ((perm >> bit_idx) & mask)
3007 return false;
3008 return true;
3009 }
3010
3011 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
3012 u16 port, u16 len)
3013 {
3014 if (ctxt->perm_ok)
3015 return true;
3016
3017 if (emulator_bad_iopl(ctxt))
3018 if (!emulator_io_port_access_allowed(ctxt, port, len))
3019 return false;
3020
3021 ctxt->perm_ok = true;
3022
3023 return true;
3024 }
3025
3026 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
3027 {
3028 /*
3029  * Intel CPUs mask the counter and pointer registers rather strangely
3030  * when ECX is zero, due to REP-string optimizations.
3031  */
3032 #ifdef CONFIG_X86_64
3033 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
3034 return;
3035
3036 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
3037
3038 switch (ctxt->b) {
3039 case 0xa4:
3040 case 0xa5:
3041 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
3042 /* fall through */
3043 case 0xaa:
3044 case 0xab:
3045 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
3046 }
3047 #endif
3048 }
3049
3050 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
3051 struct tss_segment_16 *tss)
3052 {
3053 tss->ip = ctxt->_eip;
3054 tss->flag = ctxt->eflags;
3055 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
3056 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
3057 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
3058 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
3059 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
3060 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
3061 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
3062 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3063
3064 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3065 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3066 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3067 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3068 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3069 }
3070
3071 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
3072 struct tss_segment_16 *tss)
3073 {
3074 int ret;
3075 u8 cpl;
3076
3077 ctxt->_eip = tss->ip;
3078 ctxt->eflags = tss->flag | 2;
3079 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
3080 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
3081 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
3082 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
3083 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
3084 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
3085 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
3086 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3087
3088 /*
3089  * The SDM says that segment selectors are loaded before the segment
3090  * descriptors.
3091  */
3092 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
3093 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3094 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3095 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3096 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3097
3098 cpl = tss->cs & 3;
3099
3100 /*
3101  * Now load the segment descriptors.  If a fault happens at this stage
3102  * it is handled in the context of the new task.
3103  */
3104 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3105 X86_TRANSFER_TASK_SWITCH, NULL);
3106 if (ret != X86EMUL_CONTINUE)
3107 return ret;
3108 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3109 X86_TRANSFER_TASK_SWITCH, NULL);
3110 if (ret != X86EMUL_CONTINUE)
3111 return ret;
3112 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3113 X86_TRANSFER_TASK_SWITCH, NULL);
3114 if (ret != X86EMUL_CONTINUE)
3115 return ret;
3116 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3117 X86_TRANSFER_TASK_SWITCH, NULL);
3118 if (ret != X86EMUL_CONTINUE)
3119 return ret;
3120 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3121 X86_TRANSFER_TASK_SWITCH, NULL);
3122 if (ret != X86EMUL_CONTINUE)
3123 return ret;
3124
3125 return X86EMUL_CONTINUE;
3126 }
3127
3128 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3129 u16 tss_selector, u16 old_tss_sel,
3130 ulong old_tss_base, struct desc_struct *new_desc)
3131 {
3132 struct tss_segment_16 tss_seg;
3133 int ret;
3134 u32 new_tss_base = get_desc_base(new_desc);
3135
3136 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3137 if (ret != X86EMUL_CONTINUE)
3138 return ret;
3139
3140 save_state_to_tss16(ctxt, &tss_seg);
3141
3142 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3143 if (ret != X86EMUL_CONTINUE)
3144 return ret;
3145
3146 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3147 if (ret != X86EMUL_CONTINUE)
3148 return ret;
3149
3150 if (old_tss_sel != 0xffff) {
3151 tss_seg.prev_task_link = old_tss_sel;
3152
3153 ret = linear_write_system(ctxt, new_tss_base,
3154 &tss_seg.prev_task_link,
3155 sizeof(tss_seg.prev_task_link));
3156 if (ret != X86EMUL_CONTINUE)
3157 return ret;
3158 }
3159
3160 return load_state_from_tss16(ctxt, &tss_seg);
3161 }
3162
3163 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3164 struct tss_segment_32 *tss)
3165 {
3166 /* CR3 and the LDT selector are not saved intentionally. */
3167 tss->eip = ctxt->_eip;
3168 tss->eflags = ctxt->eflags;
3169 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3170 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3171 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3172 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3173 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3174 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3175 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3176 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3177
3178 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3179 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3180 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3181 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3182 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3183 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3184 }
3185
3186 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3187 struct tss_segment_32 *tss)
3188 {
3189 int ret;
3190 u8 cpl;
3191
3192 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3193 return emulate_gp(ctxt, 0);
3194 ctxt->_eip = tss->eip;
3195 ctxt->eflags = tss->eflags | 2;
3196
3197 /* General purpose registers */
3198 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3199 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3200 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3201 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3202 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3203 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3204 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3205 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3206
3207 /*
3208  * The SDM says that segment selectors are loaded before the segment
3209  * descriptors.  This is important because CPL checks will use
3210  * CS.RPL.
3211  */
3212 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3213 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3214 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3215 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3216 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3217 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3218 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3219
3220 /*
3221  * If we're switching between protected mode and VM86, we need to make
3222  * sure to update the mode before loading the segment descriptors so
3223  * that the selectors are interpreted correctly.
3224  */
3225 if (ctxt->eflags & X86_EFLAGS_VM) {
3226 ctxt->mode = X86EMUL_MODE_VM86;
3227 cpl = 3;
3228 } else {
3229 ctxt->mode = X86EMUL_MODE_PROT32;
3230 cpl = tss->cs & 3;
3231 }
3232
3233 /*
3234  * Now load the segment descriptors.  If a fault happens at this stage
3235  * it is handled in the context of the new task.
3236  */
3237 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3238 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3239 if (ret != X86EMUL_CONTINUE)
3240 return ret;
3241 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3242 X86_TRANSFER_TASK_SWITCH, NULL);
3243 if (ret != X86EMUL_CONTINUE)
3244 return ret;
3245 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3246 X86_TRANSFER_TASK_SWITCH, NULL);
3247 if (ret != X86EMUL_CONTINUE)
3248 return ret;
3249 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3250 X86_TRANSFER_TASK_SWITCH, NULL);
3251 if (ret != X86EMUL_CONTINUE)
3252 return ret;
3253 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3254 X86_TRANSFER_TASK_SWITCH, NULL);
3255 if (ret != X86EMUL_CONTINUE)
3256 return ret;
3257 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3258 X86_TRANSFER_TASK_SWITCH, NULL);
3259 if (ret != X86EMUL_CONTINUE)
3260 return ret;
3261 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3262 X86_TRANSFER_TASK_SWITCH, NULL);
3263
3264 return ret;
3265 }
3266
3267 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3268 u16 tss_selector, u16 old_tss_sel,
3269 ulong old_tss_base, struct desc_struct *new_desc)
3270 {
3271 struct tss_segment_32 tss_seg;
3272 int ret;
3273 u32 new_tss_base = get_desc_base(new_desc);
3274 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3275 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3276
3277 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3278 if (ret != X86EMUL_CONTINUE)
3279 return ret;
3280
3281 save_state_to_tss32(ctxt, &tss_seg);
3282
3283 /* Only the GP registers and segment selectors are saved. */
3284 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3285 ldt_sel_offset - eip_offset);
3286 if (ret != X86EMUL_CONTINUE)
3287 return ret;
3288
3289 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3290 if (ret != X86EMUL_CONTINUE)
3291 return ret;
3292
3293 if (old_tss_sel != 0xffff) {
3294 tss_seg.prev_task_link = old_tss_sel;
3295
3296 ret = linear_write_system(ctxt, new_tss_base,
3297 &tss_seg.prev_task_link,
3298 sizeof(tss_seg.prev_task_link));
3299 if (ret != X86EMUL_CONTINUE)
3300 return ret;
3301 }
3302
3303 return load_state_from_tss32(ctxt, &tss_seg);
3304 }
3305
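/*
 * Core of hardware task switch emulation: validate the target TSS
 * descriptor, save the outgoing state into the old TSS, load the new TSS
 * (16- or 32-bit format), update the busy and NT bits and set CR0.TS.
 */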
3306 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3307 u16 tss_selector, int idt_index, int reason,
3308 bool has_error_code, u32 error_code)
3309 {
3310 const struct x86_emulate_ops *ops = ctxt->ops;
3311 struct desc_struct curr_tss_desc, next_tss_desc;
3312 int ret;
3313 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3314 ulong old_tss_base =
3315 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3316 u32 desc_limit;
3317 ulong desc_addr, dr7;
3318
3319
3320
3321 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3322 if (ret != X86EMUL_CONTINUE)
3323 return ret;
3324 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3325 if (ret != X86EMUL_CONTINUE)
3326 return ret;
3327
3328 /* FIXME: check that next_tss_desc is actually a TSS descriptor. */
3329
3330 /*
3331  * Check privileges.  Task switches caused by:
3332  * 1. jmp/call/int through a task gate: check against the DPL of the gate
3333  * 2. Exception/IRQ/iret: no check is performed
3334  * 3. jmp/call directly to a TSS or task gate: no check is performed,
3335  *    since the hardware checks it before exiting.
3336  */
3337
3338 if (reason == TASK_SWITCH_GATE) {
3339 if (idt_index != -1) {
3340
3341 struct desc_struct task_gate_desc;
3342 int dpl;
3343
3344 ret = read_interrupt_descriptor(ctxt, idt_index,
3345 &task_gate_desc);
3346 if (ret != X86EMUL_CONTINUE)
3347 return ret;
3348
3349 dpl = task_gate_desc.dpl;
3350 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3351 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3352 }
3353 }
3354
3355 desc_limit = desc_limit_scaled(&next_tss_desc);
3356 if (!next_tss_desc.p ||
3357 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3358 desc_limit < 0x2b)) {
3359 return emulate_ts(ctxt, tss_selector & 0xfffc);
3360 }
3361
3362 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3363 curr_tss_desc.type &= ~(1 << 1);
3364 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3365 }
3366
3367 if (reason == TASK_SWITCH_IRET)
3368 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3369
3370 /* Only nested switches (CALL or through a gate) record a back link to
3371    the previous task; otherwise drop old_tss_sel. */
3372 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3373 old_tss_sel = 0xffff;
3374
3375 if (next_tss_desc.type & 8)
3376 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3377 old_tss_base, &next_tss_desc);
3378 else
3379 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3380 old_tss_base, &next_tss_desc);
3381 if (ret != X86EMUL_CONTINUE)
3382 return ret;
3383
3384 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3385 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3386
3387 if (reason != TASK_SWITCH_IRET) {
3388 next_tss_desc.type |= (1 << 1);
3389 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3390 }
3391
3392 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3393 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3394
3395 if (has_error_code) {
3396 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3397 ctxt->lock_prefix = 0;
3398 ctxt->src.val = (unsigned long) error_code;
3399 ret = em_push(ctxt);
3400 }
3401
3402 ops->get_dr(ctxt, 7, &dr7);
3403 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3404
3405 return ret;
3406 }
3407
3408 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3409 u16 tss_selector, int idt_index, int reason,
3410 bool has_error_code, u32 error_code)
3411 {
3412 int rc;
3413
3414 invalidate_registers(ctxt);
3415 ctxt->_eip = ctxt->eip;
3416 ctxt->dst.type = OP_NONE;
3417
3418 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3419 has_error_code, error_code);
3420
3421 if (rc == X86EMUL_CONTINUE) {
3422 ctxt->eip = ctxt->_eip;
3423 writeback_registers(ctxt);
3424 }
3425
3426 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3427 }
3428
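/*
 * Advance a string-instruction pointer register (RSI/RDI) by the number of
 * elements processed times the element size, moving backwards when
 * EFLAGS.DF is set, and refresh the operand's effective address.
 */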
3429 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3430 struct operand *op)
3431 {
3432 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3433
3434 register_address_increment(ctxt, reg, df * op->bytes);
3435 op->addr.mem.ea = register_address(ctxt, reg);
3436 }
3437
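/*
 * DAS: decimal-adjust AL after subtraction.  Adjust the low nibble first
 * (AL -= 6 if it exceeds 9 or AF is set), then the high nibble (AL -= 0x60),
 * recomputing CF/AF and letting a dummy OR set PF/ZF/SF.
 */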
3438 static int em_das(struct x86_emulate_ctxt *ctxt)
3439 {
3440 u8 al, old_al;
3441 bool af, cf, old_cf;
3442
3443 cf = ctxt->eflags & X86_EFLAGS_CF;
3444 al = ctxt->dst.val;
3445
3446 old_al = al;
3447 old_cf = cf;
3448 cf = false;
3449 af = ctxt->eflags & X86_EFLAGS_AF;
3450 if ((al & 0x0f) > 9 || af) {
3451 al -= 6;
3452 cf = old_cf | (al >= 250);
3453 af = true;
3454 } else {
3455 af = false;
3456 }
3457 if (old_al > 0x99 || old_cf) {
3458 al -= 0x60;
3459 cf = true;
3460 }
3461
3462 ctxt->dst.val = al;
3463
3464 ctxt->src.type = OP_IMM;
3465 ctxt->src.val = 0;
3466 ctxt->src.bytes = 1;
3467 fastop(ctxt, em_or);
3468 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3469 if (cf)
3470 ctxt->eflags |= X86_EFLAGS_CF;
3471 if (af)
3472 ctxt->eflags |= X86_EFLAGS_AF;
3473 return X86EMUL_CONTINUE;
3474 }
3475
3476 static int em_aam(struct x86_emulate_ctxt *ctxt)
3477 {
3478 u8 al, ah;
3479
3480 if (ctxt->src.val == 0)
3481 return emulate_de(ctxt);
3482
3483 al = ctxt->dst.val & 0xff;
3484 ah = al / ctxt->src.val;
3485 al %= ctxt->src.val;
3486
3487 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3488
3489 /* Set PF, ZF and SF via a dummy OR. */
3490 ctxt->src.type = OP_IMM;
3491 ctxt->src.val = 0;
3492 ctxt->src.bytes = 1;
3493 fastop(ctxt, em_or);
3494
3495 return X86EMUL_CONTINUE;
3496 }
3497
3498 static int em_aad(struct x86_emulate_ctxt *ctxt)
3499 {
3500 u8 al = ctxt->dst.val & 0xff;
3501 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3502
3503 al = (al + (ah * ctxt->src.val)) & 0xff;
3504
3505 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3506
3507 /* Set PF, ZF and SF via a dummy OR. */
3508 ctxt->src.type = OP_IMM;
3509 ctxt->src.val = 0;
3510 ctxt->src.bytes = 1;
3511 fastop(ctxt, em_or);
3512
3513 return X86EMUL_CONTINUE;
3514 }
3515
3516 static int em_call(struct x86_emulate_ctxt *ctxt)
3517 {
3518 int rc;
3519 long rel = ctxt->src.val;
3520
3521 ctxt->src.val = (unsigned long)ctxt->_eip;
3522 rc = jmp_rel(ctxt, rel);
3523 if (rc != X86EMUL_CONTINUE)
3524 return rc;
3525 return em_push(ctxt);
3526 }
3527
3528 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3529 {
3530 u16 sel, old_cs;
3531 ulong old_eip;
3532 int rc;
3533 struct desc_struct old_desc, new_desc;
3534 const struct x86_emulate_ops *ops = ctxt->ops;
3535 int cpl = ctxt->ops->cpl(ctxt);
3536 enum x86emul_mode prev_mode = ctxt->mode;
3537
3538 old_eip = ctxt->_eip;
3539 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3540
3541 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3542 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3543 X86_TRANSFER_CALL_JMP, &new_desc);
3544 if (rc != X86EMUL_CONTINUE)
3545 return rc;
3546
3547 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3548 if (rc != X86EMUL_CONTINUE)
3549 goto fail;
3550
3551 ctxt->src.val = old_cs;
3552 rc = em_push(ctxt);
3553 if (rc != X86EMUL_CONTINUE)
3554 goto fail;
3555
3556 ctxt->src.val = old_eip;
3557 rc = em_push(ctxt);
3558
3559
3560 if (rc != X86EMUL_CONTINUE) {
3561 pr_warn_once("faulting far call emulation tainted memory\n");
3562 goto fail;
3563 }
3564 return rc;
3565 fail:
3566 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3567 ctxt->mode = prev_mode;
3568 return rc;
3569
3570 }
3571
3572 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3573 {
3574 int rc;
3575 unsigned long eip;
3576
3577 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3578 if (rc != X86EMUL_CONTINUE)
3579 return rc;
3580 rc = assign_eip_near(ctxt, eip);
3581 if (rc != X86EMUL_CONTINUE)
3582 return rc;
3583 rsp_increment(ctxt, ctxt->src.val);
3584 return X86EMUL_CONTINUE;
3585 }
3586
3587 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3588 {
3589 /* Write back the register source. */
3590 ctxt->src.val = ctxt->dst.val;
3591 write_register_operand(&ctxt->src);
3592
3593 /* Write back the memory destination with an implicit LOCK prefix. */
3594 ctxt->dst.val = ctxt->src.orig_val;
3595 ctxt->lock_prefix = 1;
3596 return X86EMUL_CONTINUE;
3597 }
3598
3599 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3600 {
3601 ctxt->dst.val = ctxt->src2.val;
3602 return fastop(ctxt, em_imul);
3603 }
3604
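/*
 * CWD/CDQ/CQO: sign-extend the accumulator into rDX.  The expression
 * ~((src >> (bits - 1)) - 1) yields all-ones when the sign bit is set and
 * zero otherwise.
 */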
3605 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3606 {
3607 ctxt->dst.type = OP_REG;
3608 ctxt->dst.bytes = ctxt->src.bytes;
3609 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3610 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3611
3612 return X86EMUL_CONTINUE;
3613 }
3614
3615 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3616 {
3617 u64 tsc_aux = 0;
3618
3619 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3620 return emulate_gp(ctxt, 0);
3621 ctxt->dst.val = tsc_aux;
3622 return X86EMUL_CONTINUE;
3623 }
3624
3625 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3626 {
3627 u64 tsc = 0;
3628
3629 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3630 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3631 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3632 return X86EMUL_CONTINUE;
3633 }
3634
3635 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3636 {
3637 u64 pmc;
3638
3639 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3640 return emulate_gp(ctxt, 0);
3641 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3642 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3643 return X86EMUL_CONTINUE;
3644 }
3645
3646 static int em_mov(struct x86_emulate_ctxt *ctxt)
3647 {
3648 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3649 return X86EMUL_CONTINUE;
3650 }
3651
3652 #define FFL(x) bit(X86_FEATURE_##x)
3653
3654 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3655 {
3656 u32 ebx, ecx, edx, eax = 1;
3657 u16 tmp;
3658
3659 /*
3660  * Check that MOVBE is set in the guest-visible CPUID leaf.
3661  */
3662 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3663 if (!(ecx & FFL(MOVBE)))
3664 return emulate_ud(ctxt);
3665
3666 switch (ctxt->op_bytes) {
3667 case 2:
3668 /*
3669  * From the MOVBE definition: "...when the operand size is 16 bits,
3670  * the upper word of the destination register remains unchanged
3671  * ..."
3672  *
3673  * Casting both ->valptr and ->val to u16 would break strict-aliasing
3674  * rules, so the operation is done almost by hand.
3675  */
3676 tmp = (u16)ctxt->src.val;
3677 ctxt->dst.val &= ~0xffffUL;
3678 ctxt->dst.val |= (unsigned long)swab16(tmp);
3679 break;
3680 case 4:
3681 ctxt->dst.val = swab32((u32)ctxt->src.val);
3682 break;
3683 case 8:
3684 ctxt->dst.val = swab64(ctxt->src.val);
3685 break;
3686 default:
3687 BUG();
3688 }
3689 return X86EMUL_CONTINUE;
3690 }
3691
3692 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3693 {
3694 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3695 return emulate_gp(ctxt, 0);
3696
3697 /* Disable writeback. */
3698 ctxt->dst.type = OP_NONE;
3699 return X86EMUL_CONTINUE;
3700 }
3701
3702 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3703 {
3704 unsigned long val;
3705
3706 if (ctxt->mode == X86EMUL_MODE_PROT64)
3707 val = ctxt->src.val & ~0ULL;
3708 else
3709 val = ctxt->src.val & ~0U;
3710
3711
3712 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3713 return emulate_gp(ctxt, 0);
3714
3715 /* Disable writeback. */
3716 ctxt->dst.type = OP_NONE;
3717 return X86EMUL_CONTINUE;
3718 }
3719
3720 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3721 {
3722 u64 msr_data;
3723
3724 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3725 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3726 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3727 return emulate_gp(ctxt, 0);
3728
3729 return X86EMUL_CONTINUE;
3730 }
3731
3732 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3733 {
3734 u64 msr_data;
3735
3736 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3737 return emulate_gp(ctxt, 0);
3738
3739 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3740 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3741 return X86EMUL_CONTINUE;
3742 }
3743
3744 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3745 {
3746 if (segment > VCPU_SREG_GS &&
3747 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3748 ctxt->ops->cpl(ctxt) > 0)
3749 return emulate_gp(ctxt, 0);
3750
3751 ctxt->dst.val = get_segment_selector(ctxt, segment);
3752 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3753 ctxt->dst.bytes = 2;
3754 return X86EMUL_CONTINUE;
3755 }
3756
3757 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3758 {
3759 if (ctxt->modrm_reg > VCPU_SREG_GS)
3760 return emulate_ud(ctxt);
3761
3762 return em_store_sreg(ctxt, ctxt->modrm_reg);
3763 }
3764
3765 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3766 {
3767 u16 sel = ctxt->src.val;
3768
3769 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3770 return emulate_ud(ctxt);
3771
3772 if (ctxt->modrm_reg == VCPU_SREG_SS)
3773 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3774
3775 /* Disable writeback. */
3776 ctxt->dst.type = OP_NONE;
3777 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3778 }
3779
3780 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3781 {
3782 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3783 }
3784
3785 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3786 {
3787 u16 sel = ctxt->src.val;
3788
3789 /* Disable writeback. */
3790 ctxt->dst.type = OP_NONE;
3791 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3792 }
3793
3794 static int em_str(struct x86_emulate_ctxt *ctxt)
3795 {
3796 return em_store_sreg(ctxt, VCPU_SREG_TR);
3797 }
3798
3799 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3800 {
3801 u16 sel = ctxt->src.val;
3802
3803 /* Disable writeback. */
3804 ctxt->dst.type = OP_NONE;
3805 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3806 }
3807
3808 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3809 {
3810 int rc;
3811 ulong linear;
3812
3813 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3814 if (rc == X86EMUL_CONTINUE)
3815 ctxt->ops->invlpg(ctxt, linear);
3816 /* Disable writeback. */
3817 ctxt->dst.type = OP_NONE;
3818 return X86EMUL_CONTINUE;
3819 }
3820
3821 static int em_clts(struct x86_emulate_ctxt *ctxt)
3822 {
3823 ulong cr0;
3824
3825 cr0 = ctxt->ops->get_cr(ctxt, 0);
3826 cr0 &= ~X86_CR0_TS;
3827 ctxt->ops->set_cr(ctxt, 0, cr0);
3828 return X86EMUL_CONTINUE;
3829 }
3830
3831 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3832 {
3833 int rc = ctxt->ops->fix_hypercall(ctxt);
3834
3835 if (rc != X86EMUL_CONTINUE)
3836 return rc;
3837
3838 /* Let the processor re-execute the fixed hypercall. */
3839 ctxt->_eip = ctxt->eip;
3840 /* Disable writeback. */
3841 ctxt->dst.type = OP_NONE;
3842 return X86EMUL_CONTINUE;
3843 }
3844
3845 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3846 void (*get)(struct x86_emulate_ctxt *ctxt,
3847 struct desc_ptr *ptr))
3848 {
3849 struct desc_ptr desc_ptr;
3850
3851 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3852 ctxt->ops->cpl(ctxt) > 0)
3853 return emulate_gp(ctxt, 0);
3854
3855 if (ctxt->mode == X86EMUL_MODE_PROT64)
3856 ctxt->op_bytes = 8;
3857 get(ctxt, &desc_ptr);
3858 if (ctxt->op_bytes == 2) {
3859 ctxt->op_bytes = 4;
3860 desc_ptr.address &= 0x00ffffff;
3861 }
3862
3863 ctxt->dst.type = OP_NONE;
3864 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3865 &desc_ptr, 2 + ctxt->op_bytes);
3866 }
3867
3868 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3869 {
3870 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3871 }
3872
3873 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3874 {
3875 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3876 }
3877
3878 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3879 {
3880 struct desc_ptr desc_ptr;
3881 int rc;
3882
3883 if (ctxt->mode == X86EMUL_MODE_PROT64)
3884 ctxt->op_bytes = 8;
3885 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3886 &desc_ptr.size, &desc_ptr.address,
3887 ctxt->op_bytes);
3888 if (rc != X86EMUL_CONTINUE)
3889 return rc;
3890 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3891 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3892 return emulate_gp(ctxt, 0);
3893 if (lgdt)
3894 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3895 else
3896 ctxt->ops->set_idt(ctxt, &desc_ptr);
3897
3898 ctxt->dst.type = OP_NONE;
3899 return X86EMUL_CONTINUE;
3900 }
3901
3902 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3903 {
3904 return em_lgdt_lidt(ctxt, true);
3905 }
3906
3907 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3908 {
3909 return em_lgdt_lidt(ctxt, false);
3910 }
3911
3912 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3913 {
3914 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3915 ctxt->ops->cpl(ctxt) > 0)
3916 return emulate_gp(ctxt, 0);
3917
3918 if (ctxt->dst.type == OP_MEM)
3919 ctxt->dst.bytes = 2;
3920 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3921 return X86EMUL_CONTINUE;
3922 }
3923
3924 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3925 {
3926 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3927 | (ctxt->src.val & 0x0f));
3928 ctxt->dst.type = OP_NONE;
3929 return X86EMUL_CONTINUE;
3930 }
3931
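/*
 * LOOP/LOOPE/LOOPNE: decrement rCX and branch while it is non-zero.  For
 * LOOPNE/LOOPE (opcodes 0xe0/0xe1) the ZF condition is evaluated via
 * test_cc(ctxt->b ^ 0x5); plain LOOP is 0xe2.
 */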
3932 static int em_loop(struct x86_emulate_ctxt *ctxt)
3933 {
3934 int rc = X86EMUL_CONTINUE;
3935
3936 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3937 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3938 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3939 rc = jmp_rel(ctxt, ctxt->src.val);
3940
3941 return rc;
3942 }
3943
3944 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3945 {
3946 int rc = X86EMUL_CONTINUE;
3947
3948 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3949 rc = jmp_rel(ctxt, ctxt->src.val);
3950
3951 return rc;
3952 }
3953
3954 static int em_in(struct x86_emulate_ctxt *ctxt)
3955 {
3956 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3957 &ctxt->dst.val))
3958 return X86EMUL_IO_NEEDED;
3959
3960 return X86EMUL_CONTINUE;
3961 }
3962
3963 static int em_out(struct x86_emulate_ctxt *ctxt)
3964 {
3965 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3966 &ctxt->src.val, 1);
3967
3968 ctxt->dst.type = OP_NONE;
3969 return X86EMUL_CONTINUE;
3970 }
3971
3972 static int em_cli(struct x86_emulate_ctxt *ctxt)
3973 {
3974 if (emulator_bad_iopl(ctxt))
3975 return emulate_gp(ctxt, 0);
3976
3977 ctxt->eflags &= ~X86_EFLAGS_IF;
3978 return X86EMUL_CONTINUE;
3979 }
3980
3981 static int em_sti(struct x86_emulate_ctxt *ctxt)
3982 {
3983 if (emulator_bad_iopl(ctxt))
3984 return emulate_gp(ctxt, 0);
3985
3986 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3987 ctxt->eflags |= X86_EFLAGS_IF;
3988 return X86EMUL_CONTINUE;
3989 }
3990
3991 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3992 {
3993 u32 eax, ebx, ecx, edx;
3994 u64 msr = 0;
3995
3996 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3997 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3998 ctxt->ops->cpl(ctxt)) {
3999 return emulate_gp(ctxt, 0);
4000 }
4001
4002 eax = reg_read(ctxt, VCPU_REGS_RAX);
4003 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4004 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
4005 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
4006 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
4007 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
4008 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
4009 return X86EMUL_CONTINUE;
4010 }
4011
4012 static int em_sahf(struct x86_emulate_ctxt *ctxt)
4013 {
4014 u32 flags;
4015
4016 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
4017 X86_EFLAGS_SF;
4018 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
4019
4020 ctxt->eflags &= ~0xffUL;
4021 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
4022 return X86EMUL_CONTINUE;
4023 }
4024
4025 static int em_lahf(struct x86_emulate_ctxt *ctxt)
4026 {
4027 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
4028 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
4029 return X86EMUL_CONTINUE;
4030 }
4031
4032 static int em_bswap(struct x86_emulate_ctxt *ctxt)
4033 {
4034 switch (ctxt->op_bytes) {
4035 #ifdef CONFIG_X86_64
4036 case 8:
4037 asm("bswap %0" : "+r"(ctxt->dst.val));
4038 break;
4039 #endif
4040 default:
4041 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
4042 break;
4043 }
4044 return X86EMUL_CONTINUE;
4045 }
4046
4047 static int em_clflush(struct x86_emulate_ctxt *ctxt)
4048 {
4049 /* CLFLUSH is emulated as a no-op regardless of CPUID. */
4050 return X86EMUL_CONTINUE;
4051 }
4052
4053 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
4054 {
4055 ctxt->dst.val = (s32) ctxt->src.val;
4056 return X86EMUL_CONTINUE;
4057 }
4058
4059 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
4060 {
4061 u32 eax = 1, ebx, ecx = 0, edx;
4062
4063 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4064 if (!(edx & FFL(FXSR)))
4065 return emulate_ud(ctxt);
4066
4067 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
4068 return emulate_nm(ctxt);
4069
4070
4071 /*
4072  * 64-bit mode is not emulated here (fxsave64/fxrstor64 are not handled).
4073  */
4074 if (ctxt->mode >= X86EMUL_MODE_PROT64)
4075 return X86EMUL_UNHANDLEABLE;
4076
4077 return X86EMUL_CONTINUE;
4078 }
4079
4080 /*
4081  * The hardware doesn't save and restore XMM0-7 without CR4.OSFXSR, but
4082  * it does save and restore MXCSR.
4083  */
4084 static size_t __fxstate_size(int nregs)
4085 {
4086 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4087 }
4088
4089 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4090 {
4091 bool cr4_osfxsr;
4092 if (ctxt->mode == X86EMUL_MODE_PROT64)
4093 return __fxstate_size(16);
4094
4095 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4096 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4097 }
4098
4099 /*
4100  * Emulate FXSAVE by executing the real instruction on the host (with
4101  * faults caught via asm_safe()) and copying the resulting image to
4102  * guest memory.  Only the mode-dependent prefix of the 512-byte area is
4103  * copied; see fxstate_size().  64-bit mode is rejected by check_fxsr().
4104  */
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4118 {
4119 struct fxregs_state fx_state;
4120 int rc;
4121
4122 rc = check_fxsr(ctxt);
4123 if (rc != X86EMUL_CONTINUE)
4124 return rc;
4125
4126 emulator_get_fpu();
4127
4128 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4129
4130 emulator_put_fpu();
4131
4132 if (rc != X86EMUL_CONTINUE)
4133 return rc;
4134
4135 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4136 fxstate_size(ctxt));
4137 }
4138
4139 /*
4140  * FXRSTOR might restore XMM registers not provided by the guest.  Fill
4141  * in the host registers (via FXSAVE) instead, so they won't be modified.
4142  * (Preemption has to stay disabled until the FXRSTOR.)
4143  *
4144  * noinline keeps the stack of the callers small.
4145  */
4146 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4147 const size_t used_size)
4148 {
4149 struct fxregs_state fx_tmp;
4150 int rc;
4151
4152 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4153 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4154 __fxstate_size(16) - used_size);
4155
4156 return rc;
4157 }
4158
4159 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4160 {
4161 struct fxregs_state fx_state;
4162 int rc;
4163 size_t size;
4164
4165 rc = check_fxsr(ctxt);
4166 if (rc != X86EMUL_CONTINUE)
4167 return rc;
4168
4169 size = fxstate_size(ctxt);
4170 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4171 if (rc != X86EMUL_CONTINUE)
4172 return rc;
4173
4174 emulator_get_fpu();
4175
4176 if (size < __fxstate_size(16)) {
4177 rc = fxregs_fixup(&fx_state, size);
4178 if (rc != X86EMUL_CONTINUE)
4179 goto out;
4180 }
4181
4182 if (fx_state.mxcsr >> 16) {
4183 rc = emulate_gp(ctxt, 0);
4184 goto out;
4185 }
4186
4187 if (rc == X86EMUL_CONTINUE)
4188 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4189
4190 out:
4191 emulator_put_fpu();
4192
4193 return rc;
4194 }
4195
4196 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4197 {
4198 u32 eax, ecx, edx;
4199
4200 eax = reg_read(ctxt, VCPU_REGS_RAX);
4201 edx = reg_read(ctxt, VCPU_REGS_RDX);
4202 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4203
4204 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4205 return emulate_gp(ctxt, 0);
4206
4207 return X86EMUL_CONTINUE;
4208 }
4209
4210 static bool valid_cr(int nr)
4211 {
4212 switch (nr) {
4213 case 0:
4214 case 2 ... 4:
4215 case 8:
4216 return true;
4217 default:
4218 return false;
4219 }
4220 }
4221
4222 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
4223 {
4224 if (!valid_cr(ctxt->modrm_reg))
4225 return emulate_ud(ctxt);
4226
4227 return X86EMUL_CONTINUE;
4228 }
4229
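/*
 * Validate a MOV-to-CR before performing it: reject writes to reserved
 * bits and mode-dependent invalid combinations (e.g. CR0.PG without PE,
 * enabling paging with EFER.LME but CR4.PAE clear, or CR3 bits above
 * MAXPHYADDR in long mode).
 */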
4230 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
4231 {
4232 u64 new_val = ctxt->src.val64;
4233 int cr = ctxt->modrm_reg;
4234 u64 efer = 0;
4235
4236 static u64 cr_reserved_bits[] = {
4237 0xffffffff00000000ULL,
4238 0, 0, 0,
4239 CR4_RESERVED_BITS,
4240 0, 0, 0,
4241 CR8_RESERVED_BITS,
4242 };
4243
4244 if (!valid_cr(cr))
4245 return emulate_ud(ctxt);
4246
4247 if (new_val & cr_reserved_bits[cr])
4248 return emulate_gp(ctxt, 0);
4249
4250 switch (cr) {
4251 case 0: {
4252 u64 cr4;
4253 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
4254 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
4255 return emulate_gp(ctxt, 0);
4256
4257 cr4 = ctxt->ops->get_cr(ctxt, 4);
4258 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4259
4260 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
4261 !(cr4 & X86_CR4_PAE))
4262 return emulate_gp(ctxt, 0);
4263
4264 break;
4265 }
4266 case 3: {
4267 u64 rsvd = 0;
4268
4269 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4270 if (efer & EFER_LMA) {
4271 u64 maxphyaddr;
4272 u32 eax, ebx, ecx, edx;
4273
4274 eax = 0x80000008;
4275 ecx = 0;
4276 if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
4277 &edx, false))
4278 maxphyaddr = eax & 0xff;
4279 else
4280 maxphyaddr = 36;
4281 rsvd = rsvd_bits(maxphyaddr, 63);
4282 if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4283 rsvd &= ~X86_CR3_PCID_NOFLUSH;
4284 }
4285
4286 if (new_val & rsvd)
4287 return emulate_gp(ctxt, 0);
4288
4289 break;
4290 }
4291 case 4: {
4292 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4293
4294 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
4295 return emulate_gp(ctxt, 0);
4296
4297 break;
4298 }
4299 }
4300
4301 return X86EMUL_CONTINUE;
4302 }
4303
4304 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4305 {
4306 unsigned long dr7;
4307
4308 ctxt->ops->get_dr(ctxt, 7, &dr7);
4309
4310 /* Check if DR7.Global_Enable is set */
4311 return dr7 & (1 << 13);
4312 }
4313
4314 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4315 {
4316 int dr = ctxt->modrm_reg;
4317 u64 cr4;
4318
4319 if (dr > 7)
4320 return emulate_ud(ctxt);
4321
4322 cr4 = ctxt->ops->get_cr(ctxt, 4);
4323 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4324 return emulate_ud(ctxt);
4325
4326 if (check_dr7_gd(ctxt)) {
4327 ulong dr6;
4328
4329 ctxt->ops->get_dr(ctxt, 6, &dr6);
4330 dr6 &= ~DR_TRAP_BITS;
4331 dr6 |= DR6_BD | DR6_RTM;
4332 ctxt->ops->set_dr(ctxt, 6, dr6);
4333 return emulate_db(ctxt);
4334 }
4335
4336 return X86EMUL_CONTINUE;
4337 }
4338
4339 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4340 {
4341 u64 new_val = ctxt->src.val64;
4342 int dr = ctxt->modrm_reg;
4343
4344 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4345 return emulate_gp(ctxt, 0);
4346
4347 return check_dr_read(ctxt);
4348 }
4349
4350 static int check_svme(struct x86_emulate_ctxt *ctxt)
4351 {
4352 u64 efer = 0;
4353
4354 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4355
4356 if (!(efer & EFER_SVME))
4357 return emulate_ud(ctxt);
4358
4359 return X86EMUL_CONTINUE;
4360 }
4361
4362 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4363 {
4364 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4365
4366 /* Valid physical address? */
4367 if (rax & 0xffff000000000000ULL)
4368 return emulate_gp(ctxt, 0);
4369
4370 return check_svme(ctxt);
4371 }
4372
4373 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4374 {
4375 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4376
4377 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4378 return emulate_ud(ctxt);
4379
4380 return X86EMUL_CONTINUE;
4381 }
4382
4383 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4384 {
4385 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4386 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4387
4388 /*
4389  * VMware allows access to these pseudo-PMCs even when read via RDPMC
4390  * in ring 3 while CR4.PCE = 0.
4391  */
4392 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4393 return X86EMUL_CONTINUE;
4394
4395 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4396 ctxt->ops->check_pmc(ctxt, rcx))
4397 return emulate_gp(ctxt, 0);
4398
4399 return X86EMUL_CONTINUE;
4400 }
4401
4402 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4403 {
4404 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4405 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4406 return emulate_gp(ctxt, 0);
4407
4408 return X86EMUL_CONTINUE;
4409 }
4410
4411 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4412 {
4413 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4414 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4415 return emulate_gp(ctxt, 0);
4416
4417 return X86EMUL_CONTINUE;
4418 }
4419
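/*
 * Shorthand used to build the opcode tables below: D() carries decode flags
 * only, N marks a slot as not implemented, I() and F() attach an execute or
 * fastop callback, G()/GD()/ID()/MD()/E()/GP()/EXT() redirect decode to a
 * group, group-dual, instruction-dual, mode-dual, escape, mandatory-prefix
 * or ModRM.rm-extension table, and the DI/II/DIP/IIP variants additionally
 * record an x86_intercept_* id and, for the *P forms, a ->check_perm hook.
 */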
4420 #define D(_y) { .flags = (_y) }
4421 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4422 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4423 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4424 #define N D(NotImpl)
4425 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4426 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4427 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4428 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4429 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4430 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4431 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4432 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4433 #define II(_f, _e, _i) \
4434 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4435 #define IIP(_f, _e, _i, _p) \
4436 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4437 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4438 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4439
4440 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4441 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4442 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4443 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4444 #define I2bvIP(_f, _e, _i, _p) \
4445 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4446
4447 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4448 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4449 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
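/*
 * For illustration: I(SrcNone | Priv | EmulateOnUD, em_hypercall) expands to
 * { .flags = SrcNone | Priv | EmulateOnUD, .u.execute = em_hypercall }, and
 * F6ALU(Lock, em_add) emits the six classic ALU encodings (r/m,reg and
 * reg,r/m in byte and full-width forms, plus acc,imm), all dispatching to
 * the em_add fastop, with Lock dropped where a register destination makes
 * it architecturally invalid.
 */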
4450
4451 static const struct opcode group7_rm0[] = {
4452 N,
4453 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4454 N, N, N, N, N, N,
4455 };
4456
4457 static const struct opcode group7_rm1[] = {
4458 DI(SrcNone | Priv, monitor),
4459 DI(SrcNone | Priv, mwait),
4460 N, N, N, N, N, N,
4461 };
4462
4463 static const struct opcode group7_rm2[] = {
4464 N,
4465 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4466 N, N, N, N, N, N,
4467 };
4468
4469 static const struct opcode group7_rm3[] = {
4470 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4471 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4472 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4473 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4474 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4475 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4476 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4477 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4478 };
4479
4480 static const struct opcode group7_rm7[] = {
4481 N,
4482 DIP(SrcNone, rdtscp, check_rdtsc),
4483 N, N, N, N, N, N,
4484 };
4485
4486 static const struct opcode group1[] = {
4487 F(Lock, em_add),
4488 F(Lock | PageTable, em_or),
4489 F(Lock, em_adc),
4490 F(Lock, em_sbb),
4491 F(Lock | PageTable, em_and),
4492 F(Lock, em_sub),
4493 F(Lock, em_xor),
4494 F(NoWrite, em_cmp),
4495 };
4496
4497 static const struct opcode group1A[] = {
4498 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4499 };
4500
4501 static const struct opcode group2[] = {
4502 F(DstMem | ModRM, em_rol),
4503 F(DstMem | ModRM, em_ror),
4504 F(DstMem | ModRM, em_rcl),
4505 F(DstMem | ModRM, em_rcr),
4506 F(DstMem | ModRM, em_shl),
4507 F(DstMem | ModRM, em_shr),
4508 F(DstMem | ModRM, em_shl), /* /6 aliases /4 (shl) */
4509 F(DstMem | ModRM, em_sar),
4510 };
4511
4512 static const struct opcode group3[] = {
4513 F(DstMem | SrcImm | NoWrite, em_test),
4514 F(DstMem | SrcImm | NoWrite, em_test), /* /1 aliases /0 (test) */
4515 F(DstMem | SrcNone | Lock, em_not),
4516 F(DstMem | SrcNone | Lock, em_neg),
4517 F(DstXacc | Src2Mem, em_mul_ex),
4518 F(DstXacc | Src2Mem, em_imul_ex),
4519 F(DstXacc | Src2Mem, em_div_ex),
4520 F(DstXacc | Src2Mem, em_idiv_ex),
4521 };
4522
4523 static const struct opcode group4[] = {
4524 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4525 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4526 N, N, N, N, N, N,
4527 };
4528
4529 static const struct opcode group5[] = {
4530 F(DstMem | SrcNone | Lock, em_inc),
4531 F(DstMem | SrcNone | Lock, em_dec),
4532 I(SrcMem | NearBranch, em_call_near_abs),
4533 I(SrcMemFAddr | ImplicitOps, em_call_far),
4534 I(SrcMem | NearBranch, em_jmp_abs),
4535 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
4536 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4537 };
4538
4539 static const struct opcode group6[] = {
4540 II(Prot | DstMem, em_sldt, sldt),
4541 II(Prot | DstMem, em_str, str),
4542 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4543 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4544 N, N, N, N,
4545 };
4546
4547 static const struct group_dual group7 = { {
4548 II(Mov | DstMem, em_sgdt, sgdt),
4549 II(Mov | DstMem, em_sidt, sidt),
4550 II(SrcMem | Priv, em_lgdt, lgdt),
4551 II(SrcMem | Priv, em_lidt, lidt),
4552 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4553 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4554 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4555 }, {
4556 EXT(0, group7_rm0),
4557 EXT(0, group7_rm1),
4558 EXT(0, group7_rm2),
4559 EXT(0, group7_rm3),
4560 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4561 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4562 EXT(0, group7_rm7),
4563 } };
4564
4565 static const struct opcode group8[] = {
4566 N, N, N, N,
4567 F(DstMem | SrcImmByte | NoWrite, em_bt),
4568 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4569 F(DstMem | SrcImmByte | Lock, em_btr),
4570 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4571 };
4572
4573 /*
4574  * The "memory" destination is actually always a register, since we come
4575  * from the register case of group9.
4576  */
4577 static const struct gprefix pfx_0f_c7_7 = {
4578 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
4579 };
4580
4581
4582 static const struct group_dual group9 = { {
4583 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4584 }, {
4585 N, N, N, N, N, N, N,
4586 GP(0, &pfx_0f_c7_7),
4587 } };
4588
4589 static const struct opcode group11[] = {
4590 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4591 X7(D(Undefined)),
4592 };
4593
4594 static const struct gprefix pfx_0f_ae_7 = {
4595 I(SrcMem | ByteOp, em_clflush), N, N, N,
4596 };
4597
4598 static const struct group_dual group15 = { {
4599 I(ModRM | Aligned16, em_fxsave),
4600 I(ModRM | Aligned16, em_fxrstor),
4601 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4602 }, {
4603 N, N, N, N, N, N, N, N,
4604 } };
4605
4606 static const struct gprefix pfx_0f_6f_0f_7f = {
4607 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4608 };
4609
4610 static const struct instr_dual instr_dual_0f_2b = {
4611 I(0, em_mov), N
4612 };
4613
4614 static const struct gprefix pfx_0f_2b = {
4615 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4616 };
4617
4618 static const struct gprefix pfx_0f_10_0f_11 = {
4619 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4620 };
4621
4622 static const struct gprefix pfx_0f_28_0f_29 = {
4623 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4624 };
4625
4626 static const struct gprefix pfx_0f_e7 = {
4627 N, I(Sse, em_mov), N, N,
4628 };
4629
4630 static const struct escape escape_d9 = { {
4631 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4632 }, {
4633
4634 N, N, N, N, N, N, N, N,
4635
4636 N, N, N, N, N, N, N, N,
4637
4638 N, N, N, N, N, N, N, N,
4639
4640 N, N, N, N, N, N, N, N,
4641
4642 N, N, N, N, N, N, N, N,
4643
4644 N, N, N, N, N, N, N, N,
4645
4646 N, N, N, N, N, N, N, N,
4647
4648 N, N, N, N, N, N, N, N,
4649 } };
4650
4651 static const struct escape escape_db = { {
4652 N, N, N, N, N, N, N, N,
4653 }, {
4654
4655 N, N, N, N, N, N, N, N,
4656
4657 N, N, N, N, N, N, N, N,
4658
4659 N, N, N, N, N, N, N, N,
4660
4661 N, N, N, N, N, N, N, N,
4662
4663 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4664
4665 N, N, N, N, N, N, N, N,
4666
4667 N, N, N, N, N, N, N, N,
4668
4669 N, N, N, N, N, N, N, N,
4670 } };
4671
4672 static const struct escape escape_dd = { {
4673 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4674 }, {
4675
4676 N, N, N, N, N, N, N, N,
4677
4678 N, N, N, N, N, N, N, N,
4679
4680 N, N, N, N, N, N, N, N,
4681
4682 N, N, N, N, N, N, N, N,
4683
4684 N, N, N, N, N, N, N, N,
4685
4686 N, N, N, N, N, N, N, N,
4687
4688 N, N, N, N, N, N, N, N,
4689
4690 N, N, N, N, N, N, N, N,
4691 } };
4692
4693 static const struct instr_dual instr_dual_0f_c3 = {
4694 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4695 };
4696
4697 static const struct mode_dual mode_dual_63 = {
4698 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4699 };
4700
4701 static const struct opcode opcode_table[256] = {
4702 /* 0x00 - 0x07 */
4703 F6ALU(Lock, em_add),
4704 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4705 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4706 /* 0x08 - 0x0F */
4707 F6ALU(Lock | PageTable, em_or),
4708 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4709 N,
4710 /* 0x10 - 0x17 */
4711 F6ALU(Lock, em_adc),
4712 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4713 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4714 /* 0x18 - 0x1F */
4715 F6ALU(Lock, em_sbb),
4716 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4717 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4718 /* 0x20 - 0x27 */
4719 F6ALU(Lock | PageTable, em_and), N, N,
4720 /* 0x28 - 0x2F */
4721 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4722 /* 0x30 - 0x37 */
4723 F6ALU(Lock, em_xor), N, N,
4724 /* 0x38 - 0x3F */
4725 F6ALU(NoWrite, em_cmp), N, N,
4726 /* 0x40 - 0x4F */
4727 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4728 /* 0x50 - 0x57 */
4729 X8(I(SrcReg | Stack, em_push)),
4730 /* 0x58 - 0x5F */
4731 X8(I(DstReg | Stack, em_pop)),
4732 /* 0x60 - 0x67 */
4733 I(ImplicitOps | Stack | No64, em_pusha),
4734 I(ImplicitOps | Stack | No64, em_popa),
4735 N, MD(ModRM, &mode_dual_63),
4736 N, N, N, N,
4737 /* 0x68 - 0x6F */
4738 I(SrcImm | Mov | Stack, em_push),
4739 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4740 I(SrcImmByte | Mov | Stack, em_push),
4741 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4742 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in),
4743 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out),
4744 /* 0x70 - 0x7F */
4745 X16(D(SrcImmByte | NearBranch)),
4746 /* 0x80 - 0x87 */
4747 G(ByteOp | DstMem | SrcImm, group1),
4748 G(DstMem | SrcImm, group1),
4749 G(ByteOp | DstMem | SrcImm | No64, group1),
4750 G(DstMem | SrcImmByte, group1),
4751 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4752 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4753 /* 0x88 - 0x8F */
4754 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4755 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4756 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4757 D(ModRM | SrcMem | NoAccess | DstReg),
4758 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4759 G(0, group1A),
4760 /* 0x90 - 0x97 */
4761 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4762 /* 0x98 - 0x9F */
4763 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4764 I(SrcImmFAddr | No64, em_call_far), N,
4765 II(ImplicitOps | Stack, em_pushf, pushf),
4766 II(ImplicitOps | Stack, em_popf, popf),
4767 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4768 /* 0xA0 - 0xA7 */
4769 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4770 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4771 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4772 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4773 /* 0xA8 - 0xAF */
4774 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4775 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4776 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4777 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4778 /* 0xB0 - 0xB7 */
4779 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4780 /* 0xB8 - 0xBF */
4781 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4782 /* 0xC0 - 0xC7 */
4783 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4784 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4785 I(ImplicitOps | NearBranch, em_ret),
4786 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4787 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4788 G(ByteOp, group11), G(0, group11),
4789 /* 0xC8 - 0xCF */
4790 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4791 I(ImplicitOps | SrcImmU16, em_ret_far_imm),
4792 I(ImplicitOps, em_ret_far),
4793 D(ImplicitOps), DI(SrcImmByte, intn),
4794 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4795 /* 0xD0 - 0xD7 */
4796 G(Src2One | ByteOp, group2), G(Src2One, group2),
4797 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4798 I(DstAcc | SrcImmUByte | No64, em_aam),
4799 I(DstAcc | SrcImmUByte | No64, em_aad),
4800 F(DstAcc | ByteOp | No64, em_salc),
4801 I(DstAcc | SrcXLat | ByteOp, em_mov),
4802 /* 0xD8 - 0xDF */
4803 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4804 /* 0xE0 - 0xE7 */
4805 X3(I(SrcImmByte | NearBranch, em_loop)),
4806 I(SrcImmByte | NearBranch, em_jcxz),
4807 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4808 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4809 /* 0xE8 - 0xEF */
4810 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4811 I(SrcImmFAddr | No64, em_jmp_far),
4812 D(SrcImmByte | ImplicitOps | NearBranch),
4813 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4814 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4815 /* 0xF0 - 0xF7 */
4816 N, DI(ImplicitOps, icebp), N, N,
4817 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4818 G(ByteOp, group3), G(0, group3),
4819 /* 0xF8 - 0xFF */
4820 D(ImplicitOps), D(ImplicitOps),
4821 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4822 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4823 };
4824
4825 static const struct opcode twobyte_table[256] = {
4826 /* 0x00 - 0x0F */
4827 G(0, group6), GD(0, &group7), N, N,
4828 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4829 II(ImplicitOps | Priv, em_clts, clts), N,
4830 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4831 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4832 /* 0x10 - 0x1F */
4833 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4834 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4835 N, N, N, N, N, N,
4836 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4837 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4838 /* 0x20 - 0x2F */
4839 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4840 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4841 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4842 check_cr_write),
4843 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4844 check_dr_write),
4845 N, N, N, N,
4846 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4847 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4848 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4849 N, N, N, N,
4850 /* 0x30 - 0x3F */
4851 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4852 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4853 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4854 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4855 I(ImplicitOps | EmulateOnUD, em_sysenter),
4856 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4857 N, N,
4858 N, N, N, N, N, N, N, N,
4859 /* 0x40 - 0x4F */
4860 X16(D(DstReg | SrcMem | ModRM)),
4861 /* 0x50 - 0x5F */
4862 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4863 /* 0x60 - 0x6F */
4864 N, N, N, N,
4865 N, N, N, N,
4866 N, N, N, N,
4867 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4868 /* 0x70 - 0x7F */
4869 N, N, N, N,
4870 N, N, N, N,
4871 N, N, N, N,
4872 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4873 /* 0x80 - 0x8F */
4874 X16(D(SrcImm | NearBranch)),
4875 /* 0x90 - 0x9F */
4876 X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
4877 /* 0xA0 - 0xA7 */
4878 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4879 II(ImplicitOps, em_cpuid, cpuid),
4880 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4881 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4882 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4883 /* 0xA8 - 0xAF */
4884 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4885 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4886 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4887 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4888 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4889 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4890 /* 0xB0 - 0xB7 */
4891 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4892 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4893 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4894 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4895 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4896 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4897 /* 0xB8 - 0xBF */
4898 N, N,
4899 G(BitOp, group8),
4900 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4901 I(DstReg | SrcMem | ModRM, em_bsf_c),
4902 I(DstReg | SrcMem | ModRM, em_bsr_c),
4903 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4904 /* 0xC0 - 0xC7 */
4905 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4906 N, ID(0, &instr_dual_0f_c3),
4907 N, N, N, GD(0, &group9),
4908 /* 0xC8 - 0xCF */
4909 X8(I(DstReg, em_bswap)),
4910 /* 0xD0 - 0xDF */
4911 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4912 /* 0xE0 - 0xEF */
4913 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4914 N, N, N, N, N, N, N, N,
4915 /* 0xF0 - 0xFF */
4916 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4917 };
4918
4919 static const struct instr_dual instr_dual_0f_38_f0 = {
4920 I(DstReg | SrcMem | Mov, em_movbe), N
4921 };
4922
4923 static const struct instr_dual instr_dual_0f_38_f1 = {
4924 I(DstMem | SrcReg | Mov, em_movbe), N
4925 };
4926
4927 static const struct gprefix three_byte_0f_38_f0 = {
4928 ID(0, &instr_dual_0f_38_f0), N, N, N
4929 };
4930
4931 static const struct gprefix three_byte_0f_38_f1 = {
4932 ID(0, &instr_dual_0f_38_f1), N, N, N
4933 };
4934
4935
4936
4937
4938
4939 static const struct opcode opcode_map_0f_38[256] = {
4940 /* 0x00 - 0x7f */
4941 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4942 /* 0x80 - 0xef */
4943 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4944 /* 0xf0 - 0xf1 */
4945 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4946 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
4947 /* 0xf2 - 0xff */
4948 N, N, X4(N), X8(N)
4949 };
4950
4951 #undef D
4952 #undef N
4953 #undef G
4954 #undef GD
4955 #undef I
4956 #undef GP
4957 #undef EXT
4958 #undef MD
4959 #undef ID
4960
4961 #undef D2bv
4962 #undef D2bvIP
4963 #undef I2bv
4964 #undef I2bvIP
4965 #undef F6ALU
4966
4967 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4968 {
4969 unsigned size;
4970
4971 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4972 if (size == 8)
4973 size = 4;
4974 return size;
4975 }
4976
4977 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4978 unsigned size, bool sign_extension)
4979 {
4980 int rc = X86EMUL_CONTINUE;
4981
4982 op->type = OP_IMM;
4983 op->bytes = size;
4984 op->addr.mem.ea = ctxt->_eip;
4985
4986 switch (op->bytes) {
4987 case 1:
4988 op->val = insn_fetch(s8, ctxt);
4989 break;
4990 case 2:
4991 op->val = insn_fetch(s16, ctxt);
4992 break;
4993 case 4:
4994 op->val = insn_fetch(s32, ctxt);
4995 break;
4996 case 8:
4997 op->val = insn_fetch(s64, ctxt);
4998 break;
4999 }
5000 if (!sign_extension) {
5001 switch (op->bytes) {
5002 case 1:
5003 op->val &= 0xff;
5004 break;
5005 case 2:
5006 op->val &= 0xffff;
5007 break;
5008 case 4:
5009 op->val &= 0xffffffff;
5010 break;
5011 }
5012 }
5013 done:
5014 return rc;
5015 }
5016
5017 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
5018 unsigned d)
5019 {
5020 int rc = X86EMUL_CONTINUE;
5021
5022 switch (d) {
5023 case OpReg:
5024 decode_register_operand(ctxt, op);
5025 break;
5026 case OpImmUByte:
5027 rc = decode_imm(ctxt, op, 1, false);
5028 break;
5029 case OpMem:
5030 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5031 mem_common:
5032 *op = ctxt->memop;
5033 ctxt->memopp = op;
5034 if (ctxt->d & BitOp)
5035 fetch_bit_operand(ctxt);
5036 op->orig_val = op->val;
5037 break;
5038 case OpMem64:
5039 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5040 goto mem_common;
5041 case OpAcc:
5042 op->type = OP_REG;
5043 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5044 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5045 fetch_register_operand(op);
5046 op->orig_val = op->val;
5047 break;
5048 case OpAccLo:
5049 op->type = OP_REG;
5050 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
5051 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5052 fetch_register_operand(op);
5053 op->orig_val = op->val;
5054 break;
5055 case OpAccHi:
5056 if (ctxt->d & ByteOp) {
5057 op->type = OP_NONE;
5058 break;
5059 }
5060 op->type = OP_REG;
5061 op->bytes = ctxt->op_bytes;
5062 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5063 fetch_register_operand(op);
5064 op->orig_val = op->val;
5065 break;
5066 case OpDI:
5067 op->type = OP_MEM;
5068 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5069 op->addr.mem.ea =
5070 register_address(ctxt, VCPU_REGS_RDI);
5071 op->addr.mem.seg = VCPU_SREG_ES;
5072 op->val = 0;
5073 op->count = 1;
5074 break;
5075 case OpDX:
5076 op->type = OP_REG;
5077 op->bytes = 2;
5078 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5079 fetch_register_operand(op);
5080 break;
5081 case OpCL:
5082 op->type = OP_IMM;
5083 op->bytes = 1;
5084 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5085 break;
5086 case OpImmByte:
5087 rc = decode_imm(ctxt, op, 1, true);
5088 break;
5089 case OpOne:
5090 op->type = OP_IMM;
5091 op->bytes = 1;
5092 op->val = 1;
5093 break;
5094 case OpImm:
5095 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
5096 break;
5097 case OpImm64:
5098 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
5099 break;
5100 case OpMem8:
5101 ctxt->memop.bytes = 1;
5102 if (ctxt->memop.type == OP_REG) {
5103 ctxt->memop.addr.reg = decode_register(ctxt,
5104 ctxt->modrm_rm, true);
5105 fetch_register_operand(&ctxt->memop);
5106 }
5107 goto mem_common;
5108 case OpMem16:
5109 ctxt->memop.bytes = 2;
5110 goto mem_common;
5111 case OpMem32:
5112 ctxt->memop.bytes = 4;
5113 goto mem_common;
5114 case OpImmU16:
5115 rc = decode_imm(ctxt, op, 2, false);
5116 break;
5117 case OpImmU:
5118 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
5119 break;
5120 case OpSI:
5121 op->type = OP_MEM;
5122 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5123 op->addr.mem.ea =
5124 register_address(ctxt, VCPU_REGS_RSI);
5125 op->addr.mem.seg = ctxt->seg_override;
5126 op->val = 0;
5127 op->count = 1;
5128 break;
5129 case OpXLat:
5130 op->type = OP_MEM;
5131 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5132 op->addr.mem.ea =
5133 address_mask(ctxt,
5134 reg_read(ctxt, VCPU_REGS_RBX) +
5135 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
5136 op->addr.mem.seg = ctxt->seg_override;
5137 op->val = 0;
5138 break;
5139 case OpImmFAddr:
5140 op->type = OP_IMM;
5141 op->addr.mem.ea = ctxt->_eip;
5142 op->bytes = ctxt->op_bytes + 2;
5143 insn_fetch_arr(op->valptr, op->bytes, ctxt);
5144 break;
5145 case OpMemFAddr:
5146 ctxt->memop.bytes = ctxt->op_bytes + 2;
5147 goto mem_common;
5148 case OpES:
5149 op->type = OP_IMM;
5150 op->val = VCPU_SREG_ES;
5151 break;
5152 case OpCS:
5153 op->type = OP_IMM;
5154 op->val = VCPU_SREG_CS;
5155 break;
5156 case OpSS:
5157 op->type = OP_IMM;
5158 op->val = VCPU_SREG_SS;
5159 break;
5160 case OpDS:
5161 op->type = OP_IMM;
5162 op->val = VCPU_SREG_DS;
5163 break;
5164 case OpFS:
5165 op->type = OP_IMM;
5166 op->val = VCPU_SREG_FS;
5167 break;
5168 case OpGS:
5169 op->type = OP_IMM;
5170 op->val = VCPU_SREG_GS;
5171 break;
5172 case OpImplicit:
5173 /* Special instructions do their own operand decoding. */
5174 default:
5175 op->type = OP_NONE;
5176 break;
5177 }
5178
5179 done:
5180 return rc;
5181 }
5182
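/*
 * Instruction decode front end: consume legacy/REX prefixes, the opcode
 * byte(s) and ModRM/SIB/displacement/immediate bytes, resolve the opcode
 * through the tables above (including Group/GroupDual/Prefix/Escape
 * redirections), and fill in ctxt->src, ctxt->src2 and ctxt->dst.  Nothing
 * is executed here; the caller invokes x86_emulate_insn() afterwards.
 */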
5183 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5184 {
5185 int rc = X86EMUL_CONTINUE;
5186 int mode = ctxt->mode;
5187 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5188 bool op_prefix = false;
5189 bool has_seg_override = false;
5190 struct opcode opcode;
5191 u16 dummy;
5192 struct desc_struct desc;
5193
5194 ctxt->memop.type = OP_NONE;
5195 ctxt->memopp = NULL;
5196 ctxt->_eip = ctxt->eip;
5197 ctxt->fetch.ptr = ctxt->fetch.data;
5198 ctxt->fetch.end = ctxt->fetch.data + insn_len;
5199 ctxt->opcode_len = 1;
5200 ctxt->intercept = x86_intercept_none;
5201 if (insn_len > 0)
5202 memcpy(ctxt->fetch.data, insn, insn_len);
5203 else {
5204 rc = __do_insn_fetch_bytes(ctxt, 1);
5205 if (rc != X86EMUL_CONTINUE)
5206 goto done;
5207 }
5208
5209 switch (mode) {
5210 case X86EMUL_MODE_REAL:
5211 case X86EMUL_MODE_VM86:
5212 def_op_bytes = def_ad_bytes = 2;
5213 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
5214 if (desc.d)
5215 def_op_bytes = def_ad_bytes = 4;
5216 break;
5217 case X86EMUL_MODE_PROT16:
5218 def_op_bytes = def_ad_bytes = 2;
5219 break;
5220 case X86EMUL_MODE_PROT32:
5221 def_op_bytes = def_ad_bytes = 4;
5222 break;
5223 #ifdef CONFIG_X86_64
5224 case X86EMUL_MODE_PROT64:
5225 def_op_bytes = 4;
5226 def_ad_bytes = 8;
5227 break;
5228 #endif
5229 default:
5230 return EMULATION_FAILED;
5231 }
5232
5233 ctxt->op_bytes = def_op_bytes;
5234 ctxt->ad_bytes = def_ad_bytes;
5235
5236 /* Legacy prefixes. */
5237 for (;;) {
5238 switch (ctxt->b = insn_fetch(u8, ctxt)) {
5239 case 0x66:
5240 op_prefix = true;
5241
5242 ctxt->op_bytes = def_op_bytes ^ 6;
5243 break;
5244 case 0x67:
5245 if (mode == X86EMUL_MODE_PROT64)
5246
5247 ctxt->ad_bytes = def_ad_bytes ^ 12;
5248 else
5249
5250 ctxt->ad_bytes = def_ad_bytes ^ 6;
5251 break;
5252 case 0x26:
5253 has_seg_override = true;
5254 ctxt->seg_override = VCPU_SREG_ES;
5255 break;
5256 case 0x2e:
5257 has_seg_override = true;
5258 ctxt->seg_override = VCPU_SREG_CS;
5259 break;
5260 case 0x36:
5261 has_seg_override = true;
5262 ctxt->seg_override = VCPU_SREG_SS;
5263 break;
5264 case 0x3e:
5265 has_seg_override = true;
5266 ctxt->seg_override = VCPU_SREG_DS;
5267 break;
5268 case 0x64:
5269 has_seg_override = true;
5270 ctxt->seg_override = VCPU_SREG_FS;
5271 break;
5272 case 0x65:
5273 has_seg_override = true;
5274 ctxt->seg_override = VCPU_SREG_GS;
5275 break;
5276 case 0x40 ... 0x4f:
5277 if (mode != X86EMUL_MODE_PROT64)
5278 goto done_prefixes;
5279 ctxt->rex_prefix = ctxt->b;
5280 continue;
5281 case 0xf0:
5282 ctxt->lock_prefix = 1;
5283 break;
5284 case 0xf2:
5285 case 0xf3:
5286 ctxt->rep_prefix = ctxt->b;
5287 break;
5288 default:
5289 goto done_prefixes;
5290 }
5291
5292 /* Any legacy prefix after a REX prefix nullifies its effect. */
5293
5294 ctxt->rex_prefix = 0;
5295 }
5296
5297 done_prefixes:
5298
5299
5300 if (ctxt->rex_prefix & 8)
5301 ctxt->op_bytes = 8;
5302
5303
5304 opcode = opcode_table[ctxt->b];
5305
5306 if (ctxt->b == 0x0f) {
5307 ctxt->opcode_len = 2;
5308 ctxt->b = insn_fetch(u8, ctxt);
5309 opcode = twobyte_table[ctxt->b];
5310
5311
5312 if (ctxt->b == 0x38) {
5313 ctxt->opcode_len = 3;
5314 ctxt->b = insn_fetch(u8, ctxt);
5315 opcode = opcode_map_0f_38[ctxt->b];
5316 }
5317 }
5318 ctxt->d = opcode.flags;
5319
5320 if (ctxt->d & ModRM)
5321 ctxt->modrm = insn_fetch(u8, ctxt);
5322
5323 /* vex-prefix instructions are not implemented */
5324 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5325 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5326 ctxt->d = NotImpl;
5327 }
5328
5329 while (ctxt->d & GroupMask) {
5330 switch (ctxt->d & GroupMask) {
5331 case Group:
5332 goffset = (ctxt->modrm >> 3) & 7;
5333 opcode = opcode.u.group[goffset];
5334 break;
5335 case GroupDual:
5336 goffset = (ctxt->modrm >> 3) & 7;
5337 if ((ctxt->modrm >> 6) == 3)
5338 opcode = opcode.u.gdual->mod3[goffset];
5339 else
5340 opcode = opcode.u.gdual->mod012[goffset];
5341 break;
5342 case RMExt:
5343 goffset = ctxt->modrm & 7;
5344 opcode = opcode.u.group[goffset];
5345 break;
5346 case Prefix:
5347 if (ctxt->rep_prefix && op_prefix)
5348 return EMULATION_FAILED;
5349 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5350 switch (simd_prefix) {
5351 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
5352 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
5353 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
5354 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
5355 }
5356 break;
5357 case Escape:
5358 if (ctxt->modrm > 0xbf) {
5359 size_t size = ARRAY_SIZE(opcode.u.esc->high);
5360 u32 index = array_index_nospec(
5361 ctxt->modrm - 0xc0, size);
5362
5363 opcode = opcode.u.esc->high[index];
5364 } else {
5365 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
5366 }
5367 break;
5368 case InstrDual:
5369 if ((ctxt->modrm >> 6) == 3)
5370 opcode = opcode.u.idual->mod3;
5371 else
5372 opcode = opcode.u.idual->mod012;
5373 break;
5374 case ModeDual:
5375 if (ctxt->mode == X86EMUL_MODE_PROT64)
5376 opcode = opcode.u.mdual->mode64;
5377 else
5378 opcode = opcode.u.mdual->mode32;
5379 break;
5380 default:
5381 return EMULATION_FAILED;
5382 }
5383
5384 ctxt->d &= ~(u64)GroupMask;
5385 ctxt->d |= opcode.flags;
5386 }
5387
5388 /* Unrecognised? */
5389 if (ctxt->d == 0)
5390 return EMULATION_FAILED;
5391
5392 ctxt->execute = opcode.u.execute;
5393
5394 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
5395 return EMULATION_FAILED;
5396
5397 if (unlikely(ctxt->d &
5398 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
5399 No16))) {
5400 /*
5401  * These are copied unconditionally here, and checked unconditionally
5402  * in x86_emulate_insn.
5403  */
5404 ctxt->check_perm = opcode.check_perm;
5405 ctxt->intercept = opcode.intercept;
5406
5407 if (ctxt->d & NotImpl)
5408 return EMULATION_FAILED;
5409
5410 if (mode == X86EMUL_MODE_PROT64) {
5411 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
5412 ctxt->op_bytes = 8;
5413 else if (ctxt->d & NearBranch)
5414 ctxt->op_bytes = 8;
5415 }
5416
5417 if (ctxt->d & Op3264) {
5418 if (mode == X86EMUL_MODE_PROT64)
5419 ctxt->op_bytes = 8;
5420 else
5421 ctxt->op_bytes = 4;
5422 }
5423
5424 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5425 ctxt->op_bytes = 4;
5426
5427 if (ctxt->d & Sse)
5428 ctxt->op_bytes = 16;
5429 else if (ctxt->d & Mmx)
5430 ctxt->op_bytes = 8;
5431 }
5432
5433 /* ModRM and SIB bytes. */
5434 if (ctxt->d & ModRM) {
5435 rc = decode_modrm(ctxt, &ctxt->memop);
5436 if (!has_seg_override) {
5437 has_seg_override = true;
5438 ctxt->seg_override = ctxt->modrm_seg;
5439 }
5440 } else if (ctxt->d & MemAbs)
5441 rc = decode_abs(ctxt, &ctxt->memop);
5442 if (rc != X86EMUL_CONTINUE)
5443 goto done;
5444
5445 if (!has_seg_override)
5446 ctxt->seg_override = VCPU_SREG_DS;
5447
5448 ctxt->memop.addr.mem.seg = ctxt->seg_override;
5449
5450 /*
5451  * Decode and fetch the source operand: register, memory
5452  * or immediate.
5453  */
5454 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5455 if (rc != X86EMUL_CONTINUE)
5456 goto done;
5457
5458 /*
5459  * Decode and fetch the second source operand: register, memory
5460  * or immediate.
5461  */
5462 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5463 if (rc != X86EMUL_CONTINUE)
5464 goto done;
5465
5466 /* Decode and fetch the destination operand: register or memory. */
5467 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5468
5469 if (ctxt->rip_relative && likely(ctxt->memopp))
5470 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5471 ctxt->memopp->addr.mem.ea + ctxt->_eip);
5472
5473 done:
5474 if (rc == X86EMUL_PROPAGATE_FAULT)
5475 ctxt->have_exception = true;
5476 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5477 }
5478
5479 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
5480 {
5481 return ctxt->d & PageTable;
5482 }
5483
5484 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5485 {
5486 /* The second termination condition only applies to REPE and REPNE.
5487  * Test whether the repeat-string prefix is REPE/REPZ or REPNE/REPNZ
5488  * and, if so, check the corresponding termination condition for the
5489  * compare-string instructions (cmps/scas):
5490  *	- if REPE/REPZ and ZF = 0 then done
5491  *	- if REPNE/REPNZ and ZF = 1 then done
5492  */
5493 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5494 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5495 && (((ctxt->rep_prefix == REPE_PREFIX) &&
5496 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5497 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5498 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5499 return true;
5500
5501 return false;
5502 }
5503
5504 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5505 {
5506 int rc;
5507
5508 emulator_get_fpu();
5509 rc = asm_safe("fwait");
5510 emulator_put_fpu();
5511
5512 if (unlikely(rc != X86EMUL_CONTINUE))
5513 return emulate_exception(ctxt, MF_VECTOR, 0, false);
5514
5515 return X86EMUL_CONTINUE;
5516 }
5517
5518 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
5519 struct operand *op)
5520 {
5521 if (op->type == OP_MM)
5522 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
5523 }
5524
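/*
 * Dispatch to a flag-producing "fastop" stub: load the guest's arithmetic
 * flags, call the stub variant for the current operand size (the size
 * variants are laid out FASTOP_SIZE bytes apart), then capture the
 * resulting flags back into ctxt->eflags.  The stubs signal a faulting
 * operation (e.g. a divide error) by returning fop == NULL through their
 * exception fixup, which is turned into an injected #DE below.
 */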
5525 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
5526 {
5527 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5528
5529 if (!(ctxt->d & ByteOp))
5530 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5531
5532 asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5533 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5534 [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5535 : "c"(ctxt->src2.val));
5536
5537 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5538 if (!fop)
5539 return emulate_de(ctxt);
5540 return X86EMUL_CONTINUE;
5541 }
5542
5543 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
5544 {
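/*
 * Zero the per-instruction decode state in one go: struct x86_emulate_ctxt
 * lays out every field from rip_relative up to (but not including) modrm
 * contiguously, so a single memset clears them all.
 */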
5545 memset(&ctxt->rip_relative, 0,
5546 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5547
5548 ctxt->io_read.pos = 0;
5549 ctxt->io_read.end = 0;
5550 ctxt->mem_read.end = 0;
5551 }
5552
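/*
 * Execute one decoded instruction: run mode/privilege/permission and
 * intercept checks, read the memory operands, invoke either the ->execute
 * callback, a fastop stub, or the opcode switches below, then fall through
 * to the writeback phase.  REP-prefixed string instructions may return
 * EMULATION_RESTART so the caller re-runs the same instruction.
 */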
5553 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5554 {
5555 const struct x86_emulate_ops *ops = ctxt->ops;
5556 int rc = X86EMUL_CONTINUE;
5557 int saved_dst_type = ctxt->dst.type;
5558 unsigned emul_flags;
5559
5560 ctxt->mem_read.pos = 0;
5561
5562 /* LOCK prefix is allowed only with some instructions */
5563 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5564 rc = emulate_ud(ctxt);
5565 goto done;
5566 }
5567
5568 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5569 rc = emulate_ud(ctxt);
5570 goto done;
5571 }
5572
5573 emul_flags = ctxt->ops->get_hflags(ctxt);
5574 if (unlikely(ctxt->d &
5575 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
5576 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5577 (ctxt->d & Undefined)) {
5578 rc = emulate_ud(ctxt);
5579 goto done;
5580 }
5581
5582 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5583 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5584 rc = emulate_ud(ctxt);
5585 goto done;
5586 }
5587
5588 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5589 rc = emulate_nm(ctxt);
5590 goto done;
5591 }
5592
5593 if (ctxt->d & Mmx) {
5594 rc = flush_pending_x87_faults(ctxt);
5595 if (rc != X86EMUL_CONTINUE)
5596 goto done;
5597 /*
5598  * Now that we know the fpu is exception safe, we can fetch
5599  * operands from it.
5600  */
5601 fetch_possible_mmx_operand(ctxt, &ctxt->src);
5602 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
5603 if (!(ctxt->d & Mov))
5604 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
5605 }
5606
5607 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5608 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5609 X86_ICPT_PRE_EXCEPT);
5610 if (rc != X86EMUL_CONTINUE)
5611 goto done;
5612 }
5613
5614 /* Instruction can only be executed in protected mode */
5615 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5616 rc = emulate_ud(ctxt);
5617 goto done;
5618 }
5619
5620 /* Privileged instruction can be executed only in CPL=0 */
5621 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5622 if (ctxt->d & PrivUD)
5623 rc = emulate_ud(ctxt);
5624 else
5625 rc = emulate_gp(ctxt, 0);
5626 goto done;
5627 }
5628
5629 /* Do instruction specific permission checks */
5630 if (ctxt->d & CheckPerm) {
5631 rc = ctxt->check_perm(ctxt);
5632 if (rc != X86EMUL_CONTINUE)
5633 goto done;
5634 }
5635
5636 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5637 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5638 X86_ICPT_POST_EXCEPT);
5639 if (rc != X86EMUL_CONTINUE)
5640 goto done;
5641 }
5642
5643 if (ctxt->rep_prefix && (ctxt->d & String)) {
5644 /* All REP prefixes have the same first termination condition */
5645 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5646 string_registers_quirk(ctxt);
5647 ctxt->eip = ctxt->_eip;
5648 ctxt->eflags &= ~X86_EFLAGS_RF;
5649 goto done;
5650 }
5651 }
5652 }
5653
5654 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5655 rc = segmented_read(ctxt, ctxt->src.addr.mem,
5656 ctxt->src.valptr, ctxt->src.bytes);
5657 if (rc != X86EMUL_CONTINUE)
5658 goto done;
5659 ctxt->src.orig_val64 = ctxt->src.val64;
5660 }
5661
5662 if (ctxt->src2.type == OP_MEM) {
5663 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5664 &ctxt->src2.val, ctxt->src2.bytes);
5665 if (rc != X86EMUL_CONTINUE)
5666 goto done;
5667 }
5668
5669 if ((ctxt->d & DstMask) == ImplicitOps)
5670 goto special_insn;
5671
5672
5673 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5674 /* optimisation - avoid slow emulated read if Mov */
5675 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5676 &ctxt->dst.val, ctxt->dst.bytes);
5677 if (rc != X86EMUL_CONTINUE) {
5678 if (!(ctxt->d & NoWrite) &&
5679 rc == X86EMUL_PROPAGATE_FAULT &&
5680 ctxt->exception.vector == PF_VECTOR)
5681 ctxt->exception.error_code |= PFERR_WRITE_MASK;
5682 goto done;
5683 }
5684 }
5685
5686 ctxt->dst.orig_val64 = ctxt->dst.val64;
5687
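/*
 * special_insn: final intercept check and RF handling, then execution -
 * via the ->execute / fastop callback when one is set, otherwise through
 * the one-byte, two-byte or three-byte opcode switches below.
 */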
5688 special_insn:
5689
5690 if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5691 rc = emulator_check_intercept(ctxt, ctxt->intercept,
5692 X86_ICPT_POST_MEMACCESS);
5693 if (rc != X86EMUL_CONTINUE)
5694 goto done;
5695 }
5696
5697 if (ctxt->rep_prefix && (ctxt->d & String))
5698 ctxt->eflags |= X86_EFLAGS_RF;
5699 else
5700 ctxt->eflags &= ~X86_EFLAGS_RF;
5701
5702 if (ctxt->execute) {
5703 if (ctxt->d & Fastop) {
5704 void (*fop)(struct fastop *) = (void *)ctxt->execute;
5705 rc = fastop(ctxt, fop);
5706 if (rc != X86EMUL_CONTINUE)
5707 goto done;
5708 goto writeback;
5709 }
5710 rc = ctxt->execute(ctxt);
5711 if (rc != X86EMUL_CONTINUE)
5712 goto done;
5713 goto writeback;
5714 }
5715
5716 if (ctxt->opcode_len == 2)
5717 goto twobyte_insn;
5718 else if (ctxt->opcode_len == 3)
5719 goto threebyte_insn;
5720
5721 switch (ctxt->b) {
5722 case 0x70 ... 0x7f:
5723 if (test_cc(ctxt->b, ctxt->eflags))
5724 rc = jmp_rel(ctxt, ctxt->src.val);
5725 break;
5726 case 0x8d:
5727 ctxt->dst.val = ctxt->src.addr.mem.ea;
5728 break;
5729 case 0x90 ... 0x97:
5730 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5731 ctxt->dst.type = OP_NONE;
5732 else
5733 rc = em_xchg(ctxt);
5734 break;
5735 case 0x98:
5736 switch (ctxt->op_bytes) {
5737 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5738 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5739 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5740 }
5741 break;
5742 case 0xcc:
5743 rc = emulate_int(ctxt, 3);
5744 break;
5745 case 0xcd:
5746 rc = emulate_int(ctxt, ctxt->src.val);
5747 break;
5748 case 0xce:
5749 if (ctxt->eflags & X86_EFLAGS_OF)
5750 rc = emulate_int(ctxt, 4);
5751 break;
5752 case 0xe9:
5753 case 0xeb:
5754 rc = jmp_rel(ctxt, ctxt->src.val);
5755 ctxt->dst.type = OP_NONE;
5756 break;
5757 case 0xf4:
5758 ctxt->ops->halt(ctxt);
5759 break;
5760 case 0xf5:
5761 /* complement carry flag from eflags reg (cmc) */
5762 ctxt->eflags ^= X86_EFLAGS_CF;
5763 break;
5764 case 0xf8:
5765 ctxt->eflags &= ~X86_EFLAGS_CF;
5766 break;
5767 case 0xf9:
5768 ctxt->eflags |= X86_EFLAGS_CF;
5769 break;
5770 case 0xfc:
5771 ctxt->eflags &= ~X86_EFLAGS_DF;
5772 break;
5773 case 0xfd:
5774 ctxt->eflags |= X86_EFLAGS_DF;
5775 break;
5776 default:
5777 goto cannot_emulate;
5778 }
5779
5780 if (rc != X86EMUL_CONTINUE)
5781 goto done;
5782
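/*
 * writeback: commit the results - write back a modified source operand
 * (SrcWrite), then the destination unless NoWrite, advance RSI/RDI and
 * decrement RCX for REP string forms, and finally publish the new RIP
 * via ctxt->eip.
 */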
5783 writeback:
5784 if (ctxt->d & SrcWrite) {
5785 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5786 rc = writeback(ctxt, &ctxt->src);
5787 if (rc != X86EMUL_CONTINUE)
5788 goto done;
5789 }
5790 if (!(ctxt->d & NoWrite)) {
5791 rc = writeback(ctxt, &ctxt->dst);
5792 if (rc != X86EMUL_CONTINUE)
5793 goto done;
5794 }
5795
5796 /*
5797  * restore dst type in case the decoding will be reused
5798  * (happens for string instructions)
5799  */
5800 ctxt->dst.type = saved_dst_type;
5801
5802 if ((ctxt->d & SrcMask) == SrcSI)
5803 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5804
5805 if ((ctxt->d & DstMask) == DstDI)
5806 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5807
5808 if (ctxt->rep_prefix && (ctxt->d & String)) {
5809 unsigned int count;
5810 struct read_cache *r = &ctxt->io_read;
5811 if ((ctxt->d & SrcMask) == SrcSI)
5812 count = ctxt->src.count;
5813 else
5814 count = ctxt->dst.count;
5815 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5816
5817 if (!string_insn_completed(ctxt)) {
5818 /*
5819  * Re-enter guest when pio read ahead buffer is empty
5820  * or, if it is not used, after each 1024 iteration.
5821  */
5822 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5823 (r->end == 0 || r->end != r->pos)) {
5824 /*
5825  * Reset read cache. Usually happens before
5826  * decode, but since instruction is restarted
5827  * we have to do it here.
5828  */
5829 ctxt->mem_read.end = 0;
5830 writeback_registers(ctxt);
5831 return EMULATION_RESTART;
5832 }
5833 goto done;
5834 }
5835 ctxt->eflags &= ~X86_EFLAGS_RF;
5836 }
5837
5838 ctxt->eip = ctxt->_eip;
5839
5840 done:
5841 if (rc == X86EMUL_PROPAGATE_FAULT) {
5842 WARN_ON(ctxt->exception.vector > 0x1f);
5843 ctxt->have_exception = true;
5844 }
5845 if (rc == X86EMUL_INTERCEPTED)
5846 return EMULATION_INTERCEPTED;
5847
5848 if (rc == X86EMUL_CONTINUE)
5849 writeback_registers(ctxt);
5850
5851 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5852
5853 twobyte_insn:
5854 switch (ctxt->b) {
5855 case 0x09:
5856 (ctxt->ops->wbinvd)(ctxt);
5857 break;
5858 case 0x08: /* invd */
5859 case 0x0d: /* GrpP (prefetch) */
5860 case 0x18: /* Grp16 (prefetch/nop) */
5861 case 0x1f: /* nop */
5862 break;
5863 case 0x20:
5864 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5865 break;
5866 case 0x21:
5867 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5868 break;
5869 case 0x40 ... 0x4f:
5870 if (test_cc(ctxt->b, ctxt->eflags))
5871 ctxt->dst.val = ctxt->src.val;
5872 else if (ctxt->op_bytes != 4)
5873 ctxt->dst.type = OP_NONE;
5874 break;
5875 case 0x80 ... 0x8f:
5876 if (test_cc(ctxt->b, ctxt->eflags))
5877 rc = jmp_rel(ctxt, ctxt->src.val);
5878 break;
5879 case 0x90 ... 0x9f:
5880 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5881 break;
5882 case 0xb6 ... 0xb7:
5883 ctxt->dst.bytes = ctxt->op_bytes;
5884 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5885 : (u16) ctxt->src.val;
5886 break;
5887 case 0xbe ... 0xbf:
5888 ctxt->dst.bytes = ctxt->op_bytes;
5889 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5890 (s16) ctxt->src.val;
5891 break;
5892 default:
5893 goto cannot_emulate;
5894 }
5895
5896 threebyte_insn:
5897
5898 if (rc != X86EMUL_CONTINUE)
5899 goto done;
5900
5901 goto writeback;
5902
5903 cannot_emulate:
5904 return EMULATION_FAILED;
5905 }
5906
5907 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5908 {
5909 invalidate_registers(ctxt);
5910 }
5911
5912 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5913 {
5914 writeback_registers(ctxt);
5915 }
5916
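/*
 * Callers may cache a guest physical address translation for the emulated
 * access; that is only safe when the instruction has a single memory
 * operand whose address does not change, so REP string instructions and
 * TwoMemOp instructions opt out here.
 */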
5917 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
5918 {
5919 if (ctxt->rep_prefix && (ctxt->d & String))
5920 return false;
5921
5922 if (ctxt->d & TwoMemOp)
5923 return false;
5924
5925 return true;
5926 }