Lines matching refs: svm (identifier cross-reference of arch/x86/kvm/svm.c)
208 static void svm_complete_interrupts(struct vcpu_svm *svm);
210 static int nested_svm_exit_handled(struct vcpu_svm *svm);
211 static int nested_svm_intercept(struct vcpu_svm *svm);
212 static int nested_svm_vmexit(struct vcpu_svm *svm);
213 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
256 static void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
261 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
263 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
266 c = &svm->vmcb->control; in recalc_intercepts()
267 h = &svm->nested.hsave->control; in recalc_intercepts()
268 g = &svm->nested; in recalc_intercepts()
276 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) in get_host_vmcb() argument
278 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
279 return svm->nested.hsave; in get_host_vmcb()
281 return svm->vmcb; in get_host_vmcb()
284 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) in set_cr_intercept() argument
286 struct vmcb *vmcb = get_host_vmcb(svm); in set_cr_intercept()
290 recalc_intercepts(svm); in set_cr_intercept()
293 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) in clr_cr_intercept() argument
295 struct vmcb *vmcb = get_host_vmcb(svm); in clr_cr_intercept()
299 recalc_intercepts(svm); in clr_cr_intercept()
302 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) in is_cr_intercept() argument
304 struct vmcb *vmcb = get_host_vmcb(svm); in is_cr_intercept()
309 static inline void set_dr_intercepts(struct vcpu_svm *svm) in set_dr_intercepts() argument
311 struct vmcb *vmcb = get_host_vmcb(svm); in set_dr_intercepts()
330 recalc_intercepts(svm); in set_dr_intercepts()
333 static inline void clr_dr_intercepts(struct vcpu_svm *svm) in clr_dr_intercepts() argument
335 struct vmcb *vmcb = get_host_vmcb(svm); in clr_dr_intercepts()
339 recalc_intercepts(svm); in clr_dr_intercepts()
342 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) in set_exception_intercept() argument
344 struct vmcb *vmcb = get_host_vmcb(svm); in set_exception_intercept()
348 recalc_intercepts(svm); in set_exception_intercept()
351 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) in clr_exception_intercept() argument
353 struct vmcb *vmcb = get_host_vmcb(svm); in clr_exception_intercept()
357 recalc_intercepts(svm); in clr_exception_intercept()
360 static inline void set_intercept(struct vcpu_svm *svm, int bit) in set_intercept() argument
362 struct vmcb *vmcb = get_host_vmcb(svm); in set_intercept()
366 recalc_intercepts(svm); in set_intercept()
369 static inline void clr_intercept(struct vcpu_svm *svm, int bit) in clr_intercept() argument
371 struct vmcb *vmcb = get_host_vmcb(svm); in clr_intercept()
375 recalc_intercepts(svm); in clr_intercept()
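
The set_*/clr_* helpers listed above all follow one pattern: pick the VMCB that currently belongs to the host (the real VMCB, or the hsave copy while a nested guest runs), flip a bit in the relevant intercept bitmask, and recalculate the effective intercepts. A minimal standalone sketch of that pattern follows; vmcb_model and svm_model are illustrative stand-in types, not the kernel's structures.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's vmcb / vcpu_svm structures. */
struct vmcb_model {
	uint64_t intercept;         /* general instruction intercept bits */
};

struct svm_model {
	struct vmcb_model *vmcb;    /* active VMCB                         */
	struct vmcb_model *hsave;   /* host state while a nested guest runs */
	bool guest_mode;            /* nested guest active?                */
	bool intercepts_dirty;      /* models mark_dirty(vmcb, VMCB_INTERCEPTS) */
};

/* While a nested guest runs, host-side changes go to the hsave copy. */
static struct vmcb_model *get_host_vmcb(struct svm_model *svm)
{
	return svm->guest_mode ? svm->hsave : svm->vmcb;
}

/* Models recalc_intercepts(): merge host and nested-guest intercepts. */
static void recalc_intercepts(struct svm_model *svm)
{
	svm->intercepts_dirty = true;
	if (!svm->guest_mode)
		return;
	/* The kernel ORs the hsave intercepts with the L1 guest's here. */
}

static void set_intercept(struct svm_model *svm, int bit)
{
	get_host_vmcb(svm)->intercept |= 1ULL << bit;
	recalc_intercepts(svm);
}

static void clr_intercept(struct svm_model *svm, int bit)
{
	get_host_vmcb(svm)->intercept &= ~(1ULL << bit);
	recalc_intercepts(svm);
}
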
378 static inline void enable_gif(struct vcpu_svm *svm) in enable_gif() argument
380 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
383 static inline void disable_gif(struct vcpu_svm *svm) in disable_gif() argument
385 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
388 static inline bool gif_set(struct vcpu_svm *svm) in gif_set() argument
390 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
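
enable_gif()/disable_gif()/gif_set() just track AMD's Global Interrupt Flag in a software flag (HF_GIF_MASK inside vcpu.arch.hflags). A tiny sketch of the same idea; the bit position used here is illustrative, not the kernel header's value.

#include <stdbool.h>

#define HF_GIF_MASK (1UL << 1)   /* illustrative bit position only */

static unsigned long hflags;     /* stands in for svm->vcpu.arch.hflags */

static void enable_gif(void)  { hflags |= HF_GIF_MASK;  }
static void disable_gif(void) { hflags &= ~HF_GIF_MASK; }
static bool gif_set(void)     { return !!(hflags & HF_GIF_MASK); }
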
493 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
496 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
503 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
506 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
508 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
514 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction() local
516 if (svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
518 svm->next_rip = svm->vmcb->control.next_rip; in skip_emulated_instruction()
521 if (!svm->next_rip) { in skip_emulated_instruction()
527 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) in skip_emulated_instruction()
529 __func__, kvm_rip_read(vcpu), svm->next_rip); in skip_emulated_instruction()
531 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
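
skip_emulated_instruction() prefers the hardware-provided next_rip (available with the NRIPS feature) and falls back to the emulator when it is zero; it also sanity-checks that the skip does not exceed the maximum instruction length. A compact sketch of that decision, assuming MAX_INST_SIZE is 15 as on x86.

#include <stdint.h>
#include <stdio.h>

#define MAX_INST_SIZE 15   /* longest legal x86 instruction */

/* Returns the RIP to resume at, or 0 when the emulator must step instead. */
static uint64_t skip_insn(uint64_t rip, uint64_t hw_next_rip)
{
	if (hw_next_rip == 0)
		return 0;                        /* no NRIPS data: emulate */

	if (hw_next_rip - rip > MAX_INST_SIZE)   /* mirrors the sanity check */
		fprintf(stderr, "suspicious next_rip %llx for rip %llx\n",
			(unsigned long long)hw_next_rip, (unsigned long long)rip);

	return hw_next_rip;
}
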
539 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception() local
546 nested_svm_check_exception(svm, nr, has_error_code, error_code)) in svm_queue_exception()
550 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
559 skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
560 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
561 svm->int3_rip = rip + svm->vmcb->save.cs.base; in svm_queue_exception()
562 svm->int3_injected = rip - old_rip; in svm_queue_exception()
565 svm->vmcb->control.event_inj = nr in svm_queue_exception()
569 svm->vmcb->control.event_inj_err = error_code; in svm_queue_exception()
847 static void svm_enable_lbrv(struct vcpu_svm *svm) in svm_enable_lbrv() argument
849 u32 *msrpm = svm->msrpm; in svm_enable_lbrv()
851 svm->vmcb->control.lbr_ctl = 1; in svm_enable_lbrv()
858 static void svm_disable_lbrv(struct vcpu_svm *svm) in svm_disable_lbrv() argument
860 u32 *msrpm = svm->msrpm; in svm_disable_lbrv()
862 svm->vmcb->control.lbr_ctl = 0; in svm_disable_lbrv()
962 struct vcpu_svm *svm = to_svm(vcpu); in svm_read_tsc_offset() local
964 return svm->vmcb->control.tsc_offset; in svm_read_tsc_offset()
969 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset() local
973 g_tsc_offset = svm->vmcb->control.tsc_offset - in svm_write_tsc_offset()
974 svm->nested.hsave->control.tsc_offset; in svm_write_tsc_offset()
975 svm->nested.hsave->control.tsc_offset = offset; in svm_write_tsc_offset()
978 svm->vmcb->control.tsc_offset, in svm_write_tsc_offset()
981 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; in svm_write_tsc_offset()
983 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
988 struct vcpu_svm *svm = to_svm(vcpu); in svm_adjust_tsc_offset_guest() local
990 svm->vmcb->control.tsc_offset += adjustment; in svm_adjust_tsc_offset_guest()
992 svm->nested.hsave->control.tsc_offset += adjustment; in svm_adjust_tsc_offset_guest()
995 svm->vmcb->control.tsc_offset - adjustment, in svm_adjust_tsc_offset_guest()
996 svm->vmcb->control.tsc_offset); in svm_adjust_tsc_offset_guest()
998 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_adjust_tsc_offset_guest()
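
svm_write_tsc_offset() has to preserve the delta the L1 hypervisor programmed for its nested guest: it computes g_tsc_offset as the difference between the active VMCB's offset and the hsave offset, stores the new host offset in hsave, and re-applies the guest delta on top. A worked sketch of that arithmetic with plain integers, no kernel types.

#include <stdint.h>

struct tsc_state {
	int64_t vmcb_offset;   /* offset in the VMCB currently being run   */
	int64_t hsave_offset;  /* host (L0->L1) offset saved while nested  */
	int     guest_mode;    /* nested guest active?                     */
};

/* Mirrors svm_write_tsc_offset(): keep the nested guest's extra delta. */
static void write_tsc_offset(struct tsc_state *s, int64_t offset)
{
	int64_t g_tsc_offset = 0;

	if (s->guest_mode) {
		g_tsc_offset = s->vmcb_offset - s->hsave_offset;
		s->hsave_offset = offset;
	}
	s->vmcb_offset = offset + g_tsc_offset;
}

/* Mirrors svm_adjust_tsc_offset_guest(): adjustments apply to both copies. */
static void adjust_tsc_offset(struct tsc_state *s, int64_t adjustment)
{
	s->vmcb_offset += adjustment;
	if (s->guest_mode)
		s->hsave_offset += adjustment;
}
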
1001 static void init_vmcb(struct vcpu_svm *svm) in init_vmcb() argument
1003 struct vmcb_control_area *control = &svm->vmcb->control; in init_vmcb()
1004 struct vmcb_save_area *save = &svm->vmcb->save; in init_vmcb()
1006 svm->vcpu.fpu_active = 1; in init_vmcb()
1007 svm->vcpu.arch.hflags = 0; in init_vmcb()
1009 set_cr_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1010 set_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1011 set_cr_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1012 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1013 set_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1014 set_cr_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1015 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1017 set_dr_intercepts(svm); in init_vmcb()
1019 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1020 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1021 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1022 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1023 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1025 set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1026 set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1027 set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1028 set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1029 set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1030 set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1031 set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1032 set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1033 set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1034 set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1035 set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1036 set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1037 set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1038 set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1039 set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1040 set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1041 set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1042 set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1043 set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1044 set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1045 set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1046 set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1047 set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1048 set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1049 set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1052 control->msrpm_base_pa = __pa(svm->msrpm); in init_vmcb()
1074 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1076 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1078 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1084 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1085 kvm_mmu_reset_context(&svm->vcpu); in init_vmcb()
1093 clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1094 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1095 clr_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1096 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1097 save->g_pat = svm->vcpu.arch.pat; in init_vmcb()
1101 svm->asid_generation = 0; in init_vmcb()
1103 svm->nested.vmcb = 0; in init_vmcb()
1104 svm->vcpu.arch.hflags = 0; in init_vmcb()
1108 set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1111 mark_all_dirty(svm->vmcb); in init_vmcb()
1113 enable_gif(svm); in init_vmcb()
1118 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1123 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_vcpu_reset()
1125 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_vcpu_reset()
1126 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_vcpu_reset()
1128 init_vmcb(svm); in svm_vcpu_reset()
1136 struct vcpu_svm *svm; in svm_create_vcpu() local
1143 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in svm_create_vcpu()
1144 if (!svm) { in svm_create_vcpu()
1149 err = kvm_vcpu_init(&svm->vcpu, kvm, id); in svm_create_vcpu()
1170 svm->nested.hsave = page_address(hsave_page); in svm_create_vcpu()
1172 svm->msrpm = page_address(msrpm_pages); in svm_create_vcpu()
1173 svm_vcpu_init_msrpm(svm->msrpm); in svm_create_vcpu()
1175 svm->nested.msrpm = page_address(nested_msrpm_pages); in svm_create_vcpu()
1176 svm_vcpu_init_msrpm(svm->nested.msrpm); in svm_create_vcpu()
1178 svm->vmcb = page_address(page); in svm_create_vcpu()
1179 clear_page(svm->vmcb); in svm_create_vcpu()
1180 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; in svm_create_vcpu()
1181 svm->asid_generation = 0; in svm_create_vcpu()
1182 init_vmcb(svm); in svm_create_vcpu()
1184 svm_init_osvw(&svm->vcpu); in svm_create_vcpu()
1186 return &svm->vcpu; in svm_create_vcpu()
1195 kvm_vcpu_uninit(&svm->vcpu); in svm_create_vcpu()
1197 kmem_cache_free(kvm_vcpu_cache, svm); in svm_create_vcpu()
1204 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu() local
1206 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); in svm_free_vcpu()
1207 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1208 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_vcpu()
1209 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1211 kmem_cache_free(kvm_vcpu_cache, svm); in svm_free_vcpu()
1216 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1220 svm->asid_generation = 0; in svm_vcpu_load()
1221 mark_all_dirty(svm->vmcb); in svm_vcpu_load()
1227 savesegment(fs, svm->host.fs); in svm_vcpu_load()
1228 savesegment(gs, svm->host.gs); in svm_vcpu_load()
1229 svm->host.ldt = kvm_read_ldt(); in svm_vcpu_load()
1232 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_load()
1245 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put() local
1249 kvm_load_ldt(svm->host.ldt); in svm_vcpu_put()
1251 loadsegment(fs, svm->host.fs); in svm_vcpu_put()
1253 load_gs_index(svm->host.gs); in svm_vcpu_put()
1256 loadsegment(gs, svm->host.gs); in svm_vcpu_put()
1260 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_put()
1290 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1292 set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1295 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1297 clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1402 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1404 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1405 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1410 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1412 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1413 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1414 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1419 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1421 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1422 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1427 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1429 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1430 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1431 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1446 static void update_cr0_intercept(struct vcpu_svm *svm) in update_cr0_intercept() argument
1448 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1449 u64 *hcr0 = &svm->vmcb->save.cr0; in update_cr0_intercept()
1451 if (!svm->vcpu.fpu_active) in update_cr0_intercept()
1457 mark_dirty(svm->vmcb, VMCB_CR); in update_cr0_intercept()
1459 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { in update_cr0_intercept()
1460 clr_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1461 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1463 set_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1464 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1470 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1476 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1481 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1499 svm->vmcb->save.cr0 = cr0; in svm_set_cr0()
1500 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1501 update_cr0_intercept(svm); in svm_set_cr0()
1527 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1553 svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; in svm_set_segment()
1555 mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1560 struct vcpu_svm *svm = to_svm(vcpu); in update_bp_intercept() local
1562 clr_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
1566 set_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
1571 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
1576 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1579 svm->asid_generation = sd->asid_generation; in new_asid()
1580 svm->vmcb->control.asid = sd->next_asid++; in new_asid()
1582 mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
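
new_asid() hands each vCPU a fresh ASID from the per-CPU pool; when the pool wraps, the generation counter is bumped and a full TLB flush for all ASIDs is requested. A simplified model of that allocation; the wrap handling shown is assumed from the surrounding logic rather than copied verbatim from the kernel.

#include <stdbool.h>
#include <stdint.h>

struct cpu_asid_pool {
	uint32_t max_asid;         /* highest ASID supported on this CPU */
	uint32_t next_asid;        /* next ASID to hand out              */
	uint64_t asid_generation;  /* bumped every time the pool wraps   */
};

struct vcpu_asid {
	uint32_t asid;
	uint64_t asid_generation;
	bool     flush_all;        /* models TLB_CONTROL_FLUSH_ALL_ASID  */
};

/* Assign a new ASID; on wrap-around, start a new generation and flush. */
static void new_asid(struct vcpu_asid *v, struct cpu_asid_pool *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		v->flush_all = true;
	}
	v->asid_generation = sd->asid_generation;
	v->asid = sd->next_asid++;
}
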
1592 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr6() local
1594 svm->vmcb->save.dr6 = value; in svm_set_dr6()
1595 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr6()
1600 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
1607 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1610 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
1615 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
1617 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1618 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1621 static int pf_interception(struct vcpu_svm *svm) in pf_interception() argument
1623 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
1627 switch (svm->apf_reason) { in pf_interception()
1629 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1632 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) in pf_interception()
1633 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); in pf_interception()
1634 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in pf_interception()
1635 svm->vmcb->control.insn_bytes, in pf_interception()
1636 svm->vmcb->control.insn_len); in pf_interception()
1639 svm->apf_reason = 0; in pf_interception()
1645 svm->apf_reason = 0; in pf_interception()
1654 static int db_interception(struct vcpu_svm *svm) in db_interception() argument
1656 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1658 if (!(svm->vcpu.guest_debug & in db_interception()
1660 !svm->nmi_singlestep) { in db_interception()
1661 kvm_queue_exception(&svm->vcpu, DB_VECTOR); in db_interception()
1665 if (svm->nmi_singlestep) { in db_interception()
1666 svm->nmi_singlestep = false; in db_interception()
1667 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) in db_interception()
1668 svm->vmcb->save.rflags &= in db_interception()
1672 if (svm->vcpu.guest_debug & in db_interception()
1676 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
1684 static int bp_interception(struct vcpu_svm *svm) in bp_interception() argument
1686 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1689 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
1694 static int ud_interception(struct vcpu_svm *svm) in ud_interception() argument
1698 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); in ud_interception()
1700 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in ud_interception()
1704 static int ac_interception(struct vcpu_svm *svm) in ac_interception() argument
1706 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1712 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_activate() local
1714 clr_exception_intercept(svm, NM_VECTOR); in svm_fpu_activate()
1716 svm->vcpu.fpu_active = 1; in svm_fpu_activate()
1717 update_cr0_intercept(svm); in svm_fpu_activate()
1720 static int nm_interception(struct vcpu_svm *svm) in nm_interception() argument
1722 svm_fpu_activate(&svm->vcpu); in nm_interception()
1765 static void svm_handle_mce(struct vcpu_svm *svm) in svm_handle_mce() argument
1774 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1790 static int mc_interception(struct vcpu_svm *svm) in mc_interception() argument
1795 static int shutdown_interception(struct vcpu_svm *svm) in shutdown_interception() argument
1797 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1803 clear_page(svm->vmcb); in shutdown_interception()
1804 init_vmcb(svm); in shutdown_interception()
1810 static int io_interception(struct vcpu_svm *svm) in io_interception() argument
1812 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception()
1813 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
1817 ++svm->vcpu.stat.io_exits; in io_interception()
1825 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
1826 skip_emulated_instruction(&svm->vcpu); in io_interception()
1831 static int nmi_interception(struct vcpu_svm *svm) in nmi_interception() argument
1836 static int intr_interception(struct vcpu_svm *svm) in intr_interception() argument
1838 ++svm->vcpu.stat.irq_exits; in intr_interception()
1842 static int nop_on_interception(struct vcpu_svm *svm) in nop_on_interception() argument
1847 static int halt_interception(struct vcpu_svm *svm) in halt_interception() argument
1849 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; in halt_interception()
1850 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
1853 static int vmmcall_interception(struct vcpu_svm *svm) in vmmcall_interception() argument
1855 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmmcall_interception()
1856 kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
1862 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
1864 return svm->nested.nested_cr3; in nested_svm_get_tdp_cr3()
1869 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
1870 u64 cr3 = svm->nested.nested_cr3; in nested_svm_get_tdp_pdptr()
1884 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_set_tdp_cr3() local
1886 svm->vmcb->control.nested_cr3 = root; in nested_svm_set_tdp_cr3()
1887 mark_dirty(svm->vmcb, VMCB_NPT); in nested_svm_set_tdp_cr3()
1894 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
1896 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
1901 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
1902 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
1903 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
1904 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
1907 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
1908 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
1914 if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) in nested_svm_inject_npf_exit()
1915 svm->vmcb->control.exit_info_1 &= ~1; in nested_svm_inject_npf_exit()
1917 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
1938 static int nested_svm_check_permissions(struct vcpu_svm *svm) in nested_svm_check_permissions() argument
1940 if (!(svm->vcpu.arch.efer & EFER_SVME) in nested_svm_check_permissions()
1941 || !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
1942 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
1946 if (svm->vmcb->save.cpl) { in nested_svm_check_permissions()
1947 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
1954 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, in nested_svm_check_exception() argument
1959 if (!is_guest_mode(&svm->vcpu)) in nested_svm_check_exception()
1962 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_check_exception()
1963 svm->vmcb->control.exit_code_hi = 0; in nested_svm_check_exception()
1964 svm->vmcb->control.exit_info_1 = error_code; in nested_svm_check_exception()
1965 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_check_exception()
1967 vmexit = nested_svm_intercept(svm); in nested_svm_check_exception()
1969 svm->nested.exit_required = true; in nested_svm_check_exception()
1975 static inline bool nested_svm_intr(struct vcpu_svm *svm) in nested_svm_intr() argument
1977 if (!is_guest_mode(&svm->vcpu)) in nested_svm_intr()
1980 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_intr()
1983 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) in nested_svm_intr()
1991 if (svm->nested.exit_required) in nested_svm_intr()
1994 svm->vmcb->control.exit_code = SVM_EXIT_INTR; in nested_svm_intr()
1995 svm->vmcb->control.exit_info_1 = 0; in nested_svm_intr()
1996 svm->vmcb->control.exit_info_2 = 0; in nested_svm_intr()
1998 if (svm->nested.intercept & 1ULL) { in nested_svm_intr()
2005 svm->nested.exit_required = true; in nested_svm_intr()
2006 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in nested_svm_intr()
2014 static inline bool nested_svm_nmi(struct vcpu_svm *svm) in nested_svm_nmi() argument
2016 if (!is_guest_mode(&svm->vcpu)) in nested_svm_nmi()
2019 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) in nested_svm_nmi()
2022 svm->vmcb->control.exit_code = SVM_EXIT_NMI; in nested_svm_nmi()
2023 svm->nested.exit_required = true; in nested_svm_nmi()
2028 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) in nested_svm_map() argument
2034 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT); in nested_svm_map()
2043 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_map()
2054 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
2061 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
2064 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
2065 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
2067 gpa = svm->nested.vmcb_iopm + (port / 8); in nested_svm_intercept_ioio()
2073 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
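
The IOPM check above turns the intercepted port number into a byte offset inside the nested guest's I/O permission map and then tests the bits covering the access size. A standalone sketch of that lookup, assuming the relevant part of the IOPM has already been copied into a local buffer (the kernel instead reads it from guest memory with kvm_vcpu_read_guest()).

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* One permission bit per I/O port; 'size' is the access width in bytes. */
static bool iopm_intercepted(const uint8_t *iopm, uint16_t port, unsigned size)
{
	unsigned byte   = port / 8;                    /* byte index into the bitmap */
	unsigned start  = port % 8;                    /* first bit inside that byte */
	uint16_t mask   = ((1u << size) - 1) << start; /* bits covered by the access */
	uint16_t window = 0;

	/* An access can straddle a byte boundary, so read one or two bytes
	 * (little-endian layout, as on x86). */
	memcpy(&window, &iopm[byte], (start + size > 8) ? 2 : 1);

	return (window & mask) != 0;                   /* any set bit: intercept */
}
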
2079 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
2084 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
2087 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
2089 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
2098 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
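
Each MSR has two permission bits (read and write) in the nested MSR permission map, so the check above computes a 32-bit word offset for the MSR and then tests bit 2*(msr & 0xf) + write inside it. A sketch of the bit selection, assuming the word containing the MSR's bits has already been fetched (the kernel reads it from the L1 guest's msrpm with kvm_vcpu_read_guest()).

#include <stdbool.h>
#include <stdint.h>

/*
 * Two permission bits per MSR inside a 32-bit msrpm word:
 * bit 2*(msr & 0xf) controls reads, the next bit controls writes.
 */
static bool msrpm_intercepted(uint32_t msrpm_word, uint32_t msr, bool write)
{
	uint32_t bit = 2 * (msr & 0xf) + (write ? 1 : 0);

	return (msrpm_word >> bit) & 1;
}
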
2104 static int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
2106 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
2120 if (!npt_enabled && svm->apf_reason == 0) in nested_svm_exit_special()
2124 nm_interception(svm); in nested_svm_exit_special()
2136 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
2138 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
2143 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
2146 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
2150 if (svm->nested.intercept_cr & bit) in nested_svm_intercept()
2156 if (svm->nested.intercept_dr & bit) in nested_svm_intercept()
2162 if (svm->nested.intercept_exceptions & excp_bits) in nested_svm_intercept()
2166 svm->apf_reason != 0) in nested_svm_intercept()
2176 if (svm->nested.intercept & exit_bits) in nested_svm_intercept()
2184 static int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
2188 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
2191 nested_svm_vmexit(svm); in nested_svm_exit_handled()
2226 static int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
2229 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
2230 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
2240 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); in nested_svm_vmexit()
2245 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
2246 svm->nested.vmcb = 0; in nested_svm_vmexit()
2249 disable_gif(svm); in nested_svm_vmexit()
2257 nested_vmcb->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
2258 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
2259 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
2261 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
2262 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
2280 if (svm->nrips_enabled) in nested_svm_vmexit()
2303 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_vmexit()
2309 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
2310 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
2312 svm->nested.nested_cr3 = 0; in nested_svm_vmexit()
2315 svm->vmcb->save.es = hsave->save.es; in nested_svm_vmexit()
2316 svm->vmcb->save.cs = hsave->save.cs; in nested_svm_vmexit()
2317 svm->vmcb->save.ss = hsave->save.ss; in nested_svm_vmexit()
2318 svm->vmcb->save.ds = hsave->save.ds; in nested_svm_vmexit()
2319 svm->vmcb->save.gdtr = hsave->save.gdtr; in nested_svm_vmexit()
2320 svm->vmcb->save.idtr = hsave->save.idtr; in nested_svm_vmexit()
2321 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
2322 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
2323 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
2324 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
2326 svm->vmcb->save.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2327 svm->vcpu.arch.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2329 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); in nested_svm_vmexit()
2331 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); in nested_svm_vmexit()
2332 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); in nested_svm_vmexit()
2333 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); in nested_svm_vmexit()
2334 svm->vmcb->save.dr7 = 0; in nested_svm_vmexit()
2335 svm->vmcb->save.cpl = 0; in nested_svm_vmexit()
2336 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
2338 mark_all_dirty(svm->vmcb); in nested_svm_vmexit()
2342 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
2343 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmexit()
2344 kvm_mmu_load(&svm->vcpu); in nested_svm_vmexit()
2349 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
2358 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
2369 offset = svm->nested.vmcb_msrpm + (p * 4); in nested_svm_vmrun_msrpm()
2371 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
2374 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
2377 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); in nested_svm_vmrun_msrpm()
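
nested_svm_vmrun_msrpm() builds the MSR bitmap that actually runs the L2 guest by OR-ing the host's (L0) permission words with the words the L1 hypervisor supplied, so an MSR exits if either level wants it intercepted. A simplified merge over in-memory copies; the buffer-sized loop is a hypothetical simplification, since the kernel only merges the word offsets it knows are in use.

#include <stddef.h>
#include <stdint.h>

/* OR host (L0) and nested (L1) permission words: either side can force an exit. */
static void merge_msrpm(uint32_t *merged, const uint32_t *host,
			const uint32_t *guest, size_t nwords)
{
	for (size_t i = 0; i < nwords; i++)
		merged[i] = host[i] | guest[i];
}
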
2396 static bool nested_svm_vmrun(struct vcpu_svm *svm) in nested_svm_vmrun() argument
2399 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
2400 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmrun()
2404 vmcb_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
2406 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in nested_svm_vmrun()
2421 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, in nested_svm_vmrun()
2433 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
2434 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
2446 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
2447 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
2448 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
2449 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
2450 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
2456 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
2460 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) in nested_svm_vmrun()
2461 svm->vcpu.arch.hflags |= HF_HIF_MASK; in nested_svm_vmrun()
2463 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; in nested_svm_vmrun()
2466 kvm_mmu_unload(&svm->vcpu); in nested_svm_vmrun()
2467 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; in nested_svm_vmrun()
2468 nested_svm_init_mmu_context(&svm->vcpu); in nested_svm_vmrun()
2472 svm->vmcb->save.es = nested_vmcb->save.es; in nested_svm_vmrun()
2473 svm->vmcb->save.cs = nested_vmcb->save.cs; in nested_svm_vmrun()
2474 svm->vmcb->save.ss = nested_vmcb->save.ss; in nested_svm_vmrun()
2475 svm->vmcb->save.ds = nested_vmcb->save.ds; in nested_svm_vmrun()
2476 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; in nested_svm_vmrun()
2477 svm->vmcb->save.idtr = nested_vmcb->save.idtr; in nested_svm_vmrun()
2478 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); in nested_svm_vmrun()
2479 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); in nested_svm_vmrun()
2480 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); in nested_svm_vmrun()
2481 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); in nested_svm_vmrun()
2483 svm->vmcb->save.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2484 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2486 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); in nested_svm_vmrun()
2489 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmrun()
2491 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; in nested_svm_vmrun()
2492 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); in nested_svm_vmrun()
2493 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); in nested_svm_vmrun()
2494 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); in nested_svm_vmrun()
2497 svm->vmcb->save.rax = nested_vmcb->save.rax; in nested_svm_vmrun()
2498 svm->vmcb->save.rsp = nested_vmcb->save.rsp; in nested_svm_vmrun()
2499 svm->vmcb->save.rip = nested_vmcb->save.rip; in nested_svm_vmrun()
2500 svm->vmcb->save.dr7 = nested_vmcb->save.dr7; in nested_svm_vmrun()
2501 svm->vmcb->save.dr6 = nested_vmcb->save.dr6; in nested_svm_vmrun()
2502 svm->vmcb->save.cpl = nested_vmcb->save.cpl; in nested_svm_vmrun()
2504 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2505 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; in nested_svm_vmrun()
2508 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; in nested_svm_vmrun()
2509 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; in nested_svm_vmrun()
2510 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; in nested_svm_vmrun()
2511 svm->nested.intercept = nested_vmcb->control.intercept; in nested_svm_vmrun()
2513 svm_flush_tlb(&svm->vcpu); in nested_svm_vmrun()
2514 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; in nested_svm_vmrun()
2516 svm->vcpu.arch.hflags |= HF_VINTR_MASK; in nested_svm_vmrun()
2518 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; in nested_svm_vmrun()
2520 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { in nested_svm_vmrun()
2522 clr_cr_intercept(svm, INTERCEPT_CR8_READ); in nested_svm_vmrun()
2523 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in nested_svm_vmrun()
2527 clr_intercept(svm, INTERCEPT_VMMCALL); in nested_svm_vmrun()
2529 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; in nested_svm_vmrun()
2530 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; in nested_svm_vmrun()
2531 svm->vmcb->control.int_state = nested_vmcb->control.int_state; in nested_svm_vmrun()
2532 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; in nested_svm_vmrun()
2533 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; in nested_svm_vmrun()
2534 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; in nested_svm_vmrun()
2539 enter_guest_mode(&svm->vcpu); in nested_svm_vmrun()
2545 recalc_intercepts(svm); in nested_svm_vmrun()
2547 svm->nested.vmcb = vmcb_gpa; in nested_svm_vmrun()
2549 enable_gif(svm); in nested_svm_vmrun()
2551 mark_all_dirty(svm->vmcb); in nested_svm_vmrun()
2572 static int vmload_interception(struct vcpu_svm *svm) in vmload_interception() argument
2577 if (nested_svm_check_permissions(svm)) in vmload_interception()
2580 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in vmload_interception()
2584 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmload_interception()
2585 skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2587 nested_svm_vmloadsave(nested_vmcb, svm->vmcb); in vmload_interception()
2593 static int vmsave_interception(struct vcpu_svm *svm) in vmsave_interception() argument
2598 if (nested_svm_check_permissions(svm)) in vmsave_interception()
2601 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); in vmsave_interception()
2605 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmsave_interception()
2606 skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2608 nested_svm_vmloadsave(svm->vmcb, nested_vmcb); in vmsave_interception()
2614 static int vmrun_interception(struct vcpu_svm *svm) in vmrun_interception() argument
2616 if (nested_svm_check_permissions(svm)) in vmrun_interception()
2620 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); in vmrun_interception()
2622 if (!nested_svm_vmrun(svm)) in vmrun_interception()
2625 if (!nested_svm_vmrun_msrpm(svm)) in vmrun_interception()
2632 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in vmrun_interception()
2633 svm->vmcb->control.exit_code_hi = 0; in vmrun_interception()
2634 svm->vmcb->control.exit_info_1 = 0; in vmrun_interception()
2635 svm->vmcb->control.exit_info_2 = 0; in vmrun_interception()
2637 nested_svm_vmexit(svm); in vmrun_interception()
2642 static int stgi_interception(struct vcpu_svm *svm) in stgi_interception() argument
2644 if (nested_svm_check_permissions(svm)) in stgi_interception()
2647 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in stgi_interception()
2648 skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2649 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in stgi_interception()
2651 enable_gif(svm); in stgi_interception()
2656 static int clgi_interception(struct vcpu_svm *svm) in clgi_interception() argument
2658 if (nested_svm_check_permissions(svm)) in clgi_interception()
2661 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in clgi_interception()
2662 skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2664 disable_gif(svm); in clgi_interception()
2667 svm_clear_vintr(svm); in clgi_interception()
2668 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in clgi_interception()
2670 mark_dirty(svm->vmcb, VMCB_INTR); in clgi_interception()
2675 static int invlpga_interception(struct vcpu_svm *svm) in invlpga_interception() argument
2677 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception()
2679 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), in invlpga_interception()
2680 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2683 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2685 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in invlpga_interception()
2686 skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2690 static int skinit_interception(struct vcpu_svm *svm) in skinit_interception() argument
2692 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in skinit_interception()
2694 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2698 static int wbinvd_interception(struct vcpu_svm *svm) in wbinvd_interception() argument
2700 kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2704 static int xsetbv_interception(struct vcpu_svm *svm) in xsetbv_interception() argument
2706 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2707 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in xsetbv_interception()
2709 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2710 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in xsetbv_interception()
2711 skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
2717 static int task_switch_interception(struct vcpu_svm *svm) in task_switch_interception() argument
2721 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2723 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2725 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2727 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2731 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2733 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2736 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2747 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2750 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2754 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2756 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2759 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2770 skip_emulated_instruction(&svm->vcpu); in task_switch_interception()
2775 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2777 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in task_switch_interception()
2778 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in task_switch_interception()
2779 svm->vcpu.run->internal.ndata = 0; in task_switch_interception()
2785 static int cpuid_interception(struct vcpu_svm *svm) in cpuid_interception() argument
2787 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in cpuid_interception()
2788 kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2792 static int iret_interception(struct vcpu_svm *svm) in iret_interception() argument
2794 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2795 clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
2796 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2797 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2798 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2802 static int invlpg_interception(struct vcpu_svm *svm) in invlpg_interception() argument
2805 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in invlpg_interception()
2807 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2808 skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2812 static int emulate_on_interception(struct vcpu_svm *svm) in emulate_on_interception() argument
2814 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in emulate_on_interception()
2817 static int rdpmc_interception(struct vcpu_svm *svm) in rdpmc_interception() argument
2822 return emulate_on_interception(svm); in rdpmc_interception()
2824 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2825 kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2830 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, in check_selective_cr0_intercepted() argument
2833 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2837 intercept = svm->nested.intercept; in check_selective_cr0_intercepted()
2839 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2847 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2848 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2856 static int cr_interception(struct vcpu_svm *svm) in cr_interception() argument
2863 return emulate_on_interception(svm); in cr_interception()
2865 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2866 return emulate_on_interception(svm); in cr_interception()
2868 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2869 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2872 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2877 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
2880 if (!check_selective_cr0_intercepted(svm, val)) in cr_interception()
2881 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2887 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2890 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2893 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2897 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2903 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2906 val = svm->vcpu.arch.cr2; in cr_interception()
2909 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2912 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2915 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
2919 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2922 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
2924 kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
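
cr_interception() recovers both the CR number and the access direction from the exit code alone: exit codes for CR reads start at SVM_EXIT_READ_CR0, writes start 16 higher, and the GPR involved sits in the low bits of exit_info_1. A sketch of that decode; the two base exit codes (0x0 for CR reads, 0x10 for CR writes) and the register mask are assumed from the AMD exit-code layout.

#include <stdbool.h>
#include <stdint.h>

#define EXIT_READ_CR0     0x000u  /* first CR-read exit code (assumed)  */
#define EXIT_WRITE_CR0    0x010u  /* first CR-write exit code (assumed) */
#define EXITINFO_REG_MASK 0x0fu   /* GPR number in exit_info_1          */

struct cr_access {
	unsigned cr;     /* which control register               */
	unsigned reg;    /* which GPR holds / receives the value */
	bool     write;  /* mov-to-CR (true) or mov-from-CR      */
};

static struct cr_access decode_cr_exit(uint32_t exit_code, uint64_t exit_info_1)
{
	struct cr_access a;

	a.cr = exit_code - EXIT_READ_CR0;
	a.write = a.cr >= 16;          /* write exit codes sit 16 above reads */
	if (a.write)
		a.cr -= 16;
	a.reg = exit_info_1 & EXITINFO_REG_MASK;
	return a;
}
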
2929 static int dr_interception(struct vcpu_svm *svm) in dr_interception() argument
2934 if (svm->vcpu.guest_debug == 0) { in dr_interception()
2940 clr_dr_intercepts(svm); in dr_interception()
2941 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2946 return emulate_on_interception(svm); in dr_interception()
2948 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2949 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2952 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
2954 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
2955 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
2957 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
2959 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
2960 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
2963 skip_emulated_instruction(&svm->vcpu); in dr_interception()
2968 static int cr8_write_interception(struct vcpu_svm *svm) in cr8_write_interception() argument
2970 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
2973 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
2975 r = cr_interception(svm); in cr8_write_interception()
2976 if (lapic_in_kernel(&svm->vcpu)) in cr8_write_interception()
2978 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
2992 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
2996 msr_info->data = svm->vmcb->control.tsc_offset + in svm_get_msr()
3002 msr_info->data = svm->vmcb->save.star; in svm_get_msr()
3006 msr_info->data = svm->vmcb->save.lstar; in svm_get_msr()
3009 msr_info->data = svm->vmcb->save.cstar; in svm_get_msr()
3012 msr_info->data = svm->vmcb->save.kernel_gs_base; in svm_get_msr()
3015 msr_info->data = svm->vmcb->save.sfmask; in svm_get_msr()
3019 msr_info->data = svm->vmcb->save.sysenter_cs; in svm_get_msr()
3022 msr_info->data = svm->sysenter_eip; in svm_get_msr()
3025 msr_info->data = svm->sysenter_esp; in svm_get_msr()
3033 msr_info->data = svm->vmcb->save.dbgctl; in svm_get_msr()
3036 msr_info->data = svm->vmcb->save.br_from; in svm_get_msr()
3039 msr_info->data = svm->vmcb->save.br_to; in svm_get_msr()
3042 msr_info->data = svm->vmcb->save.last_excp_from; in svm_get_msr()
3045 msr_info->data = svm->vmcb->save.last_excp_to; in svm_get_msr()
3048 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
3051 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
3062 static int rdmsr_interception(struct vcpu_svm *svm) in rdmsr_interception() argument
3064 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in rdmsr_interception()
3069 if (svm_get_msr(&svm->vcpu, &msr_info)) { in rdmsr_interception()
3071 kvm_inject_gp(&svm->vcpu, 0); in rdmsr_interception()
3075 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, in rdmsr_interception()
3077 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, in rdmsr_interception()
3079 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in rdmsr_interception()
3080 skip_emulated_instruction(&svm->vcpu); in rdmsr_interception()
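
After the MSR value is fetched, rdmsr_interception() follows the RDMSR contract: the low 32 bits go to RAX, the high 32 bits to RDX, and RIP advances past the two-byte opcode. A tiny sketch of the register split.

#include <stdint.h>

/* RDMSR returns the 64-bit value split across EDX:EAX. */
static void rdmsr_result(uint64_t data, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)data;          /* low half  -> RAX */
	*edx = (uint32_t)(data >> 32);  /* high half -> RDX */
}
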
3087 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
3095 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
3098 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
3099 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
3101 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
3112 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
3121 svm->vmcb->save.star = data; in svm_set_msr()
3125 svm->vmcb->save.lstar = data; in svm_set_msr()
3128 svm->vmcb->save.cstar = data; in svm_set_msr()
3131 svm->vmcb->save.kernel_gs_base = data; in svm_set_msr()
3134 svm->vmcb->save.sfmask = data; in svm_set_msr()
3138 svm->vmcb->save.sysenter_cs = data; in svm_set_msr()
3141 svm->sysenter_eip = data; in svm_set_msr()
3142 svm->vmcb->save.sysenter_eip = data; in svm_set_msr()
3145 svm->sysenter_esp = data; in svm_set_msr()
3146 svm->vmcb->save.sysenter_esp = data; in svm_set_msr()
3157 svm->vmcb->save.dbgctl = data; in svm_set_msr()
3158 mark_dirty(svm->vmcb, VMCB_LBR); in svm_set_msr()
3160 svm_enable_lbrv(svm); in svm_set_msr()
3162 svm_disable_lbrv(svm); in svm_set_msr()
3165 svm->nested.hsave_msr = data; in svm_set_msr()
3178 static int wrmsr_interception(struct vcpu_svm *svm) in wrmsr_interception() argument
3181 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in wrmsr_interception()
3182 u64 data = kvm_read_edx_eax(&svm->vcpu); in wrmsr_interception()
3188 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in wrmsr_interception()
3189 if (kvm_set_msr(&svm->vcpu, &msr)) { in wrmsr_interception()
3191 kvm_inject_gp(&svm->vcpu, 0); in wrmsr_interception()
3194 skip_emulated_instruction(&svm->vcpu); in wrmsr_interception()
3199 static int msr_interception(struct vcpu_svm *svm) in msr_interception() argument
3201 if (svm->vmcb->control.exit_info_1) in msr_interception()
3202 return wrmsr_interception(svm); in msr_interception()
3204 return rdmsr_interception(svm); in msr_interception()
3207 static int interrupt_window_interception(struct vcpu_svm *svm) in interrupt_window_interception() argument
3209 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
3210 svm_clear_vintr(svm); in interrupt_window_interception()
3211 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in interrupt_window_interception()
3212 mark_dirty(svm->vmcb, VMCB_INTR); in interrupt_window_interception()
3213 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
3217 static int pause_interception(struct vcpu_svm *svm) in pause_interception() argument
3219 kvm_vcpu_on_spin(&(svm->vcpu)); in pause_interception()
3223 static int nop_interception(struct vcpu_svm *svm) in nop_interception() argument
3225 skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
3229 static int monitor_interception(struct vcpu_svm *svm) in monitor_interception() argument
3232 return nop_interception(svm); in monitor_interception()
3235 static int mwait_interception(struct vcpu_svm *svm) in mwait_interception() argument
3238 return nop_interception(svm); in mwait_interception()
3241 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3308 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
3309 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
3310 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3421 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit() local
3423 u32 exit_code = svm->vmcb->control.exit_code; in handle_exit()
3427 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) in handle_exit()
3428 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3430 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3432 if (unlikely(svm->nested.exit_required)) { in handle_exit()
3433 nested_svm_vmexit(svm); in handle_exit()
3434 svm->nested.exit_required = false; in handle_exit()
3442 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, in handle_exit()
3443 svm->vmcb->control.exit_info_1, in handle_exit()
3444 svm->vmcb->control.exit_info_2, in handle_exit()
3445 svm->vmcb->control.exit_int_info, in handle_exit()
3446 svm->vmcb->control.exit_int_info_err, in handle_exit()
3449 vmexit = nested_svm_exit_special(svm); in handle_exit()
3452 vmexit = nested_svm_exit_handled(svm); in handle_exit()
3458 svm_complete_interrupts(svm); in handle_exit()
3460 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in handle_exit()
3463 = svm->vmcb->control.exit_code; in handle_exit()
3469 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && in handle_exit()
3475 __func__, svm->vmcb->control.exit_int_info, in handle_exit()
3485 return svm_exit_handlers[exit_code](svm); in handle_exit()
3497 static void pre_svm_run(struct vcpu_svm *svm) in pre_svm_run() argument
3504 if (svm->asid_generation != sd->asid_generation) in pre_svm_run()
3505 new_asid(svm, sd); in pre_svm_run()
3510 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3512 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3514 set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
3518 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) in svm_inject_irq() argument
3522 control = &svm->vmcb->control; in svm_inject_irq()
3527 mark_dirty(svm->vmcb, VMCB_INTR); in svm_inject_irq()
3532 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq() local
3534 BUG_ON(!(gif_set(svm))); in svm_set_irq()
3539 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3545 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept() local
3550 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3556 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3581 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3582 struct vmcb *vmcb = svm->vmcb; in svm_nmi_allowed()
3585 !(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_allowed()
3586 ret = ret && gif_set(svm) && nested_svm_nmi(svm); in svm_nmi_allowed()
3593 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
3595 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3600 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3603 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3604 set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3606 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3607 clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3613 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3614 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_allowed()
3617 if (!gif_set(svm) || in svm_interrupt_allowed()
3624 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); in svm_interrupt_allowed()
3631 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window() local
3639 if (gif_set(svm) && nested_svm_intr(svm)) { in enable_irq_window()
3640 svm_set_vintr(svm); in enable_irq_window()
3641 svm_inject_irq(svm, 0x0); in enable_irq_window()
3647 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window() local
3649 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3657 svm->nmi_singlestep = true; in enable_nmi_window()
3658 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in enable_nmi_window()
3668 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb() local
3671 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb()
3673 svm->asid_generation--; in svm_flush_tlb()
3682 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3687 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3688 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3695 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
3702 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3703 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
3706 static void svm_complete_interrupts(struct vcpu_svm *svm) in svm_complete_interrupts() argument
3710 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
3711 unsigned int3_injected = svm->int3_injected; in svm_complete_interrupts()
3713 svm->int3_injected = 0; in svm_complete_interrupts()
3719 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3720 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3721 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3722 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3725 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3726 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3727 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3732 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3739 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3749 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3750 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3751 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3756 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
3757 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3760 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3763 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
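
svm_complete_interrupts() re-queues whatever event was in flight when the VMEXIT happened by decoding exit_int_info: the low byte is the vector, bits 10:8 the event type, bit 11 says an error code is present, and bit 31 marks the field valid. A standalone decode sketch using those field positions; the constants below are taken from the AMD event-injection format, not copied from the kernel header.

#include <stdbool.h>
#include <stdint.h>

/* Field layout of EXITINTINFO / EVENTINJ per the AMD APM (assumed values). */
#define EVT_VEC_MASK    0x000000ffu
#define EVT_TYPE_MASK   0x00000700u
#define EVT_TYPE_INTR   (0u << 8)
#define EVT_TYPE_NMI    (2u << 8)
#define EVT_TYPE_EXEPT  (3u << 8)
#define EVT_TYPE_SOFT   (4u << 8)
#define EVT_VALID_ERR   (1u << 11)
#define EVT_VALID       (1u << 31)

struct pending_event {
	bool     valid, has_error_code;
	unsigned vector, type;
	uint32_t error_code;
};

static struct pending_event decode_exit_int_info(uint32_t info, uint32_t err)
{
	struct pending_event ev = { 0 };

	if (!(info & EVT_VALID))
		return ev;                       /* nothing was in flight */

	ev.valid          = true;
	ev.vector         = info & EVT_VEC_MASK;
	ev.type           = info & EVT_TYPE_MASK;
	ev.has_error_code = (info & EVT_VALID_ERR) != 0;
	if (ev.has_error_code)
		ev.error_code = err;
	return ev;
}
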
3772 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
3773 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
3778 svm_complete_interrupts(svm); in svm_cancel_injection()
3783 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
3785 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3786 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3787 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3793 if (unlikely(svm->nested.exit_required)) in svm_vcpu_run()
3796 pre_svm_run(svm); in svm_vcpu_run()
3800 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3852 : [svm]"a"(svm), in svm_vcpu_run()
3880 wrmsrl(MSR_GS_BASE, svm->host.gs_base); in svm_vcpu_run()
3882 loadsegment(fs, svm->host.fs); in svm_vcpu_run()
3884 loadsegment(gs, svm->host.gs); in svm_vcpu_run()
3892 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3893 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3894 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3895 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3898 kvm_before_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3904 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3905 kvm_after_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3909 svm->next_rip = 0; in svm_vcpu_run()
3911 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
3914 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
3915 svm->apf_reason = kvm_read_and_reset_pf_reason(); in svm_vcpu_run()
3926 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
3928 svm_handle_mce(svm); in svm_vcpu_run()
3930 mark_all_clean(svm->vmcb); in svm_vcpu_run()
3935 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr3() local
3937 svm->vmcb->save.cr3 = root; in svm_set_cr3()
3938 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr3()
3944 struct vcpu_svm *svm = to_svm(vcpu); in set_tdp_cr3() local
3946 svm->vmcb->control.nested_cr3 = root; in set_tdp_cr3()
3947 mark_dirty(svm->vmcb, VMCB_NPT); in set_tdp_cr3()
3950 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); in set_tdp_cr3()
3951 mark_dirty(svm->vmcb, VMCB_CR); in set_tdp_cr3()
4000 struct vcpu_svm *svm = to_svm(vcpu); in svm_cpuid_update() local
4003 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu); in svm_cpuid_update()
4065 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_deactivate() local
4067 set_exception_intercept(svm, NM_VECTOR); in svm_fpu_deactivate()
4068 update_cr0_intercept(svm); in svm_fpu_deactivate()
4138 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
4141 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4167 intercept = svm->nested.intercept; in svm_check_intercept()
4245 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()