Lines Matching refs:vcpu

These are identifier cross-references to vcpu in KVM's AMD SVM support code (svm.c). The leading number on each entry is the line in that source file; the trailing annotation names the containing function and whether the reference is an argument, local, or member.

130 	struct kvm_vcpu vcpu;  member
204 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
249 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) in to_svm() argument
251 return container_of(vcpu, struct vcpu_svm, vcpu); in to_svm()
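
The to_svm() helper at 249-251 works only because struct vcpu_svm embeds struct kvm_vcpu by value (the member at 130): container_of() subtracts the member's offset from the member's address to recover the enclosing object. Below is a minimal userspace sketch of the same idiom; demo_vcpu, demo_svm and demo_container_of are invented stand-ins, not the real KVM types.

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for struct kvm_vcpu / struct vcpu_svm (illustrative layout only). */
    struct demo_vcpu { int id; };
    struct demo_svm  { long scratch; struct demo_vcpu vcpu; };

    /* Same idea as the kernel's container_of(): subtract the member offset
     * from the member's address to get back the enclosing structure. */
    #define demo_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct demo_svm *to_demo_svm(struct demo_vcpu *vcpu)
    {
            return demo_container_of(vcpu, struct demo_svm, vcpu);
    }

    int main(void)
    {
            struct demo_svm svm = { .scratch = 42, .vcpu = { .id = 7 } };
            struct demo_vcpu *v = &svm.vcpu;          /* what generic code passes around */

            printf("%ld\n", to_demo_svm(v)->scratch); /* prints 42 */
            return 0;
    }

This is also why nearly every function below takes a struct kvm_vcpu * and immediately converts it with to_svm(): the generic x86 KVM code only ever sees the embedded member.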
261 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
276 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
378 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
383 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
388 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
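
The enable_gif()/disable_gif()/gif_set() trio at 378-388 is the usual set/clear/test pattern on a flags word (vcpu.arch.hflags). A hedged stand-alone version with a placeholder bit value; DEMO_GIF_MASK is invented, not the real HF_GIF_MASK from the KVM headers.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_GIF_MASK (1U << 1)      /* placeholder, not the real HF_GIF_MASK */

    struct demo_state { unsigned int hflags; };

    static void demo_enable_gif(struct demo_state *s)  { s->hflags |=  DEMO_GIF_MASK; }
    static void demo_disable_gif(struct demo_state *s) { s->hflags &= ~DEMO_GIF_MASK; }
    static bool demo_gif_set(const struct demo_state *s)
    {
            return !!(s->hflags & DEMO_GIF_MASK);
    }

    int main(void)
    {
            struct demo_state s = { 0 };

            demo_enable_gif(&s);
            printf("%d\n", demo_gif_set(&s));   /* 1 */
            demo_disable_gif(&s);
            printf("%d\n", demo_gif_set(&s));   /* 0 */
            return 0;
    }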
473 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) in svm_set_efer() argument
475 vcpu->arch.efer = efer; in svm_set_efer()
479 to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
480 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_efer()
489 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) in svm_get_interrupt_shadow() argument
491 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow()
499 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) in svm_set_interrupt_shadow() argument
501 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow()
510 static void skip_emulated_instruction(struct kvm_vcpu *vcpu) in skip_emulated_instruction() argument
512 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction()
520 if (emulate_instruction(vcpu, EMULTYPE_SKIP) != in skip_emulated_instruction()
525 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) in skip_emulated_instruction()
527 __func__, kvm_rip_read(vcpu), svm->next_rip); in skip_emulated_instruction()
529 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
530 svm_set_interrupt_shadow(vcpu, 0); in skip_emulated_instruction()
533 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, in svm_queue_exception() argument
537 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception()
548 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
557 skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
558 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
594 static void svm_init_osvw(struct kvm_vcpu *vcpu) in svm_init_osvw() argument
600 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; in svm_init_osvw()
601 vcpu->arch.osvw.status = osvw_status & ~(6ULL); in svm_init_osvw()
612 vcpu->arch.osvw.status |= 1; in svm_init_osvw()
984 static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) in svm_scale_tsc() argument
986 struct vcpu_svm *svm = to_svm(vcpu); in svm_scale_tsc()
995 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in svm_set_tsc_khz() argument
997 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_tsc_khz()
1010 vcpu->arch.tsc_catchup = 1; in svm_set_tsc_khz()
1011 vcpu->arch.tsc_always_catchup = 1; in svm_set_tsc_khz()
1031 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu) in svm_read_tsc_offset() argument
1033 struct vcpu_svm *svm = to_svm(vcpu); in svm_read_tsc_offset()
1038 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in svm_write_tsc_offset() argument
1040 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset()
1043 if (is_guest_mode(vcpu)) { in svm_write_tsc_offset()
1048 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in svm_write_tsc_offset()
1057 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host) in svm_adjust_tsc_offset() argument
1059 struct vcpu_svm *svm = to_svm(vcpu); in svm_adjust_tsc_offset()
1064 adjustment = svm_scale_tsc(vcpu, (u64)adjustment); in svm_adjust_tsc_offset()
1068 if (is_guest_mode(vcpu)) in svm_adjust_tsc_offset()
1071 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in svm_adjust_tsc_offset()
1078 static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in svm_compute_tsc_offset() argument
1082 tsc = svm_scale_tsc(vcpu, native_read_tsc()); in svm_compute_tsc_offset()
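
The TSC helpers at 984-1082 implement the relation guest_tsc = scale(host_tsc) + tsc_offset (compare svm_read_l1_tsc() at 3065-3069 further down), so svm_compute_tsc_offset() simply solves for the offset: target_tsc minus the scaled host counter. The sketch below shows that arithmetic only; all names are invented, and the real hardware TSC ratio is an 8.32 fixed-point MSR value, which is replaced here by a plain numerator/denominator.

    #include <stdint.h>
    #include <stdio.h>

    /* Guest-visible TSC on SVM is roughly: scale(host_tsc) + tsc_offset. */
    static uint64_t demo_scale_tsc(uint64_t host_tsc, uint64_t num, uint64_t den)
    {
            return host_tsc * num / den;
    }

    /* Mirrors the shape of svm_compute_tsc_offset(): pick the offset so the
     * guest reads target_tsc right now. */
    static int64_t demo_compute_tsc_offset(uint64_t host_tsc_now, uint64_t target_tsc,
                                           uint64_t num, uint64_t den)
    {
            return (int64_t)(target_tsc - demo_scale_tsc(host_tsc_now, num, den));
    }

    int main(void)
    {
            uint64_t host_now = 1000000;     /* pretend rdtsc value */
            uint64_t target   = 750000;      /* TSC value the guest should see now */
            int64_t  off = demo_compute_tsc_offset(host_now, target, 1, 2);

            printf("offset=%lld guest_tsc=%llu\n",
                   (long long)off,
                   (unsigned long long)(demo_scale_tsc(host_now, 1, 2) + off));
            return 0;
    }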
1092 svm->vcpu.fpu_active = 1; in init_vmcb()
1093 svm->vcpu.arch.hflags = 0; in init_vmcb()
1160 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1162 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1164 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1170 svm->vcpu.arch.cr0 = 0; in init_vmcb()
1171 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1190 svm->vcpu.arch.hflags = 0; in init_vmcb()
1202 static void svm_vcpu_reset(struct kvm_vcpu *vcpu) in svm_vcpu_reset() argument
1204 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset()
1210 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); in svm_vcpu_reset()
1211 kvm_register_write(vcpu, VCPU_REGS_RDX, eax); in svm_vcpu_reset()
1231 err = kvm_vcpu_init(&svm->vcpu, kvm, id); in svm_create_vcpu()
1266 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_create_vcpu()
1268 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_create_vcpu()
1269 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_create_vcpu()
1271 svm_init_osvw(&svm->vcpu); in svm_create_vcpu()
1273 return &svm->vcpu; in svm_create_vcpu()
1282 kvm_vcpu_uninit(&svm->vcpu); in svm_create_vcpu()
1289 static void svm_free_vcpu(struct kvm_vcpu *vcpu) in svm_free_vcpu() argument
1291 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu()
1297 kvm_vcpu_uninit(vcpu); in svm_free_vcpu()
1301 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in svm_vcpu_load() argument
1303 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load()
1306 if (unlikely(cpu != vcpu->cpu)) { in svm_vcpu_load()
1312 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); in svm_vcpu_load()
1328 static void svm_vcpu_put(struct kvm_vcpu *vcpu) in svm_vcpu_put() argument
1330 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put()
1333 ++vcpu->stat.host_state_reload; in svm_vcpu_put()
1348 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) in svm_get_rflags() argument
1350 return to_svm(vcpu)->vmcb->save.rflags; in svm_get_rflags()
1353 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in svm_set_rflags() argument
1360 to_svm(vcpu)->vmcb->save.rflags = rflags; in svm_set_rflags()
1363 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) in svm_cache_reg() argument
1368 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in svm_cache_reg()
1385 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) in svm_seg() argument
1387 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_seg()
1403 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) in svm_get_segment_base() argument
1405 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_get_segment_base()
1410 static void svm_get_segment(struct kvm_vcpu *vcpu, in svm_get_segment() argument
1413 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_get_segment()
1473 var->dpl = to_svm(vcpu)->vmcb->save.cpl; in svm_get_segment()
1478 static int svm_get_cpl(struct kvm_vcpu *vcpu) in svm_get_cpl() argument
1480 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_get_cpl()
1485 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_get_idt() argument
1487 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt()
1493 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_set_idt() argument
1495 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt()
1502 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_get_gdt() argument
1504 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt()
1510 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_set_gdt() argument
1512 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt()
1519 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) in svm_decache_cr0_guest_bits() argument
1523 static void svm_decache_cr3(struct kvm_vcpu *vcpu) in svm_decache_cr3() argument
1527 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) in svm_decache_cr4_guest_bits() argument
1533 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1536 if (!svm->vcpu.fpu_active) in update_cr0_intercept()
1544 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { in update_cr0_intercept()
1553 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in svm_set_cr0() argument
1555 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0()
1558 if (vcpu->arch.efer & EFER_LME) { in svm_set_cr0()
1559 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { in svm_set_cr0()
1560 vcpu->arch.efer |= EFER_LMA; in svm_set_cr0()
1564 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { in svm_set_cr0()
1565 vcpu->arch.efer &= ~EFER_LMA; in svm_set_cr0()
1570 vcpu->arch.cr0 = cr0; in svm_set_cr0()
1575 if (!vcpu->fpu_active) in svm_set_cr0()
1588 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in svm_set_cr4() argument
1591 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; in svm_set_cr4()
1597 svm_flush_tlb(vcpu); in svm_set_cr4()
1599 vcpu->arch.cr4 = cr4; in svm_set_cr4()
1603 to_svm(vcpu)->vmcb->save.cr4 = cr4; in svm_set_cr4()
1604 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_cr4()
1608 static void svm_set_segment(struct kvm_vcpu *vcpu, in svm_set_segment() argument
1611 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment()
1612 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_set_segment()
1642 static void update_bp_intercept(struct kvm_vcpu *vcpu) in update_bp_intercept() argument
1644 struct vcpu_svm *svm = to_svm(vcpu); in update_bp_intercept()
1648 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { in update_bp_intercept()
1649 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in update_bp_intercept()
1652 vcpu->guest_debug = 0; in update_bp_intercept()
1669 static u64 svm_get_dr6(struct kvm_vcpu *vcpu) in svm_get_dr6() argument
1671 return to_svm(vcpu)->vmcb->save.dr6; in svm_get_dr6()
1674 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) in svm_set_dr6() argument
1676 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr6()
1682 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) in svm_sync_dirty_debug_regs() argument
1684 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs()
1686 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
1687 get_debugreg(vcpu->arch.db[1], 1); in svm_sync_dirty_debug_regs()
1688 get_debugreg(vcpu->arch.db[2], 2); in svm_sync_dirty_debug_regs()
1689 get_debugreg(vcpu->arch.db[3], 3); in svm_sync_dirty_debug_regs()
1690 vcpu->arch.dr6 = svm_get_dr6(vcpu); in svm_sync_dirty_debug_regs()
1691 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1693 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in svm_sync_dirty_debug_regs()
1697 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) in svm_set_dr7() argument
1699 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7()
1716 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) in pf_interception()
1717 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); in pf_interception()
1718 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in pf_interception()
1740 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1742 if (!(svm->vcpu.guest_debug & in db_interception()
1745 kvm_queue_exception(&svm->vcpu, DB_VECTOR); in db_interception()
1751 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) in db_interception()
1756 if (svm->vcpu.guest_debug & in db_interception()
1770 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1782 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); in ud_interception()
1784 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in ud_interception()
1790 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1794 static void svm_fpu_activate(struct kvm_vcpu *vcpu) in svm_fpu_activate() argument
1796 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_activate()
1800 svm->vcpu.fpu_active = 1; in svm_fpu_activate()
1806 svm_fpu_activate(&svm->vcpu); in nm_interception()
1858 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1881 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1896 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception() local
1901 ++svm->vcpu.stat.io_exits; in io_interception()
1905 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in io_interception()
1910 skip_emulated_instruction(&svm->vcpu); in io_interception()
1912 return kvm_fast_pio_out(vcpu, size, port); in io_interception()
1922 ++svm->vcpu.stat.irq_exits; in intr_interception()
1933 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; in halt_interception()
1934 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
1939 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmmcall_interception()
1940 kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
1944 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) in nested_svm_get_tdp_cr3() argument
1946 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3()
1951 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) in nested_svm_get_tdp_pdptr() argument
1953 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr()
1958 ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte, in nested_svm_get_tdp_pdptr()
1965 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, in nested_svm_set_tdp_cr3() argument
1968 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_set_tdp_cr3()
1972 svm_flush_tlb(vcpu); in nested_svm_set_tdp_cr3()
1975 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, in nested_svm_inject_npf_exit() argument
1978 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit()
2004 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) in nested_svm_init_mmu_context() argument
2006 WARN_ON(mmu_is_nested(vcpu)); in nested_svm_init_mmu_context()
2007 kvm_init_shadow_mmu(vcpu); in nested_svm_init_mmu_context()
2008 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; in nested_svm_init_mmu_context()
2009 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; in nested_svm_init_mmu_context()
2010 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; in nested_svm_init_mmu_context()
2011 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; in nested_svm_init_mmu_context()
2012 vcpu->arch.mmu.shadow_root_level = get_npt_level(); in nested_svm_init_mmu_context()
2013 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_svm_init_mmu_context()
2016 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) in nested_svm_uninit_mmu_context() argument
2018 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_svm_uninit_mmu_context()
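
nested_svm_init_mmu_context() at 2004-2013 does not walk page tables itself; it installs vendor callbacks (set_cr3, get_cr3, get_pdptr, inject_page_fault) into vcpu->arch.mmu and redirects walk_mmu, so the generic MMU code calls back through those pointers; nested_svm_uninit_mmu_context() at 2016-2018 then just points walk_mmu back at the regular MMU. A stripped-down sketch of that callback-installation pattern, with entirely hypothetical demo_* types and only two of the hooks:

    #include <stdio.h>

    struct demo_mmu {
            unsigned long (*get_cr3)(void *ctx);
            void (*set_cr3)(void *ctx, unsigned long root);
    };

    struct demo_vcpu {
            void *ctx;
            unsigned long nested_cr3;
            struct demo_mmu mmu;
    };

    static unsigned long demo_nested_get_cr3(void *ctx)
    {
            return ((struct demo_vcpu *)ctx)->nested_cr3;
    }

    static void demo_nested_set_cr3(void *ctx, unsigned long root)
    {
            ((struct demo_vcpu *)ctx)->nested_cr3 = root;
    }

    static void demo_init_nested_mmu(struct demo_vcpu *vcpu)
    {
            vcpu->ctx = vcpu;
            vcpu->mmu.get_cr3 = demo_nested_get_cr3;   /* cf. nested_svm_get_tdp_cr3 */
            vcpu->mmu.set_cr3 = demo_nested_set_cr3;   /* cf. nested_svm_set_tdp_cr3 */
    }

    int main(void)
    {
            struct demo_vcpu v = { 0 };

            demo_init_nested_mmu(&v);
            v.mmu.set_cr3(v.ctx, 0x1000);              /* generic code calls via the ops */
            printf("%lx\n", v.mmu.get_cr3(v.ctx));     /* prints 1000 */
            return 0;
    }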
2023 if (!(svm->vcpu.arch.efer & EFER_SVME) in nested_svm_check_permissions()
2024 || !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
2025 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
2030 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
2042 if (!is_guest_mode(&svm->vcpu)) in nested_svm_check_exception()
2048 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_check_exception()
2060 if (!is_guest_mode(&svm->vcpu)) in nested_svm_intr()
2063 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_intr()
2066 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) in nested_svm_intr()
2099 if (!is_guest_mode(&svm->vcpu)) in nested_svm_nmi()
2117 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); in nested_svm_map()
2126 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_map()
2156 if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
2170 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
2181 if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
2328 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
2340 nested_vmcb->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
2341 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
2342 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
2344 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
2345 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
2384 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_vmexit()
2390 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
2391 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
2402 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
2403 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
2404 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
2405 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
2408 svm->vcpu.arch.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2410 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); in nested_svm_vmexit()
2412 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); in nested_svm_vmexit()
2413 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); in nested_svm_vmexit()
2414 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); in nested_svm_vmexit()
2423 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
2424 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmexit()
2425 kvm_mmu_load(&svm->vcpu); in nested_svm_vmexit()
2452 if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4)) in nested_svm_vmrun_msrpm()
2514 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
2515 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
2527 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
2528 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
2529 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
2530 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
2531 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
2537 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
2541 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) in nested_svm_vmrun()
2542 svm->vcpu.arch.hflags |= HF_HIF_MASK; in nested_svm_vmrun()
2544 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; in nested_svm_vmrun()
2547 kvm_mmu_unload(&svm->vcpu); in nested_svm_vmrun()
2549 nested_svm_init_mmu_context(&svm->vcpu); in nested_svm_vmrun()
2559 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); in nested_svm_vmrun()
2560 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); in nested_svm_vmrun()
2561 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); in nested_svm_vmrun()
2562 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); in nested_svm_vmrun()
2565 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2567 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); in nested_svm_vmrun()
2570 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmrun()
2572 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; in nested_svm_vmrun()
2573 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); in nested_svm_vmrun()
2574 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); in nested_svm_vmrun()
2575 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); in nested_svm_vmrun()
2594 svm_flush_tlb(&svm->vcpu); in nested_svm_vmrun()
2597 svm->vcpu.arch.hflags |= HF_VINTR_MASK; in nested_svm_vmrun()
2599 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; in nested_svm_vmrun()
2601 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { in nested_svm_vmrun()
2620 enter_guest_mode(&svm->vcpu); in nested_svm_vmrun()
2665 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmload_interception()
2666 skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2686 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmsave_interception()
2687 skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2701 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); in vmrun_interception()
2728 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in stgi_interception()
2729 skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2730 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in stgi_interception()
2742 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in clgi_interception()
2743 skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2758 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception() local
2760 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), in invlpga_interception()
2761 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2764 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2766 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in invlpga_interception()
2767 skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2773 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in skinit_interception()
2775 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2781 kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2787 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2788 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in xsetbv_interception()
2790 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2791 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in xsetbv_interception()
2792 skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
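
xsetbv_interception() at 2787-2792 reads its 64-bit argument from the EDX:EAX register pair via kvm_read_edx_eax(); the wrmsr handler at 3260 does the same, and rdmsr_interception() at 3155-3156 performs the reverse split. The combination is just a shift-and-or; demo_read_edx_eax below is an invented stand-in.

    #include <stdint.h>
    #include <stdio.h>

    /* Combine the 32-bit EDX:EAX pair into one 64-bit value. */
    static uint64_t demo_read_edx_eax(uint32_t edx, uint32_t eax)
    {
            return ((uint64_t)edx << 32) | eax;
    }

    int main(void)
    {
            uint32_t eax = 0x00000007, edx = 0x00000001;

            printf("0x%llx\n", (unsigned long long)demo_read_edx_eax(edx, eax));
            /* prints 0x100000007 */
            return 0;
    }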
2828 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2837 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2840 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2851 skip_emulated_instruction(&svm->vcpu); in task_switch_interception()
2856 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2858 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in task_switch_interception()
2859 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in task_switch_interception()
2860 svm->vcpu.run->internal.ndata = 0; in task_switch_interception()
2868 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in cpuid_interception()
2869 kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2875 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2877 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2878 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2879 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2886 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in invlpg_interception()
2888 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2889 skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2895 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in emulate_on_interception()
2905 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2906 kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2914 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2920 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2958 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
2962 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2968 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2971 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2974 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2978 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2984 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2987 val = svm->vcpu.arch.cr2; in cr_interception()
2990 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2993 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2996 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
3000 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
3003 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
3005 kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
3015 if (svm->vcpu.guest_debug == 0) { in dr_interception()
3022 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
3033 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
3035 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
3036 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
3038 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
3040 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
3041 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
3044 skip_emulated_instruction(&svm->vcpu); in dr_interception()
3051 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
3054 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
3057 if (irqchip_in_kernel(svm->vcpu.kvm)) in cr8_write_interception()
3059 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
3065 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in svm_read_l1_tsc() argument
3067 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); in svm_read_l1_tsc()
3069 svm_scale_tsc(vcpu, host_tsc); in svm_read_l1_tsc()
3072 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) in svm_get_msr() argument
3074 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr()
3079 svm_scale_tsc(vcpu, native_read_tsc()); in svm_get_msr()
3139 return kvm_get_msr_common(vcpu, ecx, data); in svm_get_msr()
3146 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in rdmsr_interception()
3149 if (svm_get_msr(&svm->vcpu, ecx, &data)) { in rdmsr_interception()
3151 kvm_inject_gp(&svm->vcpu, 0); in rdmsr_interception()
3155 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff); in rdmsr_interception()
3156 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32); in rdmsr_interception()
3157 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in rdmsr_interception()
3158 skip_emulated_instruction(&svm->vcpu); in rdmsr_interception()
3163 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) in svm_set_vm_cr() argument
3165 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr()
3182 if (svm_dis && (vcpu->arch.efer & EFER_SVME)) in svm_set_vm_cr()
3188 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in svm_set_msr() argument
3190 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr()
3196 kvm_write_tsc(vcpu, msr); in svm_set_msr()
3228 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", in svm_set_msr()
3246 return svm_set_vm_cr(vcpu, data); in svm_set_msr()
3248 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); in svm_set_msr()
3251 return kvm_set_msr_common(vcpu, msr); in svm_set_msr()
3259 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in wrmsr_interception()
3260 u64 data = kvm_read_edx_eax(&svm->vcpu); in wrmsr_interception()
3266 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in wrmsr_interception()
3267 if (kvm_set_msr(&svm->vcpu, &msr)) { in wrmsr_interception()
3269 kvm_inject_gp(&svm->vcpu, 0); in wrmsr_interception()
3272 skip_emulated_instruction(&svm->vcpu); in wrmsr_interception()
3287 struct kvm_run *kvm_run = svm->vcpu.run; in interrupt_window_interception()
3289 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
3293 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
3298 if (!irqchip_in_kernel(svm->vcpu.kvm) && in interrupt_window_interception()
3300 !kvm_cpu_has_interrupt(&svm->vcpu)) { in interrupt_window_interception()
3310 kvm_vcpu_on_spin(&(svm->vcpu)); in pause_interception()
3316 skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
3396 static void dump_vmcb(struct kvm_vcpu *vcpu) in dump_vmcb() argument
3398 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb()
3501 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) in svm_get_exit_info() argument
3503 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; in svm_get_exit_info()
3509 static int handle_exit(struct kvm_vcpu *vcpu) in handle_exit() argument
3511 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit()
3512 struct kvm_run *kvm_run = vcpu->run; in handle_exit()
3516 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3518 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3527 if (is_guest_mode(vcpu)) { in handle_exit()
3553 dump_vmcb(vcpu); in handle_exit()
3569 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exit()
3576 static void reload_tss(struct kvm_vcpu *vcpu) in reload_tss() argument
3596 static void svm_inject_nmi(struct kvm_vcpu *vcpu) in svm_inject_nmi() argument
3598 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi()
3601 vcpu->arch.hflags |= HF_NMI_MASK; in svm_inject_nmi()
3603 ++vcpu->stat.nmi_injections; in svm_inject_nmi()
3618 static void svm_set_irq(struct kvm_vcpu *vcpu) in svm_set_irq() argument
3620 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq()
3624 trace_kvm_inj_virq(vcpu->arch.interrupt.nr); in svm_set_irq()
3625 ++vcpu->stat.irq_injections; in svm_set_irq()
3627 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3631 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) in update_cr8_intercept() argument
3633 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept()
3635 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in update_cr8_intercept()
3647 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) in svm_set_virtual_x2apic_mode() argument
3657 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) in svm_load_eoi_exitmap() argument
3662 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) in svm_sync_pir_to_irr() argument
3667 static int svm_nmi_allowed(struct kvm_vcpu *vcpu) in svm_nmi_allowed() argument
3669 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed()
3673 !(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_allowed()
3679 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) in svm_get_nmi_mask() argument
3681 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask()
3683 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3686 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) in svm_set_nmi_mask() argument
3688 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask()
3691 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3694 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3699 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) in svm_interrupt_allowed() argument
3701 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed()
3709 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF); in svm_interrupt_allowed()
3711 if (is_guest_mode(vcpu)) in svm_interrupt_allowed()
3712 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); in svm_interrupt_allowed()
3717 static void enable_irq_window(struct kvm_vcpu *vcpu) in enable_irq_window() argument
3719 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window()
3733 static void enable_nmi_window(struct kvm_vcpu *vcpu) in enable_nmi_window() argument
3735 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window()
3737 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3754 static void svm_flush_tlb(struct kvm_vcpu *vcpu) in svm_flush_tlb() argument
3756 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb()
3764 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) in svm_prepare_guest_switch() argument
3768 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) in sync_cr8_to_lapic() argument
3770 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic()
3772 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in sync_cr8_to_lapic()
3777 kvm_set_cr8(vcpu, cr8); in sync_cr8_to_lapic()
3781 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) in sync_lapic_to_cr8() argument
3783 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8()
3786 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in sync_lapic_to_cr8()
3789 cr8 = kvm_get_cr8(vcpu); in sync_lapic_to_cr8()
3807 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3808 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3809 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3810 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3813 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3814 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3815 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3820 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3827 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3837 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3838 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3839 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3845 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3848 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3851 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
3858 static void svm_cancel_injection(struct kvm_vcpu *vcpu) in svm_cancel_injection() argument
3860 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection()
3869 static void svm_vcpu_run(struct kvm_vcpu *vcpu) in svm_vcpu_run() argument
3871 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run()
3873 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3874 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3875 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3886 sync_lapic_to_cr8(vcpu); in svm_vcpu_run()
3888 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3942 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), in svm_vcpu_run()
3943 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), in svm_vcpu_run()
3944 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), in svm_vcpu_run()
3945 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), in svm_vcpu_run()
3946 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), in svm_vcpu_run()
3947 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP])) in svm_vcpu_run()
3949 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), in svm_vcpu_run()
3950 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), in svm_vcpu_run()
3951 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), in svm_vcpu_run()
3952 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), in svm_vcpu_run()
3953 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), in svm_vcpu_run()
3954 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), in svm_vcpu_run()
3955 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), in svm_vcpu_run()
3956 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) in svm_vcpu_run()
3976 reload_tss(vcpu); in svm_vcpu_run()
3980 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3981 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3982 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3983 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3985 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM); in svm_vcpu_run()
3988 kvm_before_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3995 kvm_after_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3997 sync_cr8_to_lapic(vcpu); in svm_vcpu_run()
4008 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); in svm_vcpu_run()
4009 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); in svm_vcpu_run()
4023 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) in svm_set_cr3() argument
4025 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr3()
4029 svm_flush_tlb(vcpu); in svm_set_cr3()
4032 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) in set_tdp_cr3() argument
4034 struct vcpu_svm *svm = to_svm(vcpu); in set_tdp_cr3()
4040 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); in set_tdp_cr3()
4043 svm_flush_tlb(vcpu); in set_tdp_cr3()
4058 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) in svm_patch_hypercall() argument
4078 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in svm_get_mt_mask() argument
4083 static void svm_cpuid_update(struct kvm_vcpu *vcpu) in svm_cpuid_update() argument
4144 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) in svm_fpu_deactivate() argument
4146 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_deactivate()
4215 static int svm_check_intercept(struct kvm_vcpu *vcpu, in svm_check_intercept() argument
4219 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept()
4253 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4335 static void svm_handle_external_intr(struct kvm_vcpu *vcpu) in svm_handle_external_intr() argument
4340 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) in svm_sched_in() argument