Lines Matching refs:vcpu

131 	struct kvm_vcpu vcpu;  member
206 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
207 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
251 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) in to_svm() argument
253 return container_of(vcpu, struct vcpu_svm, vcpu); in to_svm()
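
The to_svm() helper above is the kernel's container_of() idiom: struct kvm_vcpu is embedded as a member of struct vcpu_svm (line 131), so a pointer to the embedded vcpu can be turned back into a pointer to its container by subtracting the member's offset. A minimal user-space sketch of the same idiom follows; the structures here are simplified stand-ins, not the real kernel definitions.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-ins for the structures in the listing (illustrative only). */
	struct kvm_vcpu { int vcpu_id; };

	struct vcpu_svm {
		unsigned long flags;
		struct kvm_vcpu vcpu;		/* embedded member, as at line 131 above */
	};

	/* Same shape as the kernel's container_of(): subtract the member's offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct vcpu_svm, vcpu);
	}

	int main(void)
	{
		struct vcpu_svm svm = { .vcpu = { .vcpu_id = 3 } };

		/* Recovers the containing vcpu_svm from a pointer to the embedded vcpu. */
		printf("%d\n", to_svm(&svm.vcpu) == &svm);	/* prints 1 */
		return 0;
	}

This is why nearly every svm_* callback in the rest of the listing begins with struct vcpu_svm *svm = to_svm(vcpu);.
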
263 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
278 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
380 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
385 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
390 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
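
enable_gif(), disable_gif() and gif_set() above track AMD's Global Interrupt Flag purely as a bit in vcpu.arch.hflags. A small sketch of the same set/clear/test pattern on a plain flags word; the HF_GIF_MASK value below is illustrative, not the kernel's.

	#include <stdbool.h>
	#include <stdio.h>

	#define HF_GIF_MASK	(1UL << 1)	/* illustrative bit position only */

	struct toy_vcpu { unsigned long hflags; };

	static void enable_gif(struct toy_vcpu *v)  { v->hflags |= HF_GIF_MASK; }
	static void disable_gif(struct toy_vcpu *v) { v->hflags &= ~HF_GIF_MASK; }
	static bool gif_set(struct toy_vcpu *v)     { return !!(v->hflags & HF_GIF_MASK); }

	int main(void)
	{
		struct toy_vcpu v = { 0 };

		enable_gif(&v);
		printf("gif=%d\n", gif_set(&v));	/* prints gif=1 */
		disable_gif(&v);
		printf("gif=%d\n", gif_set(&v));	/* prints gif=0 */
		return 0;
	}
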
475 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) in svm_set_efer() argument
477 vcpu->arch.efer = efer; in svm_set_efer()
481 to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
482 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_efer()
491 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) in svm_get_interrupt_shadow() argument
493 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow()
501 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) in svm_set_interrupt_shadow() argument
503 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow()
512 static void skip_emulated_instruction(struct kvm_vcpu *vcpu) in skip_emulated_instruction() argument
514 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction()
522 if (emulate_instruction(vcpu, EMULTYPE_SKIP) != in skip_emulated_instruction()
527 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) in skip_emulated_instruction()
529 __func__, kvm_rip_read(vcpu), svm->next_rip); in skip_emulated_instruction()
531 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
532 svm_set_interrupt_shadow(vcpu, 0); in skip_emulated_instruction()
535 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, in svm_queue_exception() argument
539 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception()
550 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
559 skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
560 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
596 static void svm_init_osvw(struct kvm_vcpu *vcpu) in svm_init_osvw() argument
602 vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3; in svm_init_osvw()
603 vcpu->arch.osvw.status = osvw_status & ~(6ULL); in svm_init_osvw()
614 vcpu->arch.osvw.status |= 1; in svm_init_osvw()
960 static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu) in svm_read_tsc_offset() argument
962 struct vcpu_svm *svm = to_svm(vcpu); in svm_read_tsc_offset()
967 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in svm_write_tsc_offset() argument
969 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset()
972 if (is_guest_mode(vcpu)) { in svm_write_tsc_offset()
977 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in svm_write_tsc_offset()
986 static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment) in svm_adjust_tsc_offset_guest() argument
988 struct vcpu_svm *svm = to_svm(vcpu); in svm_adjust_tsc_offset_guest()
991 if (is_guest_mode(vcpu)) in svm_adjust_tsc_offset_guest()
994 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in svm_adjust_tsc_offset_guest()
1006 svm->vcpu.fpu_active = 1; in init_vmcb()
1007 svm->vcpu.arch.hflags = 0; in init_vmcb()
1074 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1076 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1078 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1084 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1085 kvm_mmu_reset_context(&svm->vcpu); in init_vmcb()
1097 save->g_pat = svm->vcpu.arch.pat; in init_vmcb()
1104 svm->vcpu.arch.hflags = 0; in init_vmcb()
1116 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in svm_vcpu_reset() argument
1118 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset()
1123 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_vcpu_reset()
1125 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_vcpu_reset()
1126 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_vcpu_reset()
1130 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy); in svm_vcpu_reset()
1131 kvm_register_write(vcpu, VCPU_REGS_RDX, eax); in svm_vcpu_reset()
1149 err = kvm_vcpu_init(&svm->vcpu, kvm, id); in svm_create_vcpu()
1184 svm_init_osvw(&svm->vcpu); in svm_create_vcpu()
1186 return &svm->vcpu; in svm_create_vcpu()
1195 kvm_vcpu_uninit(&svm->vcpu); in svm_create_vcpu()
1202 static void svm_free_vcpu(struct kvm_vcpu *vcpu) in svm_free_vcpu() argument
1204 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu()
1210 kvm_vcpu_uninit(vcpu); in svm_free_vcpu()
1214 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in svm_vcpu_load() argument
1216 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load()
1219 if (unlikely(cpu != vcpu->cpu)) { in svm_vcpu_load()
1225 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); in svm_vcpu_load()
1235 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; in svm_vcpu_load()
1243 static void svm_vcpu_put(struct kvm_vcpu *vcpu) in svm_vcpu_put() argument
1245 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put()
1248 ++vcpu->stat.host_state_reload; in svm_vcpu_put()
1263 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) in svm_get_rflags() argument
1265 return to_svm(vcpu)->vmcb->save.rflags; in svm_get_rflags()
1268 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in svm_set_rflags() argument
1275 to_svm(vcpu)->vmcb->save.rflags = rflags; in svm_set_rflags()
1278 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) in svm_cache_reg() argument
1283 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in svm_cache_reg()
1300 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) in svm_seg() argument
1302 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_seg()
1318 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) in svm_get_segment_base() argument
1320 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_get_segment_base()
1325 static void svm_get_segment(struct kvm_vcpu *vcpu, in svm_get_segment() argument
1328 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_get_segment()
1388 var->dpl = to_svm(vcpu)->vmcb->save.cpl; in svm_get_segment()
1393 static int svm_get_cpl(struct kvm_vcpu *vcpu) in svm_get_cpl() argument
1395 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_get_cpl()
1400 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_get_idt() argument
1402 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt()
1408 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_set_idt() argument
1410 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt()
1417 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_get_gdt() argument
1419 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt()
1425 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) in svm_set_gdt() argument
1427 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt()
1434 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) in svm_decache_cr0_guest_bits() argument
1438 static void svm_decache_cr3(struct kvm_vcpu *vcpu) in svm_decache_cr3() argument
1442 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) in svm_decache_cr4_guest_bits() argument
1448 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1451 if (!svm->vcpu.fpu_active) in update_cr0_intercept()
1459 if (gcr0 == *hcr0 && svm->vcpu.fpu_active) { in update_cr0_intercept()
1468 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in svm_set_cr0() argument
1470 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0()
1473 if (vcpu->arch.efer & EFER_LME) { in svm_set_cr0()
1474 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { in svm_set_cr0()
1475 vcpu->arch.efer |= EFER_LMA; in svm_set_cr0()
1479 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { in svm_set_cr0()
1480 vcpu->arch.efer &= ~EFER_LMA; in svm_set_cr0()
1485 vcpu->arch.cr0 = cr0; in svm_set_cr0()
1490 if (!vcpu->fpu_active) in svm_set_cr0()
1497 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in svm_set_cr0()
1504 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in svm_set_cr4() argument
1507 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; in svm_set_cr4()
1513 svm_flush_tlb(vcpu); in svm_set_cr4()
1515 vcpu->arch.cr4 = cr4; in svm_set_cr4()
1519 to_svm(vcpu)->vmcb->save.cr4 = cr4; in svm_set_cr4()
1520 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); in svm_set_cr4()
1524 static void svm_set_segment(struct kvm_vcpu *vcpu, in svm_set_segment() argument
1527 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment()
1528 struct vmcb_seg *s = svm_seg(vcpu, seg); in svm_set_segment()
1558 static void update_bp_intercept(struct kvm_vcpu *vcpu) in update_bp_intercept() argument
1560 struct vcpu_svm *svm = to_svm(vcpu); in update_bp_intercept()
1564 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { in update_bp_intercept()
1565 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in update_bp_intercept()
1568 vcpu->guest_debug = 0; in update_bp_intercept()
1585 static u64 svm_get_dr6(struct kvm_vcpu *vcpu) in svm_get_dr6() argument
1587 return to_svm(vcpu)->vmcb->save.dr6; in svm_get_dr6()
1590 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) in svm_set_dr6() argument
1592 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr6()
1598 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) in svm_sync_dirty_debug_regs() argument
1600 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs()
1602 get_debugreg(vcpu->arch.db[0], 0); in svm_sync_dirty_debug_regs()
1603 get_debugreg(vcpu->arch.db[1], 1); in svm_sync_dirty_debug_regs()
1604 get_debugreg(vcpu->arch.db[2], 2); in svm_sync_dirty_debug_regs()
1605 get_debugreg(vcpu->arch.db[3], 3); in svm_sync_dirty_debug_regs()
1606 vcpu->arch.dr6 = svm_get_dr6(vcpu); in svm_sync_dirty_debug_regs()
1607 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1609 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in svm_sync_dirty_debug_regs()
1613 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) in svm_set_dr7() argument
1615 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7()
1632 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu)) in pf_interception()
1633 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); in pf_interception()
1634 r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in pf_interception()
1656 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1658 if (!(svm->vcpu.guest_debug & in db_interception()
1661 kvm_queue_exception(&svm->vcpu, DB_VECTOR); in db_interception()
1667 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) in db_interception()
1672 if (svm->vcpu.guest_debug & in db_interception()
1686 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1698 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); in ud_interception()
1700 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in ud_interception()
1706 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1710 static void svm_fpu_activate(struct kvm_vcpu *vcpu) in svm_fpu_activate() argument
1712 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_activate()
1716 svm->vcpu.fpu_active = 1; in svm_fpu_activate()
1722 svm_fpu_activate(&svm->vcpu); in nm_interception()
1774 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1797 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1812 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception() local
1817 ++svm->vcpu.stat.io_exits; in io_interception()
1821 return emulate_instruction(vcpu, 0) == EMULATE_DONE; in io_interception()
1826 skip_emulated_instruction(&svm->vcpu); in io_interception()
1828 return kvm_fast_pio_out(vcpu, size, port); in io_interception()
1838 ++svm->vcpu.stat.irq_exits; in intr_interception()
1849 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; in halt_interception()
1850 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
1855 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmmcall_interception()
1856 kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
1860 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu) in nested_svm_get_tdp_cr3() argument
1862 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3()
1867 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index) in nested_svm_get_tdp_pdptr() argument
1869 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr()
1874 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte, in nested_svm_get_tdp_pdptr()
1881 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, in nested_svm_set_tdp_cr3() argument
1884 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_set_tdp_cr3()
1888 svm_flush_tlb(vcpu); in nested_svm_set_tdp_cr3()
1891 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, in nested_svm_inject_npf_exit() argument
1894 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit()
1920 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) in nested_svm_init_mmu_context() argument
1922 WARN_ON(mmu_is_nested(vcpu)); in nested_svm_init_mmu_context()
1923 kvm_init_shadow_mmu(vcpu); in nested_svm_init_mmu_context()
1924 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; in nested_svm_init_mmu_context()
1925 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; in nested_svm_init_mmu_context()
1926 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr; in nested_svm_init_mmu_context()
1927 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; in nested_svm_init_mmu_context()
1928 vcpu->arch.mmu.shadow_root_level = get_npt_level(); in nested_svm_init_mmu_context()
1929 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu); in nested_svm_init_mmu_context()
1930 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_svm_init_mmu_context()
1933 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) in nested_svm_uninit_mmu_context() argument
1935 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_svm_uninit_mmu_context()
1940 if (!(svm->vcpu.arch.efer & EFER_SVME) in nested_svm_check_permissions()
1941 || !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
1942 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
1947 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
1959 if (!is_guest_mode(&svm->vcpu)) in nested_svm_check_exception()
1965 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_check_exception()
1977 if (!is_guest_mode(&svm->vcpu)) in nested_svm_intr()
1980 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_intr()
1983 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) in nested_svm_intr()
2016 if (!is_guest_mode(&svm->vcpu)) in nested_svm_nmi()
2034 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT); in nested_svm_map()
2043 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_map()
2073 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
2087 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
2098 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
2245 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
2257 nested_vmcb->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
2258 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
2259 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
2261 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
2262 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
2303 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_vmexit()
2309 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
2310 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
2321 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
2322 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
2323 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
2324 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
2327 svm->vcpu.arch.cr3 = hsave->save.cr3; in nested_svm_vmexit()
2329 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); in nested_svm_vmexit()
2331 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax); in nested_svm_vmexit()
2332 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp); in nested_svm_vmexit()
2333 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip); in nested_svm_vmexit()
2342 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
2343 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmexit()
2344 kvm_mmu_load(&svm->vcpu); in nested_svm_vmexit()
2371 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
2433 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
2434 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
2446 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
2447 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
2448 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
2449 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
2450 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
2456 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
2460 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) in nested_svm_vmrun()
2461 svm->vcpu.arch.hflags |= HF_HIF_MASK; in nested_svm_vmrun()
2463 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; in nested_svm_vmrun()
2466 kvm_mmu_unload(&svm->vcpu); in nested_svm_vmrun()
2468 nested_svm_init_mmu_context(&svm->vcpu); in nested_svm_vmrun()
2478 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); in nested_svm_vmrun()
2479 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); in nested_svm_vmrun()
2480 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); in nested_svm_vmrun()
2481 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); in nested_svm_vmrun()
2484 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; in nested_svm_vmrun()
2486 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); in nested_svm_vmrun()
2489 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmrun()
2491 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; in nested_svm_vmrun()
2492 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); in nested_svm_vmrun()
2493 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); in nested_svm_vmrun()
2494 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); in nested_svm_vmrun()
2513 svm_flush_tlb(&svm->vcpu); in nested_svm_vmrun()
2516 svm->vcpu.arch.hflags |= HF_VINTR_MASK; in nested_svm_vmrun()
2518 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; in nested_svm_vmrun()
2520 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { in nested_svm_vmrun()
2539 enter_guest_mode(&svm->vcpu); in nested_svm_vmrun()
2584 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmload_interception()
2585 skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2605 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in vmsave_interception()
2606 skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2620 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3); in vmrun_interception()
2647 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in stgi_interception()
2648 skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2649 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in stgi_interception()
2661 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in clgi_interception()
2662 skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2677 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception() local
2679 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), in invlpga_interception()
2680 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2683 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in invlpga_interception()
2685 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in invlpga_interception()
2686 skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2692 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); in skinit_interception()
2694 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2700 kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2706 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2707 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in xsetbv_interception()
2709 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2710 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; in xsetbv_interception()
2711 skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
2747 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2756 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2759 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2770 skip_emulated_instruction(&svm->vcpu); in task_switch_interception()
2775 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2777 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in task_switch_interception()
2778 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in task_switch_interception()
2779 svm->vcpu.run->internal.ndata = 0; in task_switch_interception()
2787 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in cpuid_interception()
2788 kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2794 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2796 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2797 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2798 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2805 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in invlpg_interception()
2807 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2808 skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2814 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; in emulate_on_interception()
2824 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2825 kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2833 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2839 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2877 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
2881 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2887 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2890 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2893 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2897 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2903 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2906 val = svm->vcpu.arch.cr2; in cr_interception()
2909 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2912 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2915 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
2919 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2922 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
2924 kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
2934 if (svm->vcpu.guest_debug == 0) { in dr_interception()
2941 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2952 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
2954 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
2955 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
2957 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
2959 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
2960 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
2963 skip_emulated_instruction(&svm->vcpu); in dr_interception()
2970 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
2973 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
2976 if (lapic_in_kernel(&svm->vcpu)) in cr8_write_interception()
2978 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
2984 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in svm_read_l1_tsc() argument
2986 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu)); in svm_read_l1_tsc()
2990 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in svm_get_msr() argument
2992 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr()
2997 kvm_scale_tsc(vcpu, rdtsc()); in svm_get_msr()
3057 return kvm_get_msr_common(vcpu, msr_info); in svm_get_msr()
3064 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in rdmsr_interception()
3069 if (svm_get_msr(&svm->vcpu, &msr_info)) { in rdmsr_interception()
3071 kvm_inject_gp(&svm->vcpu, 0); in rdmsr_interception()
3075 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, in rdmsr_interception()
3077 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, in rdmsr_interception()
3079 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in rdmsr_interception()
3080 skip_emulated_instruction(&svm->vcpu); in rdmsr_interception()
3085 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) in svm_set_vm_cr() argument
3087 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr()
3104 if (svm_dis && (vcpu->arch.efer & EFER_SVME)) in svm_set_vm_cr()
3110 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in svm_set_msr() argument
3112 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr()
3118 kvm_write_tsc(vcpu, msr); in svm_set_msr()
3150 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", in svm_set_msr()
3168 return svm_set_vm_cr(vcpu, data); in svm_set_msr()
3170 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); in svm_set_msr()
3173 return kvm_set_msr_common(vcpu, msr); in svm_set_msr()
3181 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); in wrmsr_interception()
3182 u64 data = kvm_read_edx_eax(&svm->vcpu); in wrmsr_interception()
3188 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; in wrmsr_interception()
3189 if (kvm_set_msr(&svm->vcpu, &msr)) { in wrmsr_interception()
3191 kvm_inject_gp(&svm->vcpu, 0); in wrmsr_interception()
3194 skip_emulated_instruction(&svm->vcpu); in wrmsr_interception()
3209 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
3213 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
3219 kvm_vcpu_on_spin(&(svm->vcpu)); in pause_interception()
3225 skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
3306 static void dump_vmcb(struct kvm_vcpu *vcpu) in dump_vmcb() argument
3308 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb()
3411 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) in svm_get_exit_info() argument
3413 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; in svm_get_exit_info()
3419 static int handle_exit(struct kvm_vcpu *vcpu) in handle_exit() argument
3421 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit()
3422 struct kvm_run *kvm_run = vcpu->run; in handle_exit()
3425 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); in handle_exit()
3428 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3430 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3439 if (is_guest_mode(vcpu)) { in handle_exit()
3465 dump_vmcb(vcpu); in handle_exit()
3481 kvm_queue_exception(vcpu, UD_VECTOR); in handle_exit()
3488 static void reload_tss(struct kvm_vcpu *vcpu) in reload_tss() argument
3508 static void svm_inject_nmi(struct kvm_vcpu *vcpu) in svm_inject_nmi() argument
3510 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi()
3513 vcpu->arch.hflags |= HF_NMI_MASK; in svm_inject_nmi()
3515 ++vcpu->stat.nmi_injections; in svm_inject_nmi()
3530 static void svm_set_irq(struct kvm_vcpu *vcpu) in svm_set_irq() argument
3532 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq()
3536 trace_kvm_inj_virq(vcpu->arch.interrupt.nr); in svm_set_irq()
3537 ++vcpu->stat.irq_injections; in svm_set_irq()
3539 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3543 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) in update_cr8_intercept() argument
3545 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept()
3547 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in update_cr8_intercept()
3559 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) in svm_set_virtual_x2apic_mode() argument
3564 static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu) in svm_cpu_uses_apicv() argument
3569 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu) in svm_load_eoi_exitmap() argument
3574 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) in svm_sync_pir_to_irr() argument
3579 static int svm_nmi_allowed(struct kvm_vcpu *vcpu) in svm_nmi_allowed() argument
3581 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed()
3585 !(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_allowed()
3591 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) in svm_get_nmi_mask() argument
3593 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask()
3595 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3598 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) in svm_set_nmi_mask() argument
3600 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask()
3603 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3606 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3611 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) in svm_interrupt_allowed() argument
3613 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed()
3621 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF); in svm_interrupt_allowed()
3623 if (is_guest_mode(vcpu)) in svm_interrupt_allowed()
3624 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); in svm_interrupt_allowed()
3629 static void enable_irq_window(struct kvm_vcpu *vcpu) in enable_irq_window() argument
3631 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window()
3645 static void enable_nmi_window(struct kvm_vcpu *vcpu) in enable_nmi_window() argument
3647 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window()
3649 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3666 static void svm_flush_tlb(struct kvm_vcpu *vcpu) in svm_flush_tlb() argument
3668 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb()
3676 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) in svm_prepare_guest_switch() argument
3680 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) in sync_cr8_to_lapic() argument
3682 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic()
3684 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in sync_cr8_to_lapic()
3689 kvm_set_cr8(vcpu, cr8); in sync_cr8_to_lapic()
3693 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) in sync_lapic_to_cr8() argument
3695 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8()
3698 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) in sync_lapic_to_cr8()
3701 cr8 = kvm_get_cr8(vcpu); in sync_lapic_to_cr8()
3719 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3720 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3721 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3722 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3725 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3726 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3727 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3732 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3739 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3749 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3750 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3751 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3757 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3760 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3763 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
3770 static void svm_cancel_injection(struct kvm_vcpu *vcpu) in svm_cancel_injection() argument
3772 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection()
3781 static void svm_vcpu_run(struct kvm_vcpu *vcpu) in svm_vcpu_run() argument
3783 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run()
3785 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3786 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3787 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3798 sync_lapic_to_cr8(vcpu); in svm_vcpu_run()
3800 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3854 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), in svm_vcpu_run()
3855 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), in svm_vcpu_run()
3856 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), in svm_vcpu_run()
3857 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), in svm_vcpu_run()
3858 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), in svm_vcpu_run()
3859 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP])) in svm_vcpu_run()
3861 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), in svm_vcpu_run()
3862 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), in svm_vcpu_run()
3863 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), in svm_vcpu_run()
3864 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), in svm_vcpu_run()
3865 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), in svm_vcpu_run()
3866 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), in svm_vcpu_run()
3867 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), in svm_vcpu_run()
3868 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) in svm_vcpu_run()
3888 reload_tss(vcpu); in svm_vcpu_run()
3892 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3893 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3894 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3895 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3898 kvm_before_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3905 kvm_after_handle_nmi(&svm->vcpu); in svm_vcpu_run()
3907 sync_cr8_to_lapic(vcpu); in svm_vcpu_run()
3918 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); in svm_vcpu_run()
3919 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); in svm_vcpu_run()
3933 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) in svm_set_cr3() argument
3935 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr3()
3939 svm_flush_tlb(vcpu); in svm_set_cr3()
3942 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) in set_tdp_cr3() argument
3944 struct vcpu_svm *svm = to_svm(vcpu); in set_tdp_cr3()
3950 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); in set_tdp_cr3()
3953 svm_flush_tlb(vcpu); in set_tdp_cr3()
3968 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) in svm_patch_hypercall() argument
3993 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) in svm_get_mt_mask() argument
3998 static void svm_cpuid_update(struct kvm_vcpu *vcpu) in svm_cpuid_update() argument
4000 struct vcpu_svm *svm = to_svm(vcpu); in svm_cpuid_update()
4003 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu); in svm_cpuid_update()
4063 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu) in svm_fpu_deactivate() argument
4065 struct vcpu_svm *svm = to_svm(vcpu); in svm_fpu_deactivate()
4134 static int svm_check_intercept(struct kvm_vcpu *vcpu, in svm_check_intercept() argument
4138 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept()
4172 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; in svm_check_intercept()
4254 static void svm_handle_external_intr(struct kvm_vcpu *vcpu) in svm_handle_external_intr() argument
4259 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) in svm_sched_in() argument