Lines Matching refs:arch

1520 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1521 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1721 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1755 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1989 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; in vmx_fpu_activate()
1991 vcpu->arch.cr0_guest_owned_bits &= in vmx_fpu_activate()
1993 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_activate()
2022 vcpu->arch.cr0_guest_owned_bits = 0; in vmx_fpu_deactivate()
2023 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_deactivate()
2035 (vcpu->arch.cr0 & X86_CR0_TS); in vmx_fpu_deactivate()
2038 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); in vmx_fpu_deactivate()
2045 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { in vmx_get_rflags()
2046 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_get_rflags()
2060 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_set_rflags()
2146 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_queue_exception()
2154 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2191 apic_x2apic_mode(vcpu->arch.apic)) { in vmx_set_msr_bitmap()
2235 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2286 vcpu->arch.tsc_catchup = 1; in vmx_set_tsc_khz()
2287 vcpu->arch.tsc_always_catchup = 1; in vmx_set_tsc_khz()
2697 data = vcpu->arch.ia32_xss; in vmx_get_msr()
2771 vcpu->arch.pat = data; in vmx_set_msr()
2799 vcpu->arch.ia32_xss = data; in vmx_set_msr()
2800 if (vcpu->arch.ia32_xss != host_xss) in vmx_set_msr()
2802 vcpu->arch.ia32_xss, host_xss); in vmx_set_msr()
2836 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in vmx_cache_reg()
2839 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2842 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
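
Most of the regs_avail hits above (vmx_segment_cache_test_set(), vmx_get_rflags(), vmx_set_rflags(), vmx_cache_reg()) are the same lazy-read pattern: a per-register bit records whether the value cached in vcpu->arch.regs[] is still valid, so the VMCS field is read at most once per exit. A minimal user-space sketch of that pattern follows; reg_cache, slow_read() and the register list are illustrative names, not KVM's API.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical model of the regs_avail pattern: one bit per register
 * says whether the cached value is valid for the current exit. */
enum { REG_RSP, REG_RIP, REG_RFLAGS, NR_REGS };

struct reg_cache {
	uint32_t avail;              /* bit n set => regs[n] is up to date */
	uint64_t regs[NR_REGS];
};

static int slow_reads;           /* counts simulated VMCS accesses */

/* Stand-in for vmcs_readl(): pretend register n lives at 0x1000 + n. */
static uint64_t slow_read(int reg)
{
	slow_reads++;
	return 0x1000 + reg;
}

static uint64_t cache_reg(struct reg_cache *c, int reg)
{
	if (!(c->avail & (1u << reg))) {
		c->regs[reg] = slow_read(reg);   /* first access: really read */
		c->avail |= 1u << reg;           /* remember it is now cached */
	}
	return c->regs[reg];
}

int main(void)
{
	struct reg_cache c = { .avail = 0 };

	cache_reg(&c, REG_RIP);
	cache_reg(&c, REG_RIP);          /* second access hits the cache */
	printf("slow reads: %d\n", slow_reads);   /* prints 1 */

	c.avail = 0;                     /* what vcpu_run does after VM entry */
	cache_reg(&c, REG_RIP);
	printf("slow reads: %d\n", slow_reads);   /* prints 2 */
	return 0;
}
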
3388 if (!vcpu->kvm->arch.tss_addr) in enter_rmode()
3394 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); in enter_rmode()
3430 vcpu->arch.efer = efer; in vmx_set_efer()
3458 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
3464 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
3473 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in vmx_flush_tlb()
3475 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); in vmx_flush_tlb()
3481 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3483 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3484 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3490 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_decache_cr3()
3491 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in vmx_decache_cr3()
3496 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3498 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3499 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
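
The cr0_guest_owned_bits lines (vmx_fpu_activate()/vmx_fpu_deactivate(), vmx_decache_cr0_guest_bits(), and later vmcs12_guest_cr0()) all revolve around one VMX rule: for bits set in CR0_GUEST_HOST_MASK the guest reads the CR0 read shadow, for clear bits it reads the real CR0, and the listing shows KVM writing ~cr0_guest_owned_bits into that mask. A hedged sketch of how the guest-visible value is assembled; guest_visible_cr0() is an illustrative helper, not a kernel function.

#include <stdio.h>
#include <stdint.h>

#define X86_CR0_PE 0x00000001UL
#define X86_CR0_TS 0x00000008UL
#define X86_CR0_PG 0x80000000UL

/* For bits set in the guest/host mask the guest sees the read shadow;
 * for bits clear in the mask it sees the real (hardware) CR0.  KVM
 * writes ~cr0_guest_owned_bits into CR0_GUEST_HOST_MASK, so "guest
 * owned" bits are exactly the ones read live from hardware. */
static uint64_t guest_visible_cr0(uint64_t hw_cr0, uint64_t read_shadow,
				  uint64_t guest_owned_bits)
{
	uint64_t mask = ~guest_owned_bits;	/* host-owned bits */

	return (read_shadow & mask) | (hw_cr0 & guest_owned_bits);
}

int main(void)
{
	/* Guest owns only CR0.TS, as in the fpu_active case above. */
	uint64_t owned  = X86_CR0_TS;
	uint64_t shadow = X86_CR0_PE | X86_CR0_PG;	/* what the guest set */
	uint64_t hw     = X86_CR0_PE | X86_CR0_PG | X86_CR0_TS;

	printf("guest sees CR0 = %#llx\n",
	       (unsigned long long)guest_visible_cr0(hw, shadow, owned));
	/* TS comes from hardware, PE/PG from the shadow */
	return 0;
}
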
3504 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_load_pdptrs()
3507 (unsigned long *)&vcpu->arch.regs_dirty)) in ept_load_pdptrs()
3520 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3530 (unsigned long *)&vcpu->arch.regs_avail); in ept_save_pdptrs()
3532 (unsigned long *)&vcpu->arch.regs_dirty); in ept_save_pdptrs()
3541 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) in ept_update_paging_mode_cr0()
3549 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3557 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3584 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3600 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3632 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; in vmx_set_cr3()
3667 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
4048 fn = kvm->arch.tss_addr >> PAGE_SHIFT; in init_rmode_tss()
4084 if (likely(kvm->arch.ept_identity_pagetable_done)) in init_rmode_identity_map()
4087 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; in init_rmode_identity_map()
4106 kvm->arch.ept_identity_pagetable_done = true; in init_rmode_identity_map()
4138 if (kvm->arch.apic_access_page_done) in alloc_apic_access_page()
4159 kvm->arch.apic_access_page_done = true; in alloc_apic_access_page()
4172 BUG_ON(kvm->arch.ept_identity_pagetable_done); in alloc_identity_pagetable()
4177 kvm->arch.ept_identity_map_addr; in alloc_identity_pagetable()
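
init_rmode_tss() and init_rmode_identity_map() turn the guest-physical addresses kept in kvm->arch into page-frame numbers with a PAGE_SHIFT shift. A trivial sketch of that conversion, assuming 4 KiB pages and an example address (the real one is whatever userspace configured via KVM_SET_TSS_ADDR):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12			/* 4 KiB pages assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Guest-physical address -> guest page frame number and back. */
static uint64_t gpa_to_pfn(uint64_t gpa) { return gpa >> PAGE_SHIFT; }
static uint64_t pfn_to_gpa(uint64_t pfn) { return pfn << PAGE_SHIFT; }

int main(void)
{
	uint64_t tss_addr = 0xfeffd000;	/* example value only */

	printf("pfn=%#llx back=%#llx offset-in-page=%#llx\n",
	       (unsigned long long)gpa_to_pfn(tss_addr),
	       (unsigned long long)pfn_to_gpa(gpa_to_pfn(tss_addr)),
	       (unsigned long long)(tss_addr & (PAGE_SIZE - 1)));
	return 0;
}
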
4530 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4532 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4534 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4536 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4552 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4696 vmx->vcpu.arch.pat = host_pat; in vmx_vcpu_setup()
4738 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4796 __pa(vmx->vcpu.arch.apic->regs)); in vmx_vcpu_reset()
4808 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; in vmx_vcpu_reset()
4872 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
4879 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
4880 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
4886 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
4889 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4989 kvm->arch.tss_addr = addr; in vmx_set_tss_addr()
5001 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5034 if (vcpu->arch.halt_request) { in handle_rmode_exception()
5035 vcpu->arch.halt_request = 0; in handle_rmode_exception()
5155 vcpu->arch.dr6 &= ~15; in handle_exception()
5156 vcpu->arch.dr6 |= dr6 | DR6_RTM; in handle_exception()
5163 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; in handle_exception()
5164 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); in handle_exception()
5172 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5176 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; in handle_exception()
5177 kvm_run->debug.arch.exception = ex_no; in handle_exception()
5306 vcpu->arch.cr0 &= ~X86_CR0_TS; in handle_clts()
5413 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; in handle_dr()
5414 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5415 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5416 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5420 vcpu->arch.dr6 &= ~15; in handle_dr()
5421 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; in handle_dr()
5439 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5460 return vcpu->arch.dr6; in vmx_get_dr6()
5471 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5472 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5473 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5474 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5475 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5476 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5478 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
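
The dr6 updates in handle_exception() and handle_dr() follow the architectural rule that the B0-B3 status bits are sticky: software clears them and then ORs in whatever the current #DB reported, plus DR6_RTM, which reads as 1 outside an RTM region (handle_dr() additionally sets DR6_BD for the general-detect case). A small sketch of that merge; merge_dr6() is an illustrative helper, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define DR6_B0_B3   0xfUL        /* breakpoint condition bits */
#define DR6_RTM     (1UL << 16)  /* reads as 1 outside an RTM region */
#define DR6_FIXED_1 0xfffe0ff0UL /* bits that always read as 1 */

/* Hardware never clears the sticky B0-B3 bits on its own, so the
 * hypervisor clears them and then ORs in what the current exit
 * reported ("dr6 &= ~15; dr6 |= ... | DR6_RTM" in the listing). */
static uint64_t merge_dr6(uint64_t dr6, uint64_t reported)
{
	dr6 &= ~DR6_B0_B3;
	dr6 |= reported | DR6_RTM;
	return dr6;
}

int main(void)
{
	uint64_t dr6 = DR6_FIXED_1 | 0x1;	/* stale B0 from an old hit */

	dr6 = merge_dr6(dr6, 0x2);		/* breakpoint 1 just hit */
	printf("dr6 = %#llx\n", (unsigned long long)dr6); /* B1 set, B0 clear */
	return 0;
}
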
5498 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_rdmsr()
5510 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; in handle_rdmsr()
5511 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; in handle_rdmsr()
5519 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_wrmsr()
5520 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) in handle_wrmsr()
5521 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); in handle_wrmsr()
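
handle_rdmsr() and handle_wrmsr() are mostly the EDX:EAX split that the RDMSR/WRMSR instructions mandate for 64-bit MSR values, applied to vcpu->arch.regs[]. A self-contained sketch of the split and join; split_msr()/join_msr() are illustrative names.

#include <stdio.h>
#include <stdint.h>

/* RDMSR returns a 64-bit MSR value split across EDX:EAX, and WRMSR
 * takes it the same way; the handlers above do this split and join
 * on the cached RAX/RDX/RCX registers. */
static void split_msr(uint64_t data, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)(data & 0xffffffffu);    /* low half  -> EAX */
	*edx = (uint32_t)(data >> 32);            /* high half -> EDX */
}

static uint64_t join_msr(uint32_t eax, uint32_t edx)
{
	return (uint64_t)eax | ((uint64_t)edx << 32);
}

int main(void)
{
	uint32_t eax, edx;
	uint64_t val = 0x123456789abcdef0ULL;

	split_msr(val, &eax, &edx);
	printf("eax=%#x edx=%#x roundtrip=%#llx\n", eax, edx,
	       (unsigned long long)join_msr(eax, edx));
	return 0;
}
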
5696 vcpu->arch.nmi_injected = false; in handle_task_switch()
5788 vcpu->arch.exit_qualification = exit_qualification; in handle_ept_violation()
5942 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
5943 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
6510 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, in nested_vmx_check_vmptr()
7038 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, in handle_vmread()
7074 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, in handle_vmwrite()
7167 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, in handle_vmptrst()
7223 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, in handle_invept()
7382 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; in nested_vmx_exit_handled_msr()
7788 vcpu->arch.nmi_pending) { in vmx_handle_exit()
8088 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
8102 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
8111 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8121 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8190 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8191 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
8192 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8193 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
8290 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), in vmx_vcpu_run()
8291 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), in vmx_vcpu_run()
8292 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), in vmx_vcpu_run()
8293 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), in vmx_vcpu_run()
8294 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), in vmx_vcpu_run()
8295 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), in vmx_vcpu_run()
8296 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), in vmx_vcpu_run()
8298 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), in vmx_vcpu_run()
8299 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), in vmx_vcpu_run()
8300 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), in vmx_vcpu_run()
8301 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), in vmx_vcpu_run()
8302 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), in vmx_vcpu_run()
8303 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), in vmx_vcpu_run()
8304 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), in vmx_vcpu_run()
8305 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), in vmx_vcpu_run()
8307 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), in vmx_vcpu_run()
8335 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) in vmx_vcpu_run()
8340 vcpu->arch.regs_dirty = 0; in vmx_vcpu_run()
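
Before VM entry, vmx_vcpu_run() writes GUEST_RSP/GUEST_RIP back only if the corresponding regs_dirty bit is set, and after the exit it resets regs_avail and regs_dirty so stale cached values are never reused. A sketch of that write-back side of the cache, with hypothetical names (vcpu_state, flush_dirty()):

#include <stdio.h>
#include <stdint.h>

enum { REG_RSP, REG_RIP, NR_REGS };

struct vcpu_state {
	uint32_t regs_dirty;         /* bit n set: software changed regs[n] */
	uint64_t regs[NR_REGS];
	uint64_t vmcs[NR_REGS];      /* stand-in for GUEST_RSP/GUEST_RIP */
};

static int vmcs_writes;

/* Only registers the emulator actually modified are flushed to the
 * VMCS before VM entry; everything else is still current in hardware. */
static void flush_dirty(struct vcpu_state *v)
{
	for (int r = 0; r < NR_REGS; r++) {
		if (v->regs_dirty & (1u << r)) {
			v->vmcs[r] = v->regs[r];
			vmcs_writes++;
		}
	}
	/* After VM entry the cache is stale and nothing is dirty. */
	v->regs_dirty = 0;
}

int main(void)
{
	struct vcpu_state v = { .regs = { 0x1000, 0x2000 } };

	v.regs[REG_RIP] = 0x2004;            /* e.g. an instruction was emulated */
	v.regs_dirty |= 1u << REG_RIP;

	flush_dirty(&v);
	printf("vmcs writes: %d, GUEST_RIP=%#llx\n", vmcs_writes,
	       (unsigned long long)v.vmcs[REG_RIP]);   /* 1 write, 0x2004 */
	return 0;
}
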
8445 if (!kvm->arch.ept_identity_map_addr) in vmx_create_vcpu()
8446 kvm->arch.ept_identity_map_addr = in vmx_create_vcpu()
8602 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); in nested_ept_inject_page_fault()
8620 vcpu->arch.mmu.set_cr3 = vmx_set_cr3; in nested_ept_init_mmu_context()
8621 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; in nested_ept_init_mmu_context()
8622 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
8624 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
8629 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_ept_uninit_mmu_context()
8740 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
8752 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
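
vmx_start_preemption_timer() refuses to arm the timer when virtual_tsc_khz is zero and otherwise converts a TSC-based timeout into nanoseconds for an hrtimer; vmx_get_preemption_timer_value() later does the reverse. The underlying arithmetic is plain kHz-based unit conversion, sketched below with illustrative helper names (the extra preemption-timer rate shift the kernel applies is omitted):

#include <stdio.h>
#include <stdint.h>

/* tsc_khz is TSC ticks per millisecond, so the listing's do_div()
 * calls boil down to the conversions below, done in 64-bit math. */
static uint64_t tsc_cycles_to_ns(uint64_t cycles, uint32_t tsc_khz)
{
	return cycles * 1000000ull / tsc_khz;
}

static uint64_t ns_to_tsc_cycles(uint64_t ns, uint32_t tsc_khz)
{
	return ns * tsc_khz / 1000000ull;
}

int main(void)
{
	uint32_t tsc_khz = 2400000;          /* 2.4 GHz guest TSC */
	uint64_t cycles = 240000;            /* some timer value in cycles */

	uint64_t ns = tsc_cycles_to_ns(cycles, tsc_khz);
	printf("%llu cycles = %llu ns = %llu cycles back\n",
	       (unsigned long long)cycles, (unsigned long long)ns,
	       (unsigned long long)ns_to_tsc_cycles(ns, tsc_khz));
	return 0;
}
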
8951 if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8) in nested_vmx_msr_check_common()
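
The x2APIC check in nested_vmx_msr_check_common() relies on the x2APIC MSRs occupying the 0x800-0x8ff block, so a single shift-and-compare classifies the index; vmx_set_msr_bitmap() earlier keys off the same mode. A one-line sketch of that range test, with an illustrative helper name:

#include <stdio.h>
#include <stdint.h>

/* x2APIC MSRs live in the 0x800-0x8ff range, so "index >> 8 == 0x8"
 * is a single-comparison range check. */
static int is_x2apic_msr(uint32_t index)
{
	return (index >> 8) == 0x8;
}

int main(void)
{
	printf("%d %d %d\n", is_x2apic_msr(0x800),       /* 1: x2APIC ID        */
			     is_x2apic_msr(0x8ff),       /* 1: last in the range */
			     is_x2apic_msr(0xc0000080)); /* 0: EFER              */
	return 0;
}
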
9114 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
9286 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
9287 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
9305 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
9307 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9337 vcpu->arch.efer = vmcs12->guest_ia32_efer; in prepare_vmcs02()
9339 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in prepare_vmcs02()
9341 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in prepare_vmcs02()
9343 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
9364 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; in prepare_vmcs02()
9599 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
9602 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
9609 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
9612 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
9621 if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { in vmcs12_save_pending_event()
9622 nr = vcpu->arch.exception.nr; in vmcs12_save_pending_event()
9627 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
9632 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
9635 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
9639 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
9642 } else if (vcpu->arch.interrupt.pending) { in vmcs12_save_pending_event()
9643 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
9646 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
9649 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
9669 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { in vmx_check_nested_events()
9671 vcpu->arch.interrupt.pending) in vmx_check_nested_events()
9680 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
9705 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
9774 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in prepare_vmcs12()
9820 vmcs12->guest_ia32_efer = vcpu->arch.efer; in prepare_vmcs12()
9860 vcpu->arch.nmi_injected = false; in prepare_vmcs12()
9880 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
9882 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
9884 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
9885 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
9903 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); in load_vmcs12_host_state()
9904 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in load_vmcs12_host_state()
9910 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
9919 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; in load_vmcs12_host_state()
9943 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
10098 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_vmx_vmexit()
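
Finally, the EFER handling in prepare_vmcs02() and load_vmcs12_host_state() loads the EFER image from vmcs12 and then forces LMA/LME to agree with the IA-32e-mode entry (or exit) control, since that control, not the loaded image, decides the mode after the transition. A hedged sketch of that fixup, with a plain flag standing in for the VMCS control bit and fixup_efer() as an illustrative helper:

#include <stdio.h>
#include <stdint.h>

#define EFER_SCE (1ULL << 0)    /* syscall enable   */
#define EFER_LME (1ULL << 8)    /* long mode enable */
#define EFER_LMA (1ULL << 10)   /* long mode active */

/* Force LMA/LME to match the IA-32e-mode control for the transition. */
static uint64_t fixup_efer(uint64_t efer, int ia32e_mode)
{
	if (ia32e_mode)
		efer |= EFER_LMA | EFER_LME;
	else
		efer &= ~(EFER_LMA | EFER_LME);
	return efer;
}

int main(void)
{
	uint64_t efer = EFER_SCE;	/* EFER image lacking LMA/LME */

	printf("64-bit entry: efer=%#llx\n",
	       (unsigned long long)fixup_efer(efer, 1));
	printf("32-bit entry: efer=%#llx\n",
	       (unsigned long long)fixup_efer(efer | EFER_LME, 0));
	return 0;
}
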