Lines Matching refs:arch in arch/x86/kvm/vmx.c

1594 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1595 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1795 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1829 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
2089 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { in vmx_vcpu_load()
2090 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; in vmx_vcpu_load()
2134 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; in vmx_fpu_activate()
2136 vcpu->arch.cr0_guest_owned_bits &= in vmx_fpu_activate()
2138 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_activate()
2167 vcpu->arch.cr0_guest_owned_bits = 0; in vmx_fpu_deactivate()
2168 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in vmx_fpu_deactivate()
2180 (vcpu->arch.cr0 & X86_CR0_TS); in vmx_fpu_deactivate()
2183 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); in vmx_fpu_deactivate()
2190 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) { in vmx_get_rflags()
2191 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_get_rflags()
2205 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail); in vmx_set_rflags()
2291 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_queue_exception()
2299 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2335 else if (vcpu->arch.apic_base & X2APIC_ENABLE) { in vmx_set_msr_bitmap()
2379 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2831 msr_info->data = vcpu->arch.ia32_xss; in vmx_get_msr()
2904 vcpu->arch.pat = data; in vmx_set_msr()
2932 vcpu->arch.ia32_xss = data; in vmx_set_msr()
2933 if (vcpu->arch.ia32_xss != host_xss) in vmx_set_msr()
2935 vcpu->arch.ia32_xss, host_xss); in vmx_set_msr()
2969 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in vmx_cache_reg()
2972 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2975 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
3525 if (!vcpu->kvm->arch.tss_addr) in enter_rmode()
3531 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); in enter_rmode()
3567 vcpu->arch.efer = efer; in vmx_set_efer()
3595 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
3601 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
3610 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __vmx_flush_tlb()
3612 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); in __vmx_flush_tlb()
3623 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3625 vcpu->arch.cr0 &= ~cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3626 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; in vmx_decache_cr0_guest_bits()
3632 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_decache_cr3()
3633 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in vmx_decache_cr3()
3638 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3640 vcpu->arch.cr4 &= ~cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3641 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; in vmx_decache_cr4_guest_bits()
3646 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_load_pdptrs()
3649 (unsigned long *)&vcpu->arch.regs_dirty)) in ept_load_pdptrs()
3662 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3672 (unsigned long *)&vcpu->arch.regs_avail); in ept_save_pdptrs()
3674 (unsigned long *)&vcpu->arch.regs_dirty); in ept_save_pdptrs()
3683 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) in ept_update_paging_mode_cr0()
3691 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3699 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3726 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3742 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3774 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; in vmx_set_cr3()
3809 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
4190 fn = kvm->arch.tss_addr >> PAGE_SHIFT; in init_rmode_tss()
4226 if (likely(kvm->arch.ept_identity_pagetable_done)) in init_rmode_identity_map()
4229 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; in init_rmode_identity_map()
4248 kvm->arch.ept_identity_pagetable_done = true; in init_rmode_identity_map()
4279 if (kvm->arch.apic_access_page_done) in alloc_apic_access_page()
4297 kvm->arch.apic_access_page_done = true; in alloc_apic_access_page()
4309 BUG_ON(kvm->arch.ept_identity_pagetable_done); in alloc_identity_pagetable()
4312 kvm->arch.ept_identity_map_addr, PAGE_SIZE); in alloc_identity_pagetable()
4678 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4680 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4682 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4684 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4700 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4840 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in vmx_vcpu_setup()
4882 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4944 __pa(vcpu->arch.apic->regs)); in vmx_vcpu_reset()
4957 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
5021 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
5028 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
5029 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
5035 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
5038 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
5133 kvm->arch.tss_addr = addr; in vmx_set_tss_addr()
5145 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5178 if (vcpu->arch.halt_request) { in handle_rmode_exception()
5179 vcpu->arch.halt_request = 0; in handle_rmode_exception()
5299 vcpu->arch.dr6 &= ~15; in handle_exception()
5300 vcpu->arch.dr6 |= dr6 | DR6_RTM; in handle_exception()
5307 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; in handle_exception()
5308 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); in handle_exception()
5316 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5320 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; in handle_exception()
5321 kvm_run->debug.arch.exception = ex_no; in handle_exception()
5450 vcpu->arch.cr0 &= ~X86_CR0_TS; in handle_clts()
5557 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; in handle_dr()
5558 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5559 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5560 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5564 vcpu->arch.dr6 &= ~15; in handle_dr()
5565 vcpu->arch.dr6 |= DR6_BD | DR6_RTM; in handle_dr()
5583 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5604 return vcpu->arch.dr6; in vmx_get_dr6()
5615 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5616 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5617 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5618 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5619 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5620 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5622 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
5642 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_rdmsr()
5656 vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u; in handle_rdmsr()
5657 vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u; in handle_rdmsr()
5665 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; in handle_wrmsr()
5666 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) in handle_wrmsr()
5667 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); in handle_wrmsr()
5831 vcpu->arch.nmi_injected = false; in handle_task_switch()
5920 vcpu->arch.exit_qualification = exit_qualification; in handle_ept_violation()
6005 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
6006 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
6650 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, in nested_vmx_check_vmptr()
7177 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva, in handle_vmread()
7213 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, in handle_vmwrite()
7304 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva, in handle_vmptrst()
7360 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand, in handle_invept()
7419 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid, in handle_invvpid()
7585 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX]; in nested_vmx_exit_handled_msr()
8143 vcpu->arch.nmi_pending) { in vmx_handle_exit()
8299 u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap; in vmx_load_eoi_exitmap()
8449 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
8463 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
8472 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8482 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
8551 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8552 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
8553 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) in vmx_vcpu_run()
8554 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
8651 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), in vmx_vcpu_run()
8652 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), in vmx_vcpu_run()
8653 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), in vmx_vcpu_run()
8654 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), in vmx_vcpu_run()
8655 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), in vmx_vcpu_run()
8656 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), in vmx_vcpu_run()
8657 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), in vmx_vcpu_run()
8659 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), in vmx_vcpu_run()
8660 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), in vmx_vcpu_run()
8661 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), in vmx_vcpu_run()
8662 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), in vmx_vcpu_run()
8663 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), in vmx_vcpu_run()
8664 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), in vmx_vcpu_run()
8665 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), in vmx_vcpu_run()
8666 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), in vmx_vcpu_run()
8668 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), in vmx_vcpu_run()
8696 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) in vmx_vcpu_run()
8701 vcpu->arch.regs_dirty = 0; in vmx_vcpu_run()
8805 if (!kvm->arch.ept_identity_map_addr) in vmx_create_vcpu()
8806 kvm->arch.ept_identity_map_addr = in vmx_create_vcpu()
8999 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); in nested_ept_inject_page_fault()
9017 vcpu->arch.mmu.set_cr3 = vmx_set_cr3; in nested_ept_init_mmu_context()
9018 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3; in nested_ept_init_mmu_context()
9019 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; in nested_ept_init_mmu_context()
9021 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; in nested_ept_init_mmu_context()
9026 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in nested_ept_uninit_mmu_context()
9137 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
9149 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); in vmx_start_preemption_timer()
9348 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8) in nested_vmx_msr_check_common()
9514 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); in prepare_vmcs02()
9686 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; in prepare_vmcs02()
9687 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in prepare_vmcs02()
9705 vcpu->arch.pat = vmcs12->guest_ia32_pat; in prepare_vmcs02()
9707 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9749 vcpu->arch.efer = vmcs12->guest_ia32_efer; in prepare_vmcs02()
9751 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in prepare_vmcs02()
9753 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in prepare_vmcs02()
9755 vmx_set_efer(vcpu, vcpu->arch.efer); in prepare_vmcs02()
9776 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; in prepare_vmcs02()
10011 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | in vmcs12_guest_cr0()
10014 vcpu->arch.cr0_guest_owned_bits)); in vmcs12_guest_cr0()
10021 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | in vmcs12_guest_cr4()
10024 vcpu->arch.cr4_guest_owned_bits)); in vmcs12_guest_cr4()
10033 if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { in vmcs12_save_pending_event()
10034 nr = vcpu->arch.exception.nr; in vmcs12_save_pending_event()
10039 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
10044 if (vcpu->arch.exception.has_error_code) { in vmcs12_save_pending_event()
10047 vcpu->arch.exception.error_code; in vmcs12_save_pending_event()
10051 } else if (vcpu->arch.nmi_injected) { in vmcs12_save_pending_event()
10054 } else if (vcpu->arch.interrupt.pending) { in vmcs12_save_pending_event()
10055 nr = vcpu->arch.interrupt.nr; in vmcs12_save_pending_event()
10058 if (vcpu->arch.interrupt.soft) { in vmcs12_save_pending_event()
10061 vcpu->arch.event_exit_inst_len; in vmcs12_save_pending_event()
10081 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { in vmx_check_nested_events()
10083 vcpu->arch.interrupt.pending) in vmx_check_nested_events()
10092 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
10117 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; in vmx_get_preemption_timer_value()
10186 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in prepare_vmcs12()
10232 vmcs12->guest_ia32_efer = vcpu->arch.efer; in prepare_vmcs12()
10272 vcpu->arch.nmi_injected = false; in prepare_vmcs12()
10292 vcpu->arch.efer = vmcs12->host_ia32_efer; in load_vmcs12_host_state()
10294 vcpu->arch.efer |= (EFER_LMA | EFER_LME); in load_vmcs12_host_state()
10296 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); in load_vmcs12_host_state()
10297 vmx_set_efer(vcpu, vcpu->arch.efer); in load_vmcs12_host_state()
10315 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0); in load_vmcs12_host_state()
10316 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); in load_vmcs12_host_state()
10322 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); in load_vmcs12_host_state()
10331 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; in load_vmcs12_host_state()
10355 vcpu->arch.pat = vmcs12->host_ia32_pat; in load_vmcs12_host_state()
10510 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_vmx_vmexit()