Lines matching refs:kvm (uses of struct kvm in arch/x86/kvm/vmx.c; each entry gives the source line number, the matched code, and the enclosing function; "argument" and "local" mark lines where kvm is declared rather than used)
789 struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); in nested_get_page()
812 static int vmx_vm_has_apicv(struct kvm *kvm);
813 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
823 static int alloc_identity_pagetable(struct kvm *kvm);
949 static inline bool vm_need_tpr_shadow(struct kvm *kvm) in vm_need_tpr_shadow() argument
951 return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)); in vm_need_tpr_shadow()
1065 static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm) in vm_need_virtualize_apic_accesses() argument
1067 return flexpriority_enabled && irqchip_in_kernel(kvm); in vm_need_virtualize_apic_accesses()
2190 else if (irqchip_in_kernel(vcpu->kvm) && in vmx_set_msr_bitmap()
2398 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) in nested_vmx_setup_ctls_msrs()
3388 if (!vcpu->kvm->arch.tss_addr) in enter_rmode()
3394 vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr); in enter_rmode()
3632 guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; in vmx_set_cr3()
4041 static int init_rmode_tss(struct kvm *kvm) in init_rmode_tss() argument
4047 idx = srcu_read_lock(&kvm->srcu); in init_rmode_tss()
4048 fn = kvm->arch.tss_addr >> PAGE_SHIFT; in init_rmode_tss()
4049 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); in init_rmode_tss()
4053 r = kvm_write_guest_page(kvm, fn++, &data, in init_rmode_tss()
4057 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); in init_rmode_tss()
4060 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); in init_rmode_tss()
4064 r = kvm_write_guest_page(kvm, fn, &data, in init_rmode_tss()
4068 srcu_read_unlock(&kvm->srcu, idx); in init_rmode_tss()
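Read together, the init_rmode_tss() matches above trace one pattern: take the kvm->srcu read lock so the memslot lookup stays stable, then lay out the real-mode TSS through the gfn-based guest-page helpers. Below is a condensed reconstruction, a sketch rather than verbatim source; the layout constants (TSS_BASE_SIZE, TSS_REDIRECTION_SIZE, TSS_IOPB_BASE_OFFSET, RMODE_TSS_SIZE) are taken from vmx.c and do not appear in the matches themselves.

static int init_rmode_tss(struct kvm *kvm)
{
	gfn_t fn;
	u16 data = 0;
	int idx, r;

	/* hold the SRCU read side across all guest-page accesses */
	idx = srcu_read_lock(&kvm->srcu);
	fn = kvm->arch.tss_addr >> PAGE_SHIFT;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	/* point the I/O permission bitmap past the redirection map */
	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	r = kvm_write_guest_page(kvm, fn++, &data,
				 TSS_IOPB_BASE_OFFSET, sizeof(u16));
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	/* terminate the I/O bitmap with an all-ones byte */
	data = ~0;
	r = kvm_write_guest_page(kvm, fn, &data,
				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
				 sizeof(u8));
out:
	srcu_read_unlock(&kvm->srcu, idx);
	return r;
}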
4072 static int init_rmode_identity_map(struct kvm *kvm) in init_rmode_identity_map() argument
4082 mutex_lock(&kvm->slots_lock); in init_rmode_identity_map()
4084 if (likely(kvm->arch.ept_identity_pagetable_done)) in init_rmode_identity_map()
4087 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; in init_rmode_identity_map()
4089 r = alloc_identity_pagetable(kvm); in init_rmode_identity_map()
4093 idx = srcu_read_lock(&kvm->srcu); in init_rmode_identity_map()
4094 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); in init_rmode_identity_map()
4101 r = kvm_write_guest_page(kvm, identity_map_pfn, in init_rmode_identity_map()
4106 kvm->arch.ept_identity_pagetable_done = true; in init_rmode_identity_map()
4109 srcu_read_unlock(&kvm->srcu, idx); in init_rmode_identity_map()
4112 mutex_unlock(&kvm->slots_lock); in init_rmode_identity_map()
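The init_rmode_identity_map() matches show the companion pattern: kvm->slots_lock serializes the one-time build (guarded by the ept_identity_pagetable_done flag), and the page writes again run under the srcu read lock. A sketch of the core loop follows, assuming the PT32_ENT_PER_PAGE and _PAGE_* constants from the x86 headers; each of the 1024 PSE entries maps the 4 MiB region starting at i << 22 onto itself.

	idx = srcu_read_lock(&kvm->srcu);
	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		/* identity-map entry i as a present, writable 4 MiB page */
		u32 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
				       _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		r = kvm_write_guest_page(kvm, identity_map_pfn,
					 &tmp, i * sizeof(tmp), sizeof(tmp));
		if (r < 0)
			goto out;
	}
	kvm->arch.ept_identity_pagetable_done = true;
out:
	srcu_read_unlock(&kvm->srcu, idx);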
4131 static int alloc_apic_access_page(struct kvm *kvm) in alloc_apic_access_page() argument
4137 mutex_lock(&kvm->slots_lock); in alloc_apic_access_page()
4138 if (kvm->arch.apic_access_page_done) in alloc_apic_access_page()
4144 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); in alloc_apic_access_page()
4148 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in alloc_apic_access_page()
4159 kvm->arch.apic_access_page_done = true; in alloc_apic_access_page()
4161 mutex_unlock(&kvm->slots_lock); in alloc_apic_access_page()
4165 static int alloc_identity_pagetable(struct kvm *kvm) in alloc_identity_pagetable() argument
4172 BUG_ON(kvm->arch.ept_identity_pagetable_done); in alloc_identity_pagetable()
4177 kvm->arch.ept_identity_map_addr; in alloc_identity_pagetable()
4179 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem); in alloc_identity_pagetable()
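Both alloc_apic_access_page() and alloc_identity_pagetable() create a kernel-internal memslot through __kvm_set_memory_region(), which expects kvm->slots_lock to be held (the callers above take it). A sketch of the identity-pagetable variant; IDENTITY_PAGETABLE_PRIVATE_MEMSLOT is the vmx.c slot id, assumed here since the matches omit it.

static int alloc_identity_pagetable(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;

	/* callers check ept_identity_pagetable_done, so this runs once */
	BUG_ON(kvm->arch.ept_identity_pagetable_done);

	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	/* back one page at the guest physical address chosen for the map */
	kvm_userspace_mem.guest_phys_addr = kvm->arch.ept_identity_map_addr;
	kvm_userspace_mem.memory_size = PAGE_SIZE;

	return __kvm_set_memory_region(kvm, &kvm_userspace_mem);
}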
4361 static int vmx_vm_has_apicv(struct kvm *kvm) in vmx_vm_has_apicv() argument
4363 return enable_apicv && irqchip_in_kernel(kvm); in vmx_vm_has_apicv()
4543 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_pin_based_exec_ctrl()
4555 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { in vmx_exec_control()
4572 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4586 if (!vmx_vm_has_apicv(vmx->vcpu.kvm)) in vmx_secondary_exec_control()
4647 if (vmx_vm_has_apicv(vmx->vcpu.kvm)) { in vmx_vcpu_setup()
4794 if (vm_need_tpr_shadow(vmx->vcpu.kvm)) in vmx_vcpu_reset()
4802 if (vmx_vm_has_apicv(vcpu->kvm)) in vmx_vcpu_reset()
4976 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) in vmx_set_tss_addr() argument
4986 ret = kvm_set_memory_region(kvm, &tss_mem); in vmx_set_tss_addr()
4989 kvm->arch.tss_addr = addr; in vmx_set_tss_addr()
4990 return init_rmode_tss(kvm); in vmx_set_tss_addr()
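vmx_set_tss_addr() is the entry point behind the KVM_SET_TSS_ADDR vm ioctl and ties the above together: reserve a private memslot covering the three real-mode TSS pages, record the address, then initialize the contents. A reconstruction, with TSS_PRIVATE_MEMSLOT assumed from vmx.c:

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	struct kvm_userspace_memory_region tss_mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,	/* the TSS spans three pages */
		.flags = 0,
	};
	int ret;

	ret = kvm_set_memory_region(kvm, &tss_mem);	/* takes slots_lock */
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
	return init_rmode_tss(kvm);
}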
5343 if (irqchip_in_kernel(vcpu->kvm)) in handle_cr()
5560 if (!irqchip_in_kernel(vcpu->kvm) && in handle_interrupt_window()
7360 if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) in nested_vmx_exit_handled_io()
7404 if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) in nested_vmx_exit_handled_msr()
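These two nested_vmx_exit_handled_*() matches share one idiom: fetch a single byte of a guest-memory bitmap with kvm_read_guest() and test the relevant bit, treating an unreadable bitmap as "exit handled". A minimal sketch of the MSR-bitmap case, using the vmcs12->msr_bitmap field; the read/write offset adjustments of the real function are omitted:

	gpa_t bitmap = vmcs12->msr_bitmap;	/* guest physical address */
	u8 b;

	if (kvm_read_guest(vcpu->kvm, bitmap + msr_index / 8, &b, 1))
		return true;			/* unreadable: fail closed */
	return b & (1 << (msr_index & 7));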
7671 struct kvm *kvm = vmx->vcpu.kvm; in vmx_flush_pml_buffer() local
7693 mark_page_dirty(kvm, gpa >> PAGE_SHIFT); in vmx_flush_pml_buffer()
7704 static void kvm_flush_pml_buffers(struct kvm *kvm) in kvm_flush_pml_buffers() argument
7714 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_flush_pml_buffers()
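The PML (Page Modification Logging) matches split across two helpers. vmx_flush_pml_buffer() drains the hardware-written log, marking each recorded GPA dirty; kvm_flush_pml_buffers() merely kicks every vcpu out of guest mode, since each vcpu's buffer is drained on VM-exit anyway. A condensed sketch, assuming the PML_ENTITY_NUM and GUEST_PML_INDEX names from vmx.c:

static void vmx_flush_pml_buffer(struct vcpu_vmx *vmx)
{
	struct kvm *kvm = vmx->vcpu.kvm;
	u64 *pml_buf = page_address(vmx->pml_pg);
	u16 pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* index still at its reset value means the buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* hardware decrements the index; step to the oldest valid entry */
	pml_idx = (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++)
		mark_page_dirty(kvm, pml_buf[pml_idx] >> PAGE_SHIFT);

	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);	/* reset */
}

static void kvm_flush_pml_buffers(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	/* a kick forces a VM-exit, which flushes that vcpu's PML buffer */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}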
7837 !vmx_vm_has_apicv(vcpu->kvm)) in vmx_set_virtual_x2apic_mode()
7840 if (!vm_need_tpr_shadow(vcpu->kvm)) in vmx_set_virtual_x2apic_mode()
7880 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr) in vmx_hwapic_isr_update() argument
7944 if (!vmx_vm_has_apicv(vcpu->kvm)) in vmx_load_eoi_exitmap()
8396 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) in vmx_create_vcpu() argument
8407 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
8438 if (vm_need_virtualize_apic_accesses(kvm)) { in vmx_create_vcpu()
8439 err = alloc_apic_access_page(kvm); in vmx_create_vcpu()
8445 if (!kvm->arch.ept_identity_map_addr) in vmx_create_vcpu()
8446 kvm->arch.ept_identity_map_addr = in vmx_create_vcpu()
8448 err = init_rmode_identity_map(kvm); in vmx_create_vcpu()
8522 else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) in vmx_get_mt_mask()
8993 if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), in nested_vmx_load_msr()
9026 if (kvm_read_guest(vcpu->kvm, in nested_vmx_store_msr()
9046 if (kvm_write_guest(vcpu->kvm, in nested_vmx_store_msr()
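The nested_vmx_load_msr()/nested_vmx_store_msr() matches walk the guest-supplied VM-entry/VM-exit MSR areas: each element is a struct vmx_msr_entry moved with kvm_read_guest() or kvm_write_guest(). A trimmed sketch of the load side; the real function's validity checks on each entry are reduced to a comment here:

static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	struct vmx_msr_entry e;
	struct msr_data msr;
	u32 i;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		/* fetch entry i of the guest's MSR-load area */
		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
				   &e, sizeof(e)))
			goto fail;
		/* (the real code validates e.index and e.reserved here) */
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr))
			goto fail;
	}
	return 0;
fail:
	return i + 1;	/* 1-based index of the failing entry, per the SDM */
}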
9213 (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) { in prepare_vmcs02()
10143 static void vmx_slot_enable_log_dirty(struct kvm *kvm, in vmx_slot_enable_log_dirty() argument
10146 kvm_mmu_slot_leaf_clear_dirty(kvm, slot); in vmx_slot_enable_log_dirty()
10147 kvm_mmu_slot_largepage_remove_write_access(kvm, slot); in vmx_slot_enable_log_dirty()
10150 static void vmx_slot_disable_log_dirty(struct kvm *kvm, in vmx_slot_disable_log_dirty() argument
10153 kvm_mmu_slot_set_dirty(kvm, slot); in vmx_slot_disable_log_dirty()
10156 static void vmx_flush_log_dirty(struct kvm *kvm) in vmx_flush_log_dirty() argument
10158 kvm_flush_pml_buffers(kvm); in vmx_flush_log_dirty()
10161 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm, in vmx_enable_log_dirty_pt_masked() argument
10165 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); in vmx_enable_log_dirty_pt_masked()
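The last four matches are VMX's dirty-logging backend; they delegate to the MMU helpers and the PML flush above, and x86 common code reaches them through kvm_x86_ops. An excerpt of the wiring as it plausibly appears in this version's vmx.c (all other hooks elided; the field names are assumed to match this kernel):

static struct kvm_x86_ops vmx_x86_ops = {
	/* ... other hooks elided ... */
	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
	.flush_log_dirty = vmx_flush_log_dirty,
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
};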