Lines Matching refs:vcpu (each entry: source line number, the matched line, and the enclosing function)

1041 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)  in kvm_is_write_fault()  argument
1043 if (kvm_vcpu_trap_is_iabt(vcpu)) in kvm_is_write_fault()
1046 return kvm_vcpu_dabt_iswrite(vcpu); in kvm_is_write_fault()
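
The three matches at 1041, 1043 and 1046 are essentially the whole predicate. A likely reconstruction, assuming the elided branch between them simply returns false for instruction aborts:

        static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
        {
                /* An instruction abort can never be a write fault. */
                if (kvm_vcpu_trap_is_iabt(vcpu))
                        return false;

                /* For a data abort, the syndrome's write bit decides. */
                return kvm_vcpu_dabt_iswrite(vcpu);
        }
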
1209 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in coherent_cache_guest_page() argument
1212 __coherent_cache_guest_page(vcpu, pfn, size, uncached); in coherent_cache_guest_page()
1215 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, in user_mem_abort() argument
1223 struct kvm *kvm = vcpu->kvm; in user_mem_abort()
1224 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in user_mem_abort()
1232 write_fault = kvm_is_write_fault(vcpu); in user_mem_abort()
1272 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
1324 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); in user_mem_abort()
1334 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); in user_mem_abort()
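
The user_mem_abort() matches outline the slow path for faults on memslot-backed memory. Below is a heavily simplified control-flow sketch built around those matched lines; the VMA/hugepage detection, the hva-to-pfn translation, write-permission handling and dirty logging are elided, and the stage2_set_* calls named in the comments are assumptions about the elided lines:

        static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                  struct kvm_memory_slot *memslot, unsigned long hva,
                                  unsigned long fault_status)
        {
                struct kvm *kvm = vcpu->kvm;
                struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
                bool write_fault = kvm_is_write_fault(vcpu); /* drives the elided permission logic */
                bool hugetlb = false;                   /* decided by the elided VMA lookup */
                bool fault_ipa_uncached = false;        /* decided by the elided memslot flags check */
                unsigned long mmu_seq;
                pfn_t pfn;
                int ret = 0;

                /*
                 * Snapshot the MMU notifier sequence count before translating the
                 * host VA to a pfn, so a concurrent host-side invalidation can be
                 * detected under mmu_lock and the fault retried.
                 */
                mmu_seq = vcpu->kvm->mmu_notifier_seq;
                smp_rmb();

                /* ... translate hva to pfn, decide hugetlb / writability / caching ... */

                spin_lock(&kvm->mmu_lock);
                if (mmu_notifier_retry(kvm, mmu_seq))
                        goto out_unlock;

                if (hugetlb) {
                        /* Clean the whole 2MiB block, then install a huge stage-2 PMD. */
                        coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
                        /* ... stage2_set_pmd_huge(kvm, memcache, fault_ipa, ...) sets ret ... */
                } else {
                        /* Clean one page, then install a stage-2 PTE. */
                        coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
                        /* ... stage2_set_pte(kvm, memcache, fault_ipa, ...) sets ret ... */
                }

        out_unlock:
                spin_unlock(&kvm->mmu_lock);
                return ret;
        }
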
1350 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) in handle_access_fault() argument
1359 spin_lock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1361 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); in handle_access_fault()
1380 spin_unlock(&vcpu->kvm->mmu_lock); in handle_access_fault()
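
handle_access_fault() resolves stage-2 access-flag faults without returning to user space: under mmu_lock it walks to the entry covering the faulting IPA and marks it young. A reconstruction consistent with the matched lines; the PTE leg and the accessed-pfn bookkeeping are assumptions based on the usual shape of this handler, and the huge-PMD check may be spelled kvm_pmd_huge() or pmd_thp_or_huge() depending on the kernel version:

        static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
        {
                pmd_t *pmd;
                pte_t *pte;
                pfn_t pfn;
                bool pfn_valid = false;

                spin_lock(&vcpu->kvm->mmu_lock);

                pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
                if (!pmd || pmd_none(*pmd))     /* nothing mapped here */
                        goto out;

                if (kvm_pmd_huge(*pmd)) {       /* block mapping: mark the PMD young */
                        *pmd = pmd_mkyoung(*pmd);
                        pfn = pmd_pfn(*pmd);
                        pfn_valid = true;
                        goto out;
                }

                pte = pte_offset_kernel(pmd, fault_ipa);
                if (pte_none(*pte))             /* nothing mapped here either */
                        goto out;

                *pte = pte_mkyoung(*pte);       /* page mapping: mark the PTE young */
                pfn = pte_pfn(*pte);
                pfn_valid = true;
        out:
                spin_unlock(&vcpu->kvm->mmu_lock);
                if (pfn_valid)
                        kvm_set_pfn_accessed(pfn);
        }
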
1397 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_guest_abort() argument
1407 is_iabt = kvm_vcpu_trap_is_iabt(vcpu); in kvm_handle_guest_abort()
1408 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); in kvm_handle_guest_abort()
1410 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), in kvm_handle_guest_abort()
1411 kvm_vcpu_get_hfar(vcpu), fault_ipa); in kvm_handle_guest_abort()
1414 fault_status = kvm_vcpu_trap_get_fault_type(vcpu); in kvm_handle_guest_abort()
1418 kvm_vcpu_trap_get_class(vcpu), in kvm_handle_guest_abort()
1419 (unsigned long)kvm_vcpu_trap_get_fault(vcpu), in kvm_handle_guest_abort()
1420 (unsigned long)kvm_vcpu_get_hsr(vcpu)); in kvm_handle_guest_abort()
1424 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_handle_guest_abort()
1427 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1429 write_fault = kvm_is_write_fault(vcpu); in kvm_handle_guest_abort()
1433 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in kvm_handle_guest_abort()
1444 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); in kvm_handle_guest_abort()
1445 ret = io_mem_abort(vcpu, run, fault_ipa); in kvm_handle_guest_abort()
1453 handle_access_fault(vcpu, fault_ipa); in kvm_handle_guest_abort()
1458 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); in kvm_handle_guest_abort()
1462 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_handle_guest_abort()
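
Taken together, the kvm_handle_guest_abort() matches trace the top-level abort path: decode the fault, look up the memslot under SRCU, and dispatch to the MMIO, access-fault or memory-fault handler. A condensed sketch of that flow; the guard clauses and the gfn_to_hva_memslot_prot() lookup between the matched lines are reconstructed from the usual shape of this handler, not taken from the listing:

        int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        {
                unsigned long fault_status, hva;
                phys_addr_t fault_ipa;
                struct kvm_memory_slot *memslot;
                bool is_iabt, write_fault, writable;
                gfn_t gfn;
                int ret, idx;

                is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
                fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
                fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
                /* ... reject fault types this path cannot handle (kvm_err + -EFAULT) ... */

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gfn = fault_ipa >> PAGE_SHIFT;
                memslot = gfn_to_memslot(vcpu->kvm, gfn);
                hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
                write_fault = kvm_is_write_fault(vcpu);

                if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                        if (is_iabt) {
                                /* Prefetch abort on an I/O address: inject it back into the guest. */
                                kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                                ret = 1;
                                goto out_unlock;
                        }
                        /* No usable memslot: treat it as MMIO, restoring the untranslated page offset. */
                        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                        ret = io_mem_abort(vcpu, run, fault_ipa);
                        goto out_unlock;
                }

                if (fault_status == FSC_ACCESS) {
                        /* Access-flag fault: mark the entry young and resume the guest. */
                        handle_access_fault(vcpu, fault_ipa);
                        ret = 1;
                        goto out_unlock;
                }

                ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);

        out_unlock:
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                return ret;
        }
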
1625 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) in kvm_mmu_free_memory_caches() argument
1627 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_mmu_free_memory_caches()
1896 void kvm_set_way_flush(struct kvm_vcpu *vcpu) in kvm_set_way_flush() argument
1898 unsigned long hcr = vcpu_get_hcr(vcpu); in kvm_set_way_flush()
1910 trace_kvm_set_way_flush(*vcpu_pc(vcpu), in kvm_set_way_flush()
1911 vcpu_has_cache_enabled(vcpu)); in kvm_set_way_flush()
1912 stage2_flush_vm(vcpu->kvm); in kvm_set_way_flush()
1913 vcpu_set_hcr(vcpu, hcr | HCR_TVM); in kvm_set_way_flush()
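
The kvm_set_way_flush() matches cover the first use of a set/way cache operation by the guest: flush the whole stage-2 address space once and start trapping VM-control register writes so the later MMU/cache toggle can be observed. A reconstruction around the matched lines; the guard on HCR_TVM is the assumption here:

        void kvm_set_way_flush(struct kvm_vcpu *vcpu)
        {
                unsigned long hcr = vcpu_get_hcr(vcpu);

                /*
                 * On the first set/way operation (HCR_TVM not yet set), flush the
                 * whole VM and start trapping VM system-register writes; later
                 * operations rely on that trapping instead of flushing again.
                 */
                if (!(hcr & HCR_TVM)) {
                        trace_kvm_set_way_flush(*vcpu_pc(vcpu),
                                                vcpu_has_cache_enabled(vcpu));
                        stage2_flush_vm(vcpu->kvm);
                        vcpu_set_hcr(vcpu, hcr | HCR_TVM);
                }
        }
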
1917 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) in kvm_toggle_cache() argument
1919 bool now_enabled = vcpu_has_cache_enabled(vcpu); in kvm_toggle_cache()
1927 stage2_flush_vm(vcpu->kvm); in kvm_toggle_cache()
1931 vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); in kvm_toggle_cache()
1933 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); in kvm_toggle_cache()
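
kvm_toggle_cache() is the companion path, called when a trapped VM-control register write changes the guest's cache-enable state: flush on any transition, and stop trapping once the caches are back on. A reconstruction consistent with the matched lines; the two if-conditions are assumptions about the elided lines:

        void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
        {
                bool now_enabled = vcpu_has_cache_enabled(vcpu);

                /* Clean+invalidate the whole VM on any cache on/off transition. */
                if (now_enabled != was_enabled)
                        stage2_flush_vm(vcpu->kvm);

                /* Caches are on again: stop trapping VM register writes. */
                if (now_enabled)
                        vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);

                trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
        }
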