Lines matching refs: vcpu
1041 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) in kvm_is_write_fault() argument
1043 if (kvm_vcpu_trap_is_iabt(vcpu)) in kvm_is_write_fault()
1046 return kvm_vcpu_dabt_iswrite(vcpu); in kvm_is_write_fault()
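The three hits at 1041-1046 give almost the whole predicate; only the early return on the instruction-abort path is missing. A minimal reconstruction (the return false is inferred: an instruction fetch can never be a write):

static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        /* An instruction abort is a fetch, never a write. */
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        /* For data aborts, the WnR syndrome bit says whether it was a write. */
        return kvm_vcpu_dabt_iswrite(vcpu);
}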
1208 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in coherent_cache_guest_page() argument
1211 __coherent_cache_guest_page(vcpu, pfn, size, uncached); in coherent_cache_guest_page()
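The hit at 1211 is the entire body: coherent_cache_guest_page() just forwards to the per-architecture __coherent_cache_guest_page() helper. A sketch, with the size and uncached parameters inferred from the call sites at 1323 and 1333:

static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
                                      unsigned long size, bool uncached)
{
        __coherent_cache_guest_page(vcpu, pfn, size, uncached);
}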
1214 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, in user_mem_abort() argument
1222 struct kvm *kvm = vcpu->kvm; in user_mem_abort()
1223 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in user_mem_abort()
1231 write_fault = kvm_is_write_fault(vcpu); in user_mem_abort()
1271 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
1323 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); in user_mem_abort()
1333 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); in user_mem_abort()
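user_mem_abort() is the main Stage-2 page-fault handler and is far longer than its six hits, but they outline its shape: snapshot mmu_notifier_seq before pinning the page (so a concurrent MMU-notifier invalidation can be detected under mmu_lock), then flush the new page before installing either a 2 MiB (PMD_SIZE) or a 4 KiB (PAGE_SIZE) Stage-2 mapping. A heavily elided skeleton; the hugetlb, pfn and fault_ipa_uncached locals and the stage2_set_* steps are assumptions filled in around the listed lines:

static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        bool write_fault = kvm_is_write_fault(vcpu);
        bool hugetlb;                   /* assumption: set while resolving hva */
        bool fault_ipa_uncached;        /* assumption: from the memslot flags */
        unsigned long mmu_seq;
        pfn_t pfn;                      /* assumption: from gfn_to_pfn_prot() */

        /* ... validate the fault and pick the mapping size ... */

        /*
         * Snapshot the notifier sequence before pinning the page, and
         * re-check it under mmu_lock before committing the mapping.
         */
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /* ... pin the page, take mmu_lock, re-check mmu_seq ... */

        if (hugetlb) {
                /* Flush, then install a 2 MiB Stage-2 block mapping. */
                coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
                /* ... stage2_set_pmd_huge(kvm, memcache, fault_ipa, ...) ... */
        } else {
                /* Flush, then install a 4 KiB Stage-2 page mapping. */
                coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
                /* ... stage2_set_pte(kvm, memcache, fault_ipa, ...) ... */
        }

        /* ... unlock, release the page, return ... */
        return 0;
}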
1349 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) in handle_access_fault() argument
1358 spin_lock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1360 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); in handle_access_fault()
1379 spin_unlock(&vcpu->kvm->mmu_lock); in handle_access_fault()
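handle_access_fault() resolves a Stage-2 access-flag fault by marking the existing mapping young; the hits show the mmu_lock critical section around a stage2_get_pmd() walk. A sketch of the whole walk, assuming the usual pmd_mkyoung()/pte_mkyoung() handling for huge and regular mappings:

static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        pmd_t *pmd;
        pte_t *pte;
        pfn_t pfn;
        bool pfn_valid = false;

        spin_lock(&vcpu->kvm->mmu_lock);

        pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
        if (!pmd || pmd_none(*pmd))     /* nothing mapped here */
                goto out;

        if (kvm_pmd_huge(*pmd)) {       /* huge mapping: young the PMD */
                *pmd = pmd_mkyoung(*pmd);
                pfn = pmd_pfn(*pmd);
                pfn_valid = true;
                goto out;
        }

        pte = pte_offset_kernel(pmd, fault_ipa);
        if (pte_none(*pte))             /* nothing mapped here either */
                goto out;

        *pte = pte_mkyoung(*pte);       /* regular page: young the PTE */
        pfn = pte_pfn(*pte);
        pfn_valid = true;
out:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
}

Marking the host-side page accessed (kvm_set_pfn_accessed) happens outside the lock, once the pfn has been safely read.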
1396 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_guest_abort() argument
1406 is_iabt = kvm_vcpu_trap_is_iabt(vcpu); in kvm_handle_guest_abort()
1407 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); in kvm_handle_guest_abort()
1409 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), in kvm_handle_guest_abort()
1410 kvm_vcpu_get_hfar(vcpu), fault_ipa); in kvm_handle_guest_abort()
1413 fault_status = kvm_vcpu_trap_get_fault_type(vcpu); in kvm_handle_guest_abort()
1417 kvm_vcpu_trap_get_class(vcpu), in kvm_handle_guest_abort()
1418 (unsigned long)kvm_vcpu_trap_get_fault(vcpu), in kvm_handle_guest_abort()
1419 (unsigned long)kvm_vcpu_get_hsr(vcpu)); in kvm_handle_guest_abort()
1423 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_handle_guest_abort()
1426 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1428 write_fault = kvm_is_write_fault(vcpu); in kvm_handle_guest_abort()
1432 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in kvm_handle_guest_abort()
1443 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); in kvm_handle_guest_abort()
1444 ret = io_mem_abort(vcpu, run, fault_ipa); in kvm_handle_guest_abort()
1452 handle_access_fault(vcpu, fault_ipa); in kvm_handle_guest_abort()
1457 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); in kvm_handle_guest_abort()
1461 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_handle_guest_abort()
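kvm_handle_guest_abort() is the top-level Stage-2 abort handler, and the hits at 1396-1461 trace its whole control flow: classify the trap, reject unsupported fault types, look up the memslot under SRCU, then dispatch to MMIO emulation, the access-fault fast path, or user_mem_abort(). A sketch assembling those hits; the local declarations, the FSC_* guard and the writable/hva handling are filled in as assumptions:

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        unsigned long hva;
        bool is_iabt, write_fault, writable;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Only translation, permission and access faults are handled. */
        fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
            fault_status != FSC_ACCESS) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
                        (unsigned long)kvm_vcpu_get_hsr(vcpu));
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(vcpu->kvm, gfn);
        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
        write_fault = kvm_is_write_fault(vcpu);
        if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch abort on an I/O address: inject into the guest. */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12]; merge the page offset
                 * back in from HFAR before handing off to MMIO emulation.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        if (fault_status == FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
        }

        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

Note the fix-up at 1443: the reported IPA only carries bits [MAX:12], so the low 12 bits are recovered from HFAR before MMIO emulation, irrespective of page size.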
1624 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) in kvm_mmu_free_memory_caches() argument
1626 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_mmu_free_memory_caches()
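The pair of hits at 1624 and 1626 is the whole function: tearing down a vcpu's MMU page cache is a single call.

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}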
1894 void kvm_set_way_flush(struct kvm_vcpu *vcpu) in kvm_set_way_flush() argument
1896 unsigned long hcr = vcpu_get_hcr(vcpu); in kvm_set_way_flush()
1908 trace_kvm_set_way_flush(*vcpu_pc(vcpu), in kvm_set_way_flush()
1909 vcpu_has_cache_enabled(vcpu)); in kvm_set_way_flush()
1910 stage2_flush_vm(vcpu->kvm); in kvm_set_way_flush()
1911 vcpu_set_hcr(vcpu, hcr | HCR_TVM); in kvm_set_way_flush()
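kvm_set_way_flush() handles trapped cache set/way operations. The hits show HCR being read at entry and the trace/flush/HCR_TVM lines, but the guard around them sits on unlisted lines; a sketch assuming the flush only runs the first time, while HCR_TVM is still clear:

void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
        unsigned long hcr = vcpu_get_hcr(vcpu);

        /*
         * First set/way operation: flush the whole Stage-2 address
         * space once, then trap VM register writes (HCR_TVM) so the
         * MMU/cache state can be tracked from here on. Later set/way
         * ops rely on that trapping instead of flushing again.
         */
        if (!(hcr & HCR_TVM)) {
                trace_kvm_set_way_flush(*vcpu_pc(vcpu),
                                        vcpu_has_cache_enabled(vcpu));
                stage2_flush_vm(vcpu->kvm);
                vcpu_set_hcr(vcpu, hcr | HCR_TVM);
        }
}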
1915 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) in kvm_toggle_cache() argument
1917 bool now_enabled = vcpu_has_cache_enabled(vcpu); in kvm_toggle_cache()
1925 stage2_flush_vm(vcpu->kvm); in kvm_toggle_cache()
1929 vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); in kvm_toggle_cache()
1931 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); in kvm_toggle_cache()
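kvm_toggle_cache() is the other half of the scheme: it runs from the trapped VM register writes that kvm_set_way_flush() armed. A sketch, assuming the flush fires on any change of the cache-enabled state and that trapping is dropped once the caches are back on:

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
        bool now_enabled = vcpu_has_cache_enabled(vcpu);

        /*
         * Whether the MMU+caches were just switched on or just
         * switched off, a clean+invalidate of the whole VM covers it.
         */
        if (now_enabled != was_enabled)
                stage2_flush_vm(vcpu->kvm);

        /* Caches are on again: stop trapping VM register writes. */
        if (now_enabled)
                vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);

        trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}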