Lines Matching refs:kvm
64 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
66 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); in kvm_flush_remote_tlbs()
69 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) in kvm_tlb_flush_vmid_ipa() argument
77 if (kvm) in kvm_tlb_flush_vmid_ipa()
78 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); in kvm_tlb_flush_vmid_ipa()
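
These references appear to come from a cross-reference listing over the ARM KVM stage-2 MMU code (arch/arm/kvm/mmu.c in kernels of roughly the 3.19-4.x era). The first group shows the two TLB maintenance entry points used throughout the file: a VMID-wide flush and a per-IPA flush that is skipped when the walk has no struct kvm (and therefore no VMID) attached, as happens for the HYP page tables. A minimal reconstruction of the pattern, using only symbols visible in the matches above:

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        /* Invalidate every stage-2 TLB entry tagged with this VM's VMID. */
        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        /*
         * A NULL kvm means the caller is tearing down tables that never
         * carried a guest VMID (e.g. the HYP mappings), so there is no
         * stage-2 TLB entry to invalidate.
         */
        if (kvm)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
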
115 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) in stage2_dissolve_pmd() argument
121 kvm_tlb_flush_vmid_ipa(kvm, addr); in stage2_dissolve_pmd()
157 static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) in clear_pgd_entry() argument
161 kvm_tlb_flush_vmid_ipa(kvm, addr); in clear_pgd_entry()
166 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) in clear_pud_entry() argument
171 kvm_tlb_flush_vmid_ipa(kvm, addr); in clear_pud_entry()
176 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) in clear_pmd_entry() argument
181 kvm_tlb_flush_vmid_ipa(kvm, addr); in clear_pmd_entry()
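
stage2_dissolve_pmd() and the clear_{pgd,pud,pmd}_entry() helpers all follow the same break-then-flush discipline: the entry is zapped in place, the stale translation for that IPA is invalidated, and only then is the orphaned table page released or the range remapped. A hedged sketch of the PMD case; the huge-page sanity check and some of the page-count bookkeeping of the real helper are simplified:

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);

        pmd_clear(pmd);                       /* break: unhook the PTE table  */
        kvm_tlb_flush_vmid_ipa(kvm, addr);    /* flush: kill cached walks     */
        pte_free_kernel(NULL, pte_table);     /* now the table page is unused */
        put_page(virt_to_page(pmd));          /* drop the ref on the PMD page */
}
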
206 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, in unmap_ptes() argument
218 kvm_tlb_flush_vmid_ipa(kvm, addr); in unmap_ptes()
228 if (kvm_pte_table_empty(kvm, start_pte)) in unmap_ptes()
229 clear_pmd_entry(kvm, pmd, start_addr); in unmap_ptes()
232 static void unmap_pmds(struct kvm *kvm, pud_t *pud, in unmap_pmds() argument
246 kvm_tlb_flush_vmid_ipa(kvm, addr); in unmap_pmds()
252 unmap_ptes(kvm, pmd, addr, next); in unmap_pmds()
257 if (kvm_pmd_table_empty(kvm, start_pmd)) in unmap_pmds()
258 clear_pud_entry(kvm, pud, start_addr); in unmap_pmds()
261 static void unmap_puds(struct kvm *kvm, pgd_t *pgd, in unmap_puds() argument
275 kvm_tlb_flush_vmid_ipa(kvm, addr); in unmap_puds()
281 unmap_pmds(kvm, pud, addr, next); in unmap_puds()
286 if (kvm_pud_table_empty(kvm, start_pud)) in unmap_puds()
287 clear_pgd_entry(kvm, pgd, start_addr); in unmap_puds()
291 static void unmap_range(struct kvm *kvm, pgd_t *pgdp, in unmap_range() argument
302 unmap_puds(kvm, pgd, addr, next); in unmap_range()
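
unmap_ptes(), unmap_pmds(), unmap_puds() and unmap_range() form a recursive teardown of the stage-2 tables: each level walks its slice of the IPA range, clears present leaf entries with a per-IPA TLB flush, descends into lower-level tables, and collapses the parent entry once the child table is empty (the kvm_*_table_empty() checks above). A condensed sketch of the PTE level; kvm_set_pte() is assumed from the ARM KVM headers, the data-cache maintenance done by the real code is elided, and the PMD/PUD levels repeat the same shape one step up while also handling huge entries in place:

static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t start_addr = addr;
        pte_t *pte, *start_pte;

        start_pte = pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
                        kvm_set_pte(pte, __pte(0));          /* break       */
                        kvm_tlb_flush_vmid_ipa(kvm, addr);   /* then flush  */
                        put_page(virt_to_page(pte));         /* one less mapping in this table */
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);

        /* If the whole PTE table is now empty, free it and clear the PMD. */
        if (kvm_pte_table_empty(kvm, start_pte))
                clear_pmd_entry(kvm, pmd, start_addr);
}
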
306 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, in stage2_flush_ptes() argument
318 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, in stage2_flush_pmds() argument
331 stage2_flush_ptes(kvm, pmd, addr, next); in stage2_flush_pmds()
336 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, in stage2_flush_puds() argument
349 stage2_flush_pmds(kvm, pud, addr, next); in stage2_flush_puds()
354 static void stage2_flush_memslot(struct kvm *kvm, in stage2_flush_memslot() argument
362 pgd = kvm->arch.pgd + kvm_pgd_index(addr); in stage2_flush_memslot()
365 stage2_flush_puds(kvm, pgd, addr, next); in stage2_flush_memslot()
376 static void stage2_flush_vm(struct kvm *kvm) in stage2_flush_vm() argument
382 idx = srcu_read_lock(&kvm->srcu); in stage2_flush_vm()
383 spin_lock(&kvm->mmu_lock); in stage2_flush_vm()
385 slots = kvm_memslots(kvm); in stage2_flush_vm()
387 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
389 spin_unlock(&kvm->mmu_lock); in stage2_flush_vm()
390 srcu_read_unlock(&kvm->srcu, idx); in stage2_flush_vm()
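
stage2_flush_vm() shows the locking convention used whenever the whole guest physical address space is walked: the memslot array is stabilised with SRCU while the stage-2 tables themselves are protected by kvm->mmu_lock. A reconstruction of the function around the lock/unlock lines matched above:

static void stage2_flush_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);     /* pin the memslot array     */
        spin_lock(&kvm->mmu_lock);            /* serialise stage-2 updates */

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                stage2_flush_memslot(kvm, memslot);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

stage2_unmap_vm() further down follows exactly the same shape, substituting stage2_unmap_memslot() for the flush.
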
666 int kvm_alloc_stage2_pgd(struct kvm *kvm) in kvm_alloc_stage2_pgd() argument
671 if (kvm->arch.pgd != NULL) { in kvm_alloc_stage2_pgd()
724 kvm->arch.pgd = pgd; in kvm_alloc_stage2_pgd()
739 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) in unmap_stage2_range() argument
741 unmap_range(kvm, kvm->arch.pgd, start, size); in unmap_stage2_range()
744 static void stage2_unmap_memslot(struct kvm *kvm, in stage2_unmap_memslot() argument
779 unmap_stage2_range(kvm, gpa, vm_end - vm_start); in stage2_unmap_memslot()
792 void stage2_unmap_vm(struct kvm *kvm) in stage2_unmap_vm() argument
798 idx = srcu_read_lock(&kvm->srcu); in stage2_unmap_vm()
799 spin_lock(&kvm->mmu_lock); in stage2_unmap_vm()
801 slots = kvm_memslots(kvm); in stage2_unmap_vm()
803 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
805 spin_unlock(&kvm->mmu_lock); in stage2_unmap_vm()
806 srcu_read_unlock(&kvm->srcu, idx); in stage2_unmap_vm()
820 void kvm_free_stage2_pgd(struct kvm *kvm) in kvm_free_stage2_pgd() argument
822 if (kvm->arch.pgd == NULL) in kvm_free_stage2_pgd()
825 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); in kvm_free_stage2_pgd()
826 kvm_free_hwpgd(kvm_get_hwpgd(kvm)); in kvm_free_stage2_pgd()
828 kfree(kvm->arch.pgd); in kvm_free_stage2_pgd()
830 kvm->arch.pgd = NULL; in kvm_free_stage2_pgd()
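
kvm_alloc_stage2_pgd() and kvm_free_stage2_pgd() bracket the lifetime of the stage-2 tables: the allocator refuses to run twice (kvm->arch.pgd doubles as the "stage 2 is initialised" flag), and teardown unmaps the full KVM_PHYS_SIZE IPA range so every intermediate table is released before the pgd itself. A sketch of the teardown side; the split between the hardware pgd and the software pgd (kvm_get_hwpgd()/kvm_free_hwpgd()) is kept, but its level-folding details are elided:

void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)            /* stage 2 was never set up */
                return;

        /* Tear down every mapping so the intermediate tables get freed... */
        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        /* ...then release the hardware pgd and the software view of it. */
        kvm_free_hwpgd(kvm_get_hwpgd(kvm));
        kfree(kvm->arch.pgd);
        kvm->arch.pgd = NULL;
}
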
833 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in stage2_get_pud() argument
839 pgd = kvm->arch.pgd + kvm_pgd_index(addr); in stage2_get_pud()
851 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in stage2_get_pmd() argument
857 pud = stage2_get_pud(kvm, cache, addr); in stage2_get_pmd()
869 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache in stage2_set_pmd_huge() argument
874 pmd = stage2_get_pmd(kvm, cache, addr); in stage2_set_pmd_huge()
891 kvm_tlb_flush_vmid_ipa(kvm, addr); in stage2_set_pmd_huge()
900 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in stage2_set_pte() argument
912 pmd = stage2_get_pmd(kvm, cache, addr); in stage2_set_pte()
926 stage2_dissolve_pmd(kvm, addr, pmd); in stage2_set_pte()
947 kvm_tlb_flush_vmid_ipa(kvm, addr); in stage2_set_pte()
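
stage2_set_pmd_huge() and stage2_set_pte() both obey the architecture's break-before-make rule when replacing a valid entry: the old entry is invalidated and its translation flushed by IPA before the new one is written. stage2_set_pte() additionally dissolves a covering huge PMD (stage2_dissolve_pmd() above) so a range can be mapped at page granularity, e.g. for dirty logging. A condensed sketch of the update step once the PTE location has been found; stage2_update_pte() is a hypothetical name used only for illustration, and kvm_set_pte() is assumed from the ARM KVM headers:

/* Hypothetical helper condensing the tail of stage2_set_pte(): cache
 * top-up, IOMAP/logging flag handling and error paths are elided. */
static void stage2_update_pte(struct kvm *kvm, pte_t *pte,
                              phys_addr_t addr, pte_t new_pte)
{
        if (!pte_none(*pte)) {
                kvm_set_pte(pte, __pte(0));      /* break                       */
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        } else {
                get_page(virt_to_page(pte));     /* first mapping in this table */
        }

        kvm_set_pte(pte, new_pte);               /* make                        */
}
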
964 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, in kvm_phys_addr_ioremap() argument
985 spin_lock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
986 ret = stage2_set_pte(kvm, &cache, addr, &pte, in kvm_phys_addr_ioremap()
988 spin_unlock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
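
kvm_phys_addr_ioremap() maps a device (MMIO) range into the guest IPA space one page at a time: the MMU memory cache is topped up outside the lock (table allocation may sleep), then each PTE is installed under kvm->mmu_lock. A sketch of the loop; PAGE_S2_DEVICE, KVM_S2PTE_FLAG_IS_IOMAP and the cache-sizing constants are assumed from the ARM KVM code of this era, and write-permission handling is elided:

int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable)
{
        phys_addr_t addr, end;
        struct kvm_mmu_memory_cache cache = { 0, };
        int ret = 0;

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE, pa += PAGE_SIZE) {
                /* Device attributes; making the PTE writable is elided here. */
                pte_t pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_S2_DEVICE);

                /* May sleep, so done before taking the spinlock. */
                ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
                                             KVM_NR_MEM_OBJS);
                if (ret)
                        goto out;

                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte,
                                     KVM_S2PTE_FLAG_IS_IOMAP);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}
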
1124 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) in stage2_wp_range() argument
1129 pgd = kvm->arch.pgd + kvm_pgd_index(addr); in stage2_wp_range()
1138 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) in stage2_wp_range()
1139 cond_resched_lock(&kvm->mmu_lock); in stage2_wp_range()
1160 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) in kvm_mmu_wp_memory_region() argument
1162 struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot); in kvm_mmu_wp_memory_region()
1166 spin_lock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
1167 stage2_wp_range(kvm, start, end); in kvm_mmu_wp_memory_region()
1168 spin_unlock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
1169 kvm_flush_remote_tlbs(kvm); in kvm_mmu_wp_memory_region()
1183 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked() argument
1191 stage2_wp_range(kvm, start, end); in kvm_mmu_write_protect_pt_masked()
1201 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
1205 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
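
The write-protect path implements dirty logging: stage2_wp_range() clears stage-2 write permission over a range and, because the range can be huge, drops kvm->mmu_lock via cond_resched_lock() when a reschedule is pending; kvm_mmu_wp_memory_region() then issues a single VMID-wide TLB flush once the whole slot has been write-protected, while kvm_mmu_write_protect_pt_masked() does the same per dirty-bitmap word and leaves the flush to its caller. A reconstruction of the slot-wide entry point:

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot);
        phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        stage2_wp_range(kvm, start, end);     /* clear S2 write permission      */
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);           /* one VMID-wide flush at the end */
}
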
1222 struct kvm *kvm = vcpu->kvm; in user_mem_abort() local
1271 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
1283 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); in user_mem_abort()
1307 spin_lock(&kvm->mmu_lock); in user_mem_abort()
1308 if (mmu_notifier_retry(kvm, mmu_seq)) in user_mem_abort()
1324 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); in user_mem_abort()
1331 mark_page_dirty(kvm, gfn); in user_mem_abort()
1334 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); in user_mem_abort()
1338 spin_unlock(&kvm->mmu_lock); in user_mem_abort()
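
user_mem_abort() resolves a stage-2 translation fault: it samples mmu_notifier_seq before pinning the page with gfn_to_pfn_prot() (which may sleep), then takes mmu_lock and uses mmu_notifier_retry() to back off if an MMU-notifier invalidation raced with the pin, and only then installs either a huge PMD or a page PTE. A condensed fragment of that sequence; the surrounding locals (hugetlb, memcache, new_pmd, new_pte, flags, ...) belong to the real function and are not declared here:

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();              /* read the seqcount before the pfn lookup */

        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;        /* pfn may be stale: drop it, refault */

        if (hugetlb)
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        else
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);

out_unlock:
        spin_unlock(&kvm->mmu_lock);

On a write fault the page is also marked dirty (mark_page_dirty() above) so the dirty log picks it up.
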
1358 spin_lock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1360 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa); in handle_access_fault()
1379 spin_unlock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1423 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_handle_guest_abort()
1426 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1461 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_handle_guest_abort()
1465 static int handle_hva_to_gpa(struct kvm *kvm, in handle_hva_to_gpa() argument
1468 int (*handler)(struct kvm *kvm, in handle_hva_to_gpa() argument
1476 slots = kvm_memslots(kvm); in handle_hva_to_gpa()
1498 ret |= handler(kvm, gpa, data); in handle_hva_to_gpa()
1505 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) in kvm_unmap_hva_handler() argument
1507 unmap_stage2_range(kvm, gpa, PAGE_SIZE); in kvm_unmap_hva_handler()
1511 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) in kvm_unmap_hva() argument
1515 if (!kvm->arch.pgd) in kvm_unmap_hva()
1519 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); in kvm_unmap_hva()
1523 int kvm_unmap_hva_range(struct kvm *kvm, in kvm_unmap_hva_range() argument
1526 if (!kvm->arch.pgd) in kvm_unmap_hva_range()
1530 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); in kvm_unmap_hva_range()
1534 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) in kvm_set_spte_handler() argument
1545 stage2_set_pte(kvm, NULL, gpa, pte, 0); in kvm_set_spte_handler()
1550 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) in kvm_set_spte_hva() argument
1555 if (!kvm->arch.pgd) in kvm_set_spte_hva()
1560 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); in kvm_set_spte_hva()
1563 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) in kvm_age_hva_handler() argument
1568 pmd = stage2_get_pmd(kvm, NULL, gpa); in kvm_age_hva_handler()
1593 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) in kvm_test_age_hva_handler() argument
1598 pmd = stage2_get_pmd(kvm, NULL, gpa); in kvm_test_age_hva_handler()
1612 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva() argument
1615 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); in kvm_age_hva()
1618 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) in kvm_test_age_hva() argument
1621 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); in kvm_test_age_hva()
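
handle_hva_to_gpa() is the bridge between the core MMU notifiers, which speak host virtual addresses, and stage 2, which speaks guest physical addresses: it walks every memslot, clamps the notifier range to the slot's userspace window, translates the overlap into a GPA and hands it to the supplied handler (kvm_unmap_hva_handler, kvm_set_spte_handler, kvm_age_hva_handler, ...). A sketch of the per-memslot step inside that loop; start, end, handler, data and ret are the enclosing function's parameters and locals, and hva_to_gfn_memslot() is assumed from the generic KVM headers:

        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gpa_t gpa;

                /* Clamp the notifier range to this slot's HVA window. */
                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;               /* no overlap with this slot */

                /* Translate the overlapping HVA to a guest physical address. */
                gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
                ret |= handler(kvm, gpa, data);
        }
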
1724 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
1735 kvm_mmu_wp_memory_region(kvm, mem->slot); in kvm_arch_commit_memory_region()
1738 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
1806 ret = kvm_phys_addr_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
1818 spin_lock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
1820 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); in kvm_arch_prepare_memory_region()
1822 stage2_flush_memslot(kvm, memslot); in kvm_arch_prepare_memory_region()
1823 spin_unlock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
1827 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_arch_free_memslot() argument
1832 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_arch_create_memslot() argument
1847 void kvm_arch_memslots_updated(struct kvm *kvm) in kvm_arch_memslots_updated() argument
1851 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
1855 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
1861 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
1862 unmap_stage2_range(kvm, gpa, size); in kvm_arch_flush_shadow_memslot()
1863 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
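
The memory-region hooks reuse the same primitives: kvm_arch_prepare_memory_region() pre-populates device-backed (VM_PFNMAP) regions with kvm_phys_addr_ioremap() and undoes the partial mapping on failure, kvm_arch_commit_memory_region() write-protects a slot when dirty logging is switched on, and kvm_arch_flush_shadow_memslot() drops a slot's stage-2 mappings under mmu_lock. A reconstruction of the latter, which is short enough to show whole:

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, gpa, size);   /* drop this slot's S2 mappings */
        spin_unlock(&kvm->mmu_lock);
}
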
1910 stage2_flush_vm(vcpu->kvm); in kvm_set_way_flush()
1925 stage2_flush_vm(vcpu->kvm); in kvm_toggle_cache()