Lines matching refs: end

Identifier cross-reference output: each entry gives the source line number, the matching source line, the enclosing function, and whether `end` is used there as an argument or a local. The functions are KVM stage-2 and hyp page-table walkers, apparently from the ARM port's mmu.c.
207 phys_addr_t addr, phys_addr_t end) in unmap_ptes() argument
226 } while (pte++, addr += PAGE_SIZE, addr != end); in unmap_ptes()
233 phys_addr_t addr, phys_addr_t end) in unmap_pmds() argument
240 next = kvm_pmd_addr_end(addr, end); in unmap_pmds()
255 } while (pmd++, addr = next, addr != end); in unmap_pmds()
262 phys_addr_t addr, phys_addr_t end) in unmap_puds() argument
269 next = kvm_pud_addr_end(addr, end); in unmap_puds()
284 } while (pud++, addr = next, addr != end); in unmap_puds()
295 phys_addr_t addr = start, end = start + size; in unmap_range() local
300 next = kvm_pgd_addr_end(addr, end); in unmap_range()
303 } while (pgd++, addr = next, addr != end); in unmap_range()
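
The unmap_*() entries above share one loop shape: each level steps to the next table boundary with kvm_*_addr_end(addr, end), clamped so it never passes end, and the leaf level steps a page at a time until addr reaches end. Below is a minimal standalone sketch of that shape; block_addr_end(), walk_pages() and the constants are invented for illustration (not kernel symbols), and the block size is shrunk to four pages so the demo output stays short:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE  0x1000ULL
    #define BLOCK_SIZE 0x4000ULL  /* shrunk to 4 pages for the demo */

    /* mirrors the shape of kvm_pmd_addr_end(): next block boundary,
     * clamped so the walk never runs past 'end' */
    static uint64_t block_addr_end(uint64_t addr, uint64_t end)
    {
        uint64_t boundary = (addr + BLOCK_SIZE) & ~(BLOCK_SIZE - 1);
        return boundary < end ? boundary : end;
    }

    /* leaf level: one page per step, same do/while as unmap_ptes() */
    static void walk_pages(uint64_t addr, uint64_t end)
    {
        do {
            printf("  page 0x%llx\n", (unsigned long long)addr);
        } while (addr += PAGE_SIZE, addr != end);
    }

    int main(void)
    {
        uint64_t addr = 0x3000, end = 0x9000, next;

        /* outer level: one block-sized step per iteration, same
         * do/while as unmap_pmds()/unmap_puds() */
        do {
            next = block_addr_end(addr, end);
            printf("block [0x%llx, 0x%llx)\n",
                   (unsigned long long)addr, (unsigned long long)next);
            walk_pages(addr, next);
        } while (addr = next, addr != end);
        return 0;
    }

The key property is that block_addr_end() returns the lesser of the next boundary and end, so the outer addr != end test terminates exactly at the range end even when end is not block-aligned.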
307 phys_addr_t addr, phys_addr_t end) in stage2_flush_ptes() argument
315 } while (pte++, addr += PAGE_SIZE, addr != end); in stage2_flush_ptes()
319 phys_addr_t addr, phys_addr_t end) in stage2_flush_pmds() argument
326 next = kvm_pmd_addr_end(addr, end); in stage2_flush_pmds()
333 } while (pmd++, addr = next, addr != end); in stage2_flush_pmds()
337 phys_addr_t addr, phys_addr_t end) in stage2_flush_puds() argument
344 next = kvm_pud_addr_end(addr, end); in stage2_flush_puds()
351 } while (pud++, addr = next, addr != end); in stage2_flush_puds()
358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot() local
364 next = kvm_pgd_addr_end(addr, end); in stage2_flush_memslot()
366 } while (pgd++, addr = next, addr != end); in stage2_flush_memslot()
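
The stage2_flush_*() walkers repeat the same traversal for cache maintenance; what changes is where the bounds come from. In stage2_flush_memslot() (line 358 above), end is derived from the memslot's size in pages. A small sketch of just that bounds computation, using a cut-down stand-in for the kernel's memslot structure:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    /* invented mini-memslot; field names mimic the kernel's but the
     * struct itself is a stand-in for this sketch */
    struct memslot {
        uint64_t base_gfn;   /* first guest frame number */
        uint64_t npages;     /* slot length in pages */
    };

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x80000, .npages = 256 };

        /* same derivation as stage2_flush_memslot(): start at the
         * slot's base IPA, end npages later */
        uint64_t addr = slot.base_gfn << PAGE_SHIFT;
        uint64_t end  = addr + PAGE_SIZE * slot.npages;

        printf("flush IPA range [0x%llx, 0x%llx)\n",
               (unsigned long long)addr, (unsigned long long)end);
        return 0;
    }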
452 unsigned long end, unsigned long pfn, in create_hyp_pte_mappings() argument
465 } while (addr += PAGE_SIZE, addr != end); in create_hyp_pte_mappings()
469 unsigned long end, unsigned long pfn, in create_hyp_pmd_mappings() argument
493 next = pmd_addr_end(addr, end); in create_hyp_pmd_mappings()
497 } while (addr = next, addr != end); in create_hyp_pmd_mappings()
503 unsigned long end, unsigned long pfn, in create_hyp_pud_mappings() argument
526 next = pud_addr_end(addr, end); in create_hyp_pud_mappings()
531 } while (addr = next, addr != end); in create_hyp_pud_mappings()
537 unsigned long start, unsigned long end, in __create_hyp_mappings() argument
547 end = PAGE_ALIGN(end); in __create_hyp_mappings()
563 next = pgd_addr_end(addr, end); in __create_hyp_mappings()
568 } while (addr = next, addr != end); in __create_hyp_mappings()
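
On the mapping side, create_hyp_pte_mappings() (lines 452-465 above) walks the same way but consumes one page-frame number per page as it goes. A sketch of that leaf loop's shape, assuming set_pte() as a printing stand-in for the real page-table write:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL

    /* stand-in: just records the mapping instead of writing a hyp pte */
    static void set_pte(unsigned long addr, unsigned long pfn)
    {
        printf("map va 0x%lx -> pfn 0x%lx\n", addr, pfn);
    }

    static void map_pte_range(unsigned long addr, unsigned long end,
                              unsigned long pfn)
    {
        /* same do/while shape as create_hyp_pte_mappings(): one pfn
         * consumed per page until addr reaches end */
        do {
            set_pte(addr, pfn);
            pfn++;
        } while (addr += PAGE_SIZE, addr != end);
    }

    int main(void)
    {
        map_pte_range(0x2000, 0x5000, 0x1234);
        return 0;
    }

As in the kernel loop, the caller must guarantee addr < end with both bounds page-aligned, since the do/while only tests addr != end after the first page has been mapped.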
599 unsigned long end = KERN_TO_HYP((unsigned long)to); in create_hyp_mappings() local
602 end = PAGE_ALIGN(end); in create_hyp_mappings()
604 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { in create_hyp_mappings()
631 unsigned long end = KERN_TO_HYP((unsigned long)to); in create_hyp_io_mappings() local
637 return __create_hyp_mappings(hyp_pgd, start, end, in create_hyp_io_mappings()
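
create_hyp_mappings() (lines 599-604 above) converts both endpoints with KERN_TO_HYP(), page-aligns end, and then iterates with virt_addr < end rather than the addr != end do/while used by the table walkers. The difference matters for empty ranges, as this small illustration (no kernel APIs involved) shows:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL

    int main(void)
    {
        unsigned long start = 0x5000, end = 0x5000;  /* empty range */
        unsigned long count = 0;

        /* safe on an empty range: the body never runs */
        for (unsigned long va = start; va < end; va += PAGE_SIZE)
            count++;
        printf("for-loop visited %lu pages\n", count);

        /* a do/while over the same bounds would visit one page and
         * then walk past end, which is why the do/while walkers
         * require addr != end on entry */
        return 0;
    }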
967 phys_addr_t addr, end; in kvm_phys_addr_ioremap() local
972 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; in kvm_phys_addr_ioremap()
975 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { in kvm_phys_addr_ioremap()
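
kvm_phys_addr_ioremap() rounds end up to a page boundary by hand (line 972 above). The open-coded expression is the usual round-up idiom; the check below, with PAGE_MASK and PAGE_ALIGN defined in the conventional kernel way, confirms it matches PAGE_ALIGN() on the same value:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE     0x1000ULL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        uint64_t guest_ipa = 0x10200, size = 0x1801;

        /* the open-coded round-up from kvm_phys_addr_ioremap() */
        uint64_t end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;

        assert(end == PAGE_ALIGN(guest_ipa + size));
        printf("end = 0x%llx\n", (unsigned long long)end);
        return 0;
    }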
1055 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) in stage2_wp_ptes() argument
1065 } while (pte++, addr += PAGE_SIZE, addr != end); in stage2_wp_ptes()
1074 static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) in stage2_wp_pmds() argument
1082 next = kvm_pmd_addr_end(addr, end); in stage2_wp_pmds()
1091 } while (pmd++, addr = next, addr != end); in stage2_wp_pmds()
1102 static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) in stage2_wp_puds() argument
1109 next = kvm_pud_addr_end(addr, end); in stage2_wp_puds()
1115 } while (pud++, addr = next, addr != end); in stage2_wp_puds()
1124 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) in stage2_wp_range() argument
1141 next = kvm_pgd_addr_end(addr, end); in stage2_wp_range()
1144 } while (pgd++, addr = next, addr != end); in stage2_wp_range()
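
The stage2_wp_*() walkers reuse the unmap loop shape, but the leaf action clears write permission instead of clearing the entry. A sketch of the pte-level step with an invented pte layout and permission bit; the real code uses the stage-2 descriptor format and a kvm helper to make ptes read-only:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 0x1000ULL
    #define PTE_WRITE (1ULL << 1)   /* made-up permission bit */

    static void wp_ptes(uint64_t *pte, uint64_t addr, uint64_t end)
    {
        do {
            if (*pte & PTE_WRITE)
                *pte &= ~PTE_WRITE;  /* write-protect in place */
            /* same comma-expression step as stage2_wp_ptes() */
        } while (pte++, addr += PAGE_SIZE, addr != end);
    }

    int main(void)
    {
        uint64_t ptes[4] = { 0x1003, 0x2001, 0x3003, 0x4003 };

        wp_ptes(ptes, 0x0, 0x4000);
        for (int i = 0; i < 4; i++)
            printf("pte[%d] = 0x%llx\n", i, (unsigned long long)ptes[i]);
        return 0;
    }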
1164 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region() local
1167 stage2_wp_range(kvm, start, end); in kvm_mmu_wp_memory_region()
1189 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked() local
1191 stage2_wp_range(kvm, start, end); in kvm_mmu_write_protect_pt_masked()
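
kvm_mmu_write_protect_pt_masked() converts a 64-page dirty-bitmap chunk into a physical range: end is one page past the highest set bit of the mask (line 1189 above), and in the kernel the matching start uses the lowest set bit. A sketch with local stand-ins for the __fls()/__ffs() bit helpers:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* stand-ins for the kernel's __fls()/__ffs(); mask must be non-zero */
    static unsigned int fls_bit(uint64_t mask)
    {
        unsigned int bit = 0;
        while (mask >>= 1)
            bit++;
        return bit;           /* index of highest set bit */
    }

    static unsigned int ffs_bit(uint64_t mask)
    {
        unsigned int bit = 0;
        while (!(mask & 1)) {
            mask >>= 1;
            bit++;
        }
        return bit;           /* index of lowest set bit */
    }

    int main(void)
    {
        uint64_t base_gfn = 0x80000;
        uint64_t mask = 0xf0;  /* pages 4..7 of this 64-page chunk dirty */

        uint64_t start = (base_gfn + ffs_bit(mask)) << PAGE_SHIFT;
        uint64_t end   = (base_gfn + fls_bit(mask) + 1) << PAGE_SHIFT;

        printf("write-protect [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }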
1467 unsigned long end, in handle_hva_to_gpa() argument
1484 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
1513 unsigned long end = hva + PAGE_SIZE; in kvm_unmap_hva() local
1519 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); in kvm_unmap_hva()
1524 unsigned long start, unsigned long end) in kvm_unmap_hva_range() argument
1529 trace_kvm_unmap_hva_range(start, end); in kvm_unmap_hva_range()
1530 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); in kvm_unmap_hva_range()
1552 unsigned long end = hva + PAGE_SIZE; in kvm_set_spte_hva() local
1560 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); in kvm_set_spte_hva()
1612 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva() argument
1614 trace_kvm_age_hva(start, end); in kvm_age_hva()
1615 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); in kvm_age_hva()
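
handle_hva_to_gpa() is the funnel for all of the MMU-notifier entry points above: it intersects the notifier's [start, end) hva range with each memslot's own hva span (the min() on line 1484) and invokes the handler only on the overlap. A sketch of that clamping, with an invented slot array standing in for the kvm memslot list:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL

    struct slot {
        unsigned long userspace_addr;  /* hva of the slot's first page */
        unsigned long npages;
    };

    int main(void)
    {
        struct slot slots[] = {
            { 0x7f0000000000UL, 16 },
            { 0x7f0000020000UL, 32 },
        };
        unsigned long start = 0x7f000000e000UL, end = 0x7f0000024000UL;

        for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
            unsigned long slot_end = slots[i].userspace_addr +
                                     slots[i].npages * PAGE_SIZE;
            /* same max/min clamp as handle_hva_to_gpa() */
            unsigned long hva_start = start > slots[i].userspace_addr ?
                                      start : slots[i].userspace_addr;
            unsigned long hva_end = end < slot_end ? end : slot_end;

            if (hva_start >= hva_end)
                continue;  /* no overlap with this slot */
            printf("slot %u: handle [0x%lx, 0x%lx)\n",
                   i, hva_start, hva_end);
        }
        return 0;
    }

Slots with no overlap are skipped entirely, so kvm_unmap_hva()'s single-page range (hva to hva + PAGE_SIZE, line 1513 above) typically resolves to one slot.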