hyp_pgd           347 arch/arm/include/asm/kvm_mmu.h 				       pgd_t *hyp_pgd,
hyp_pgd           386 arch/arm64/include/asm/kvm_mmu.h 				       pgd_t *hyp_pgd,
hyp_pgd           399 arch/arm64/include/asm/kvm_mmu.h 	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
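The 32-bit arm header hit only passes hyp_pgd as a parameter, while the two arm64 header hits appear to sit in the __kvm_extend_hypmap() helper, which splices the runtime hyp PGD underneath an extra idmap level. A minimal sketch of that step, assuming the slot index and the PMD_TYPE_TABLE attribute (neither is visible in the hits above):

	/* Sketch only: the merged table's first entry is pointed at the main
	 * hyp PGD. __pa() turns the kernel VA into a physical address and
	 * __phys_to_pgd_val() packs it into PGD-entry bits. */
	u64 pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);	/* attributes assumed */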
hyp_pgd            26 virt/kvm/arm/mmu.c static pgd_t *hyp_pgd;
hyp_pgd           579 virt/kvm/arm/mmu.c 	id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
hyp_pgd           594 virt/kvm/arm/mmu.c 	if (hyp_pgd) {
hyp_pgd           595 virt/kvm/arm/mmu.c 		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
hyp_pgd           598 virt/kvm/arm/mmu.c 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd           599 virt/kvm/arm/mmu.c 		hyp_pgd = NULL;
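These mmu.c hits cover the static definition of hyp_pgd, the choice of which table carries the idmap (boot_hyp_pgd while it still exists, hyp_pgd otherwise), and the teardown path. A hedged sketch of that teardown pattern, with the unmapped range size left as a placeholder because the call's remaining arguments are cut off above:

	/* Sketch only: simplified teardown as suggested by the hits above. */
	if (hyp_pgd) {
		/* Remove the hyp-side alias of the kernel's linear map ... */
		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET), size /* assumed */);
		/* ... then return the PGD pages and make repeated teardown safe. */
		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}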
hyp_pgd           767 virt/kvm/arm/mmu.c 		err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
hyp_pgd           781 virt/kvm/arm/mmu.c 	pgd_t *pgd = hyp_pgd;
hyp_pgd          2164 virt/kvm/arm/mmu.c 		return virt_to_phys(hyp_pgd);
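The next three hits show the runtime table being consumed: create_hyp_mappings()-style code installs kernel ranges into hyp_pgd across its PTRS_PER_PGD entries, a local page-table walk starts from pgd = hyp_pgd, and the table's physical address is returned so it can be programmed as the hypervisor's translation base. A sketch of that last step, assuming it lives in kvm_mmu_get_httbr() and that merged_hyp_pgd is the sibling table used when an extended idmap is in play:

	/* Sketch only: function name and extended-idmap branch are assumptions. */
	phys_addr_t kvm_mmu_get_httbr(void)
	{
		if (__kvm_cpu_uses_extended_idmap())
			return virt_to_phys(merged_hyp_pgd);
		return virt_to_phys(hyp_pgd);
	}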
hyp_pgd          2221 virt/kvm/arm/mmu.c 	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
hyp_pgd          2222 virt/kvm/arm/mmu.c 	if (!hyp_pgd) {
hyp_pgd          2246 virt/kvm/arm/mmu.c 		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
hyp_pgd          2249 virt/kvm/arm/mmu.c 		err = kvm_map_idmap_text(hyp_pgd);