pud                33 arch/arc/mm/fault.c 	pud_t *pud, *pud_k;
pud                42 arch/arc/mm/fault.c 	pud = pud_offset(pgd, address);
pud                47 arch/arc/mm/fault.c 	pmd = pmd_offset(pud, address);
pud                77 arch/arm/include/asm/kvm_mmu.h #define kvm_pud_pfn(pud)	({ WARN_ON(1); 0; })
pud                82 arch/arm/include/asm/kvm_mmu.h #define kvm_pud_mkhuge(pud)	( {WARN_ON(1); pud; })
pud                88 arch/arm/include/asm/kvm_mmu.h static inline void kvm_set_s2pud_readonly(pud_t *pud)
pud                93 arch/arm/include/asm/kvm_mmu.h static inline bool kvm_s2pud_readonly(pud_t *pud)
pud                99 arch/arm/include/asm/kvm_mmu.h static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
pud               104 arch/arm/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
pud               107 arch/arm/include/asm/kvm_mmu.h 	return pud;
pud               110 arch/arm/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkexec(pud_t pud)
pud               113 arch/arm/include/asm/kvm_mmu.h 	return pud;
pud               116 arch/arm/include/asm/kvm_mmu.h static inline bool kvm_s2pud_exec(pud_t *pud)
pud               122 arch/arm/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
pud               125 arch/arm/include/asm/kvm_mmu.h 	return pud;
pud               128 arch/arm/include/asm/kvm_mmu.h static inline bool kvm_s2pud_young(pud_t pud)
pud               327 arch/arm/include/asm/kvm_mmu.h static inline void __kvm_flush_dcache_pud(pud_t pud)
pud                36 arch/arm/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud                38 arch/arm/include/asm/pgalloc.h 	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
pud               180 arch/arm/include/asm/pgtable-2level.h #define pud_none(pud)		(0)
pud               181 arch/arm/include/asm/pgtable-2level.h #define pud_bad(pud)		(0)
pud               182 arch/arm/include/asm/pgtable-2level.h #define pud_present(pud)	(1)
pud               184 arch/arm/include/asm/pgtable-2level.h #define set_pud(pud,pudp)	do { } while (0)
pud               186 arch/arm/include/asm/pgtable-2level.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
pud               188 arch/arm/include/asm/pgtable-2level.h 	return (pmd_t *)pud;
pud               129 arch/arm/include/asm/pgtable-3level.h #define pud_none(pud)		(!pud_val(pud))
pud               130 arch/arm/include/asm/pgtable-3level.h #define pud_bad(pud)		(!(pud_val(pud) & 2))
pud               131 arch/arm/include/asm/pgtable-3level.h #define pud_present(pud)	(pud_val(pud))
pud               144 arch/arm/include/asm/pgtable-3level.h #define set_pud(pudp, pud)		\
pud               146 arch/arm/include/asm/pgtable-3level.h 		*pudp = pud;		\
pud               150 arch/arm/include/asm/pgtable-3level.h static inline pmd_t *pud_page_vaddr(pud_t pud)
pud               152 arch/arm/include/asm/pgtable-3level.h 	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
pud               157 arch/arm/include/asm/pgtable-3level.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
pud               159 arch/arm/include/asm/pgtable-3level.h 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
pud               213 arch/arm/include/asm/pgtable-3level.h #define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
pud               214 arch/arm/include/asm/pgtable-3level.h #define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))
pud                22 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_populate(kvm, pgd, pud)	pgd_populate(NULL, pgd, pud)
pud                24 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_free(kvm, pud)		do { } while (0)
pud                26 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_none(kvm, pud)		pud_none(pud)
pud                27 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_clear(kvm, pud)		pud_clear(pud)
pud                28 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_present(kvm, pud)		pud_present(pud)
pud                29 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_populate(kvm, pud, pmd)	pud_populate(NULL, pud, pmd)
pud                30 arch/arm/include/asm/stage2_pgtable.h #define stage2_pmd_offset(kvm, pud, address)	pmd_offset(pud, address)
pud                33 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_huge(kvm, pud)		pud_huge(pud)
pud                29 arch/arm/lib/uaccess_with_memcpy.c 	pud_t *pud;
pud                36 arch/arm/lib/uaccess_with_memcpy.c 	pud = pud_offset(pgd, addr);
pud                37 arch/arm/lib/uaccess_with_memcpy.c 	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
pud                40 arch/arm/lib/uaccess_with_memcpy.c 	pmd = pmd_offset(pud, addr);
pud               334 arch/arm/mm/dump.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
pud               336 arch/arm/mm/dump.c 	pmd_t *pmd = pmd_offset(pud, 0);
pud               360 arch/arm/mm/dump.c 	pud_t *pud = pud_offset(pgd, 0);
pud               364 arch/arm/mm/dump.c 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
pud               366 arch/arm/mm/dump.c 		if (!pud_none(*pud)) {
pud               367 arch/arm/mm/dump.c 			walk_pmd(st, pud, addr);
pud               369 arch/arm/mm/dump.c 			note_page(st, addr, 2, pud_val(*pud), NULL);
pud                94 arch/arm/mm/fault-armv.c 	pud_t *pud;
pud               103 arch/arm/mm/fault-armv.c 	pud = pud_offset(pgd, address);
pud               104 arch/arm/mm/fault-armv.c 	if (pud_none_or_clear_bad(pud))
pud               107 arch/arm/mm/fault-armv.c 	pmd = pmd_offset(pud, address);
pud                46 arch/arm/mm/fault.c 		pud_t *pud;
pud                58 arch/arm/mm/fault.c 		pud = pud_offset(pgd, addr);
pud                60 arch/arm/mm/fault.c 			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
pud                62 arch/arm/mm/fault.c 		if (pud_none(*pud))
pud                65 arch/arm/mm/fault.c 		if (pud_bad(*pud)) {
pud                70 arch/arm/mm/fault.c 		pmd = pmd_offset(pud, addr);
pud               411 arch/arm/mm/fault.c 	pud_t *pud, *pud_k;
pud               430 arch/arm/mm/fault.c 	pud = pud_offset(pgd, addr);
pud               435 arch/arm/mm/fault.c 	if (!pud_present(*pud))
pud               436 arch/arm/mm/fault.c 		set_pud(pud, *pud_k);
pud               438 arch/arm/mm/fault.c 	pmd = pmd_offset(pud, addr);
pud                27 arch/arm/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                24 arch/arm/mm/idmap.c static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pud                30 arch/arm/mm/idmap.c 	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
pud                40 arch/arm/mm/idmap.c 		if (!pud_none(*pud))
pud                41 arch/arm/mm/idmap.c 			memcpy(pmd, pmd_offset(pud, 0),
pud                43 arch/arm/mm/idmap.c 		pud_populate(&init_mm, pud, pmd);
pud                46 arch/arm/mm/idmap.c 		pmd = pmd_offset(pud, addr);
pud                55 arch/arm/mm/idmap.c static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pud                58 arch/arm/mm/idmap.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud                71 arch/arm/mm/idmap.c 	pud_t *pud = pud_offset(pgd, addr);
pud                76 arch/arm/mm/idmap.c 		idmap_add_pmd(pud, addr, next, prot);
pud                77 arch/arm/mm/idmap.c 	} while (pud++, addr = next, addr != end);
pud               145 arch/arm/mm/ioremap.c 	pud_t *pud;
pud               150 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pud               151 arch/arm/mm/ioremap.c 	pmdp = pmd_offset(pud, addr);
pud               193 arch/arm/mm/ioremap.c 	pud_t *pud;
pud               203 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pud               204 arch/arm/mm/ioremap.c 	pmd = pmd_offset(pud, addr);
pud               225 arch/arm/mm/ioremap.c 	pud_t *pud;
pud               235 arch/arm/mm/ioremap.c 	pud = pud_offset(pgd, addr);
pud               236 arch/arm/mm/ioremap.c 	pmd = pmd_offset(pud, addr);
pud               378 arch/arm/mm/mmu.c 	pud_t *pud = pud_offset(pgd, addr);
pud               379 arch/arm/mm/mmu.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud               798 arch/arm/mm/mmu.c static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
pud               803 arch/arm/mm/mmu.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud               835 arch/arm/mm/mmu.c 	pud_t *pud = pud_offset(pgd, addr);
pud               840 arch/arm/mm/mmu.c 		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
pud               842 arch/arm/mm/mmu.c 	} while (pud++, addr = next, addr != end);
pud               892 arch/arm/mm/mmu.c 		pud_t *pud = pud_offset(pgd, addr);
pud               893 arch/arm/mm/mmu.c 		pmd_t *pmd = pmd_offset(pud, addr);
pud               979 arch/arm/mm/mmu.c 	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
pud               980 arch/arm/mm/mmu.c 	if (WARN_ON(!pud))
pud               982 arch/arm/mm/mmu.c 	pmd_alloc(mm, pud, 0);
pud               119 arch/arm/mm/pgd.c 	pud_t *pud;
pud               130 arch/arm/mm/pgd.c 	pud = pud_offset(pgd, 0);
pud               131 arch/arm/mm/pgd.c 	if (pud_none_or_clear_bad(pud))
pud               134 arch/arm/mm/pgd.c 	pmd = pmd_offset(pud, 0);
pud               143 arch/arm/mm/pgd.c 	pud_clear(pud);
pud               148 arch/arm/mm/pgd.c 	pud_free(mm, pud);
pud               159 arch/arm/mm/pgd.c 		pud = pud_offset(pgd, 0);
pud               160 arch/arm/mm/pgd.c 		if (pud_none_or_clear_bad(pud))
pud               162 arch/arm/mm/pgd.c 		pmd = pmd_offset(pud, 0);
pud               163 arch/arm/mm/pgd.c 		pud_clear(pud);
pud               167 arch/arm/mm/pgd.c 		pud_free(mm, pud);
pud               177 arch/arm64/include/asm/kvm_mmu.h #define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)
pud               183 arch/arm64/include/asm/kvm_mmu.h #define kvm_pud_pfn(pud)		pud_pfn(pud)
pud               186 arch/arm64/include/asm/kvm_mmu.h #define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)
pud               200 arch/arm64/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
pud               202 arch/arm64/include/asm/kvm_mmu.h 	pud_val(pud) |= PUD_S2_RDWR;
pud               203 arch/arm64/include/asm/kvm_mmu.h 	return pud;
pud               218 arch/arm64/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkexec(pud_t pud)
pud               220 arch/arm64/include/asm/kvm_mmu.h 	pud_val(pud) &= ~PUD_S2_XN;
pud               221 arch/arm64/include/asm/kvm_mmu.h 	return pud;
pud               277 arch/arm64/include/asm/kvm_mmu.h static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
pud               279 arch/arm64/include/asm/kvm_mmu.h 	return pud_mkyoung(pud);
pud               282 arch/arm64/include/asm/kvm_mmu.h static inline bool kvm_s2pud_young(pud_t pud)
pud               284 arch/arm64/include/asm/kvm_mmu.h 	return pud_young(pud);
pud               357 arch/arm64/include/asm/kvm_mmu.h static inline void __kvm_flush_dcache_pud(pud_t pud)
pud               360 arch/arm64/include/asm/kvm_mmu.h 		struct page *page = pud_page(pud);
pud                33 arch/arm64/include/asm/pgtable-types.h typedef struct { pudval_t pud; } pud_t;
pud                34 arch/arm64/include/asm/pgtable-types.h #define pud_val(x)	((x).pud)
pud               123 arch/arm64/include/asm/pgtable.h #define pud_access_permitted(pud, write) \
pud               124 arch/arm64/include/asm/pgtable.h 	(pte_access_permitted(pud_pte(pud), (write)))
pud               301 arch/arm64/include/asm/pgtable.h static inline pte_t pud_pte(pud_t pud)
pud               303 arch/arm64/include/asm/pgtable.h 	return __pte(pud_val(pud));
pud               311 arch/arm64/include/asm/pgtable.h static inline pmd_t pud_pmd(pud_t pud)
pud               313 arch/arm64/include/asm/pgtable.h 	return __pmd(pud_val(pud));
pud               391 arch/arm64/include/asm/pgtable.h #define pud_young(pud)		pte_young(pud_pte(pud))
pud               392 arch/arm64/include/asm/pgtable.h #define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
pud               393 arch/arm64/include/asm/pgtable.h #define pud_write(pud)		pte_write(pud_pte(pud))
pud               395 arch/arm64/include/asm/pgtable.h #define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))
pud               397 arch/arm64/include/asm/pgtable.h #define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
pud               399 arch/arm64/include/asm/pgtable.h #define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
pud               446 arch/arm64/include/asm/pgtable.h static inline bool pud_sect(pud_t pud) { return false; }
pud               447 arch/arm64/include/asm/pgtable.h static inline bool pud_table(pud_t pud) { return true; }
pud               449 arch/arm64/include/asm/pgtable.h #define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
pud               451 arch/arm64/include/asm/pgtable.h #define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
pud               525 arch/arm64/include/asm/pgtable.h #define pud_none(pud)		(!pud_val(pud))
pud               526 arch/arm64/include/asm/pgtable.h #define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
pud               527 arch/arm64/include/asm/pgtable.h #define pud_present(pud)	pte_present(pud_pte(pud))
pud               528 arch/arm64/include/asm/pgtable.h #define pud_valid(pud)		pte_valid(pud_pte(pud))
pud               530 arch/arm64/include/asm/pgtable.h static inline void set_pud(pud_t *pudp, pud_t pud)
pud               534 arch/arm64/include/asm/pgtable.h 		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
pud               539 arch/arm64/include/asm/pgtable.h 	WRITE_ONCE(*pudp, pud);
pud               541 arch/arm64/include/asm/pgtable.h 	if (pud_valid(pud)) {
pud               552 arch/arm64/include/asm/pgtable.h static inline phys_addr_t pud_page_paddr(pud_t pud)
pud               554 arch/arm64/include/asm/pgtable.h 	return __pud_to_phys(pud);
pud               564 arch/arm64/include/asm/pgtable.h #define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
pud               567 arch/arm64/include/asm/pgtable.h #define pud_page(pud)		pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))
pud               574 arch/arm64/include/asm/pgtable.h #define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
pud               587 arch/arm64/include/asm/pgtable.h #define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))
pud               688 arch/arm64/include/asm/pgtable.h static inline int pud_devmap(pud_t pud)
pud                93 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
pud                96 arch/arm64/include/asm/stage2_pgtable.h 		pgd_populate(NULL, pgd, pud);
pud               108 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
pud               111 arch/arm64/include/asm/stage2_pgtable.h 		free_page((unsigned long)pud);
pud               144 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
pud               147 arch/arm64/include/asm/stage2_pgtable.h 		return pud_none(pud);
pud               152 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
pud               155 arch/arm64/include/asm/stage2_pgtable.h 		pud_clear(pud);
pud               158 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
pud               161 arch/arm64/include/asm/stage2_pgtable.h 		return pud_present(pud);
pud               166 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
pud               169 arch/arm64/include/asm/stage2_pgtable.h 		pud_populate(NULL, pud, pmd);
pud               173 arch/arm64/include/asm/stage2_pgtable.h 				       pud_t *pud, unsigned long address)
pud               176 arch/arm64/include/asm/stage2_pgtable.h 		return pmd_offset(pud, address);
pud               178 arch/arm64/include/asm/stage2_pgtable.h 		return (pmd_t *)pud;
pud               187 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
pud               190 arch/arm64/include/asm/stage2_pgtable.h 		return pud_huge(pud);
pud               443 arch/arm64/kernel/hibernate.c 		pud_t pud = READ_ONCE(*src_pudp);
pud               446 arch/arm64/kernel/hibernate.c 		if (pud_none(pud))
pud               448 arch/arm64/kernel/hibernate.c 		if (pud_table(pud)) {
pud               453 arch/arm64/kernel/hibernate.c 				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
pud               332 arch/arm64/mm/dump.c 		pud_t pud = READ_ONCE(*pudp);
pud               335 arch/arm64/mm/dump.c 		if (pud_none(pud) || pud_sect(pud)) {
pud               336 arch/arm64/mm/dump.c 			note_page(st, addr, 2, pud_val(pud));
pud               338 arch/arm64/mm/dump.c 			BUG_ON(pud_bad(pud));
pud               159 arch/arm64/mm/fault.c 		pud_t *pudp, pud;
pud               167 arch/arm64/mm/fault.c 		pud = READ_ONCE(*pudp);
pud               168 arch/arm64/mm/fault.c 		pr_cont(", pud=%016llx", pud_val(pud));
pud               169 arch/arm64/mm/fault.c 		if (pud_none(pud) || pud_bad(pud))
pud                47 arch/arm64/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                50 arch/arm64/mm/hugetlbpage.c 	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
pud               264 arch/arm64/mm/hugetlbpage.c 	pud_t *pudp, pud;
pud               272 arch/arm64/mm/hugetlbpage.c 	pud = READ_ONCE(*pudp);
pud               273 arch/arm64/mm/hugetlbpage.c 	if (sz != PUD_SIZE && pud_none(pud))
pud               276 arch/arm64/mm/hugetlbpage.c 	if (pud_huge(pud) || !pud_present(pud))
pud               242 arch/arm64/mm/mmu.c 	pud_t pud = READ_ONCE(*pudp);
pud               247 arch/arm64/mm/mmu.c 	BUG_ON(pud_sect(pud));
pud               248 arch/arm64/mm/mmu.c 	if (pud_none(pud)) {
pud               253 arch/arm64/mm/mmu.c 		pud = READ_ONCE(*pudp);
pud               255 arch/arm64/mm/mmu.c 	BUG_ON(pud_bad(pud));
pud               694 arch/arm64/mm/mmu.c 	pud_t *pudp, pud;
pud               706 arch/arm64/mm/mmu.c 	pud = READ_ONCE(*pudp);
pud               707 arch/arm64/mm/mmu.c 	if (pud_none(pud))
pud               710 arch/arm64/mm/mmu.c 	if (pud_sect(pud))
pud               711 arch/arm64/mm/mmu.c 		return pfn_valid(pud_pfn(pud));
pud               791 arch/arm64/mm/mmu.c 	pud_t pud = READ_ONCE(*pudp);
pud               793 arch/arm64/mm/mmu.c 	BUG_ON(pud_none(pud) || pud_bad(pud));
pud              1023 arch/arm64/mm/mmu.c 	pud_t pud;
pud              1026 arch/arm64/mm/mmu.c 	pud = READ_ONCE(*pudp);
pud              1028 arch/arm64/mm/mmu.c 	if (!pud_table(pud)) {
pud               201 arch/arm64/mm/pageattr.c 	pud_t *pudp, pud;
pud               214 arch/arm64/mm/pageattr.c 	pud = READ_ONCE(*pudp);
pud               215 arch/arm64/mm/pageattr.c 	if (pud_none(pud))
pud               217 arch/arm64/mm/pageattr.c 	if (pud_sect(pud))
pud                79 arch/csky/mm/fault.c 		pud_t *pud, *pud_k;
pud                93 arch/csky/mm/fault.c 		pud = (pud_t *)pgd;
pud                98 arch/csky/mm/fault.c 		pmd = pmd_offset(pud, address);
pud               125 arch/csky/mm/highmem.c 	pud_t *pud;
pud               138 arch/csky/mm/highmem.c 		pud = (pud_t *)pgd;
pud               139 arch/csky/mm/highmem.c 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
pud               140 arch/csky/mm/highmem.c 			pmd = (pmd_t *)pud;
pud               168 arch/csky/mm/highmem.c 	pud_t *pud;
pud               187 arch/csky/mm/highmem.c 	pud = (pud_t *)pgd;
pud               188 arch/csky/mm/highmem.c 	pmd = pmd_offset(pud, vaddr);
pud               179 arch/ia64/include/asm/page.h   typedef struct { unsigned long pud; } pud_t;
pud               188 arch/ia64/include/asm/page.h # define pud_val(x)	((x).pud)
pud                39 arch/ia64/include/asm/pgalloc.h pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
pud                41 arch/ia64/include/asm/pgalloc.h 	pgd_val(*pgd_entry) = __pa(pud);
pud                49 arch/ia64/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud                51 arch/ia64/include/asm/pgalloc.h 	free_page((unsigned long)pud);
pud                53 arch/ia64/include/asm/pgalloc.h #define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
pud               278 arch/ia64/include/asm/pgtable.h #define pud_none(pud)			(!pud_val(pud))
pud               279 arch/ia64/include/asm/pgtable.h #define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
pud               280 arch/ia64/include/asm/pgtable.h #define pud_present(pud)		(pud_val(pud) != 0UL)
pud               282 arch/ia64/include/asm/pgtable.h #define pud_page_vaddr(pud)		((unsigned long) __va(pud_val(pud) & _PFN_MASK))
pud               283 arch/ia64/include/asm/pgtable.h #define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))
pud                32 arch/ia64/mm/fault.c 	pud_t *pud;
pud                40 arch/ia64/mm/fault.c 	pud = pud_offset(pgd, address);
pud                41 arch/ia64/mm/fault.c 	if (pud_none(*pud) || pud_bad(*pud))
pud                44 arch/ia64/mm/fault.c 	pmd = pmd_offset(pud, address);
pud                33 arch/ia64/mm/hugetlbpage.c 	pud_t *pud;
pud                38 arch/ia64/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, taddr);
pud                39 arch/ia64/mm/hugetlbpage.c 	if (pud) {
pud                40 arch/ia64/mm/hugetlbpage.c 		pmd = pmd_alloc(mm, pud, taddr);
pud                52 arch/ia64/mm/hugetlbpage.c 	pud_t *pud;
pud                58 arch/ia64/mm/hugetlbpage.c 		pud = pud_offset(pgd, taddr);
pud                59 arch/ia64/mm/hugetlbpage.c 		if (pud_present(*pud)) {
pud                60 arch/ia64/mm/hugetlbpage.c 			pmd = pmd_offset(pud, taddr);
pud               108 arch/ia64/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud               211 arch/ia64/mm/init.c 	pud_t *pud;
pud               218 arch/ia64/mm/init.c 		pud = pud_alloc(&init_mm, pgd, address);
pud               219 arch/ia64/mm/init.c 		if (!pud)
pud               221 arch/ia64/mm/init.c 		pmd = pmd_alloc(&init_mm, pud, address);
pud               385 arch/ia64/mm/init.c 		pud_t *pud;
pud               395 arch/ia64/mm/init.c 		pud = pud_offset(pgd, end_address);
pud               396 arch/ia64/mm/init.c 		if (pud_none(*pud)) {
pud               401 arch/ia64/mm/init.c 		pmd = pmd_offset(pud, end_address);
pud               433 arch/ia64/mm/init.c 	pud_t *pud;
pud               447 arch/ia64/mm/init.c 			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
pud               448 arch/ia64/mm/init.c 			if (!pud)
pud               450 arch/ia64/mm/init.c 			pgd_populate(&init_mm, pgd, pud);
pud               452 arch/ia64/mm/init.c 		pud = pud_offset(pgd, address);
pud               454 arch/ia64/mm/init.c 		if (pud_none(*pud)) {
pud               458 arch/ia64/mm/init.c 			pud_populate(&init_mm, pud, pmd);
pud               460 arch/ia64/mm/init.c 		pmd = pmd_offset(pud, address);
pud                38 arch/mips/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud                40 arch/mips/include/asm/pgalloc.h 	set_pud(pud, __pud((unsigned long)pmd));
pud                86 arch/mips/include/asm/pgalloc.h 	pud_t *pud;
pud                88 arch/mips/include/asm/pgalloc.h 	pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER);
pud                89 arch/mips/include/asm/pgalloc.h 	if (pud)
pud                90 arch/mips/include/asm/pgalloc.h 		pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
pud                91 arch/mips/include/asm/pgalloc.h 	return pud;
pud                94 arch/mips/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud                96 arch/mips/include/asm/pgalloc.h 	free_pages((unsigned long)pud, PUD_ORDER);
pud                99 arch/mips/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
pud               101 arch/mips/include/asm/pgalloc.h 	set_pgd(pgd, __pgd((unsigned long)pud));
pud               182 arch/mips/include/asm/pgtable-64.h typedef struct { unsigned long pud; } pud_t;
pud               183 arch/mips/include/asm/pgtable-64.h #define pud_val(x)	((x).pud)
pud               290 arch/mips/include/asm/pgtable-64.h static inline int pud_none(pud_t pud)
pud               292 arch/mips/include/asm/pgtable-64.h 	return pud_val(pud) == (unsigned long) invalid_pmd_table;
pud               295 arch/mips/include/asm/pgtable-64.h static inline int pud_bad(pud_t pud)
pud               297 arch/mips/include/asm/pgtable-64.h 	return pud_val(pud) & ~PAGE_MASK;
pud               300 arch/mips/include/asm/pgtable-64.h static inline int pud_present(pud_t pud)
pud               302 arch/mips/include/asm/pgtable-64.h 	return pud_val(pud) != (unsigned long) invalid_pmd_table;
pud               336 arch/mips/include/asm/pgtable-64.h static inline unsigned long pud_page_vaddr(pud_t pud)
pud               338 arch/mips/include/asm/pgtable-64.h 	return pud_val(pud);
pud               340 arch/mips/include/asm/pgtable-64.h #define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
pud               341 arch/mips/include/asm/pgtable-64.h #define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
pud               344 arch/mips/include/asm/pgtable-64.h static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
pud               346 arch/mips/include/asm/pgtable-64.h 	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
pud               139 arch/mips/kvm/mmu.c 	pud_t *pud;
pud               148 arch/mips/kvm/mmu.c 	pud = pud_offset(pgd, addr);
pud               149 arch/mips/kvm/mmu.c 	if (pud_none(*pud)) {
pud               157 arch/mips/kvm/mmu.c 		pud_populate(NULL, pud, new_pmd);
pud               159 arch/mips/kvm/mmu.c 	pmd = pmd_offset(pud, addr);
pud               230 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
pud               241 arch/mips/kvm/mmu.c 		if (!pud_present(pud[i]))
pud               244 arch/mips/kvm/mmu.c 		pmd = pmd_offset(pud + i, 0);
pud               249 arch/mips/kvm/mmu.c 			pud_clear(pud + i);
pud               261 arch/mips/kvm/mmu.c 	pud_t *pud;
pud               272 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);
pud               276 arch/mips/kvm/mmu.c 		if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
pud               278 arch/mips/kvm/mmu.c 			pud_free(NULL, pud);
pud               354 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start,	\
pud               365 arch/mips/kvm/mmu.c 		if (!pud_present(pud[i]))				\
pud               368 arch/mips/kvm/mmu.c 		pmd = pmd_offset(pud + i, 0);				\
pud               381 arch/mips/kvm/mmu.c 	pud_t *pud;							\
pud               391 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);				\
pud               395 arch/mips/kvm/mmu.c 		ret |= kvm_mips_##name##_pud(pud, start, cur_end);	\
pud               888 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
pud               899 arch/mips/kvm/mmu.c 		if (!pud_present(pud[i]))
pud               902 arch/mips/kvm/mmu.c 		pmd = pmd_offset(pud + i, 0);
pud               907 arch/mips/kvm/mmu.c 			pud_clear(pud + i);
pud               919 arch/mips/kvm/mmu.c 	pud_t *pud;
pud               930 arch/mips/kvm/mmu.c 		pud = pud_offset(pgd + i, 0);
pud               934 arch/mips/kvm/mmu.c 		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
pud               936 arch/mips/kvm/mmu.c 			pud_free(NULL, pud);
pud               567 arch/mips/kvm/trap_emul.c 	pud_t *pud;
pud               579 arch/mips/kvm/trap_emul.c 		pud = pud_offset(pgd + i, 0);
pud               581 arch/mips/kvm/trap_emul.c 			if (pud_none(pud[j]))
pud               587 arch/mips/kvm/trap_emul.c 			pmd = pmd_offset(pud + j, 0);
pud               600 arch/mips/kvm/trap_emul.c 		pud_free(NULL, pud);
pud               297 arch/mips/mm/fault.c 		pud_t *pud, *pud_k;
pud               308 arch/mips/mm/fault.c 		pud = pud_offset(pgd, address);
pud               313 arch/mips/mm/fault.c 		pmd = pmd_offset(pud, address);
pud                28 arch/mips/mm/hugetlbpage.c 	pud_t *pud;
pud                32 arch/mips/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pud                33 arch/mips/mm/hugetlbpage.c 	if (pud)
pud                34 arch/mips/mm/hugetlbpage.c 		pte = (pte_t *)pmd_alloc(mm, pud, addr);
pud                43 arch/mips/mm/hugetlbpage.c 	pud_t *pud;
pud                48 arch/mips/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pud                49 arch/mips/mm/hugetlbpage.c 		if (pud_present(*pud))
pud                50 arch/mips/mm/hugetlbpage.c 			pmd = pmd_offset(pud, addr);
pud                72 arch/mips/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                74 arch/mips/mm/hugetlbpage.c 	return (pud_val(pud) & _PAGE_HUGE) != 0;
pud               235 arch/mips/mm/init.c 	pud_t *pud;
pud               248 arch/mips/mm/init.c 		pud = (pud_t *)pgd;
pud               249 arch/mips/mm/init.c 		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pud               250 arch/mips/mm/init.c 			pmd = (pmd_t *)pud;
pud                81 arch/mips/mm/ioremap.c 		pud_t *pud;
pud                85 arch/mips/mm/ioremap.c 		pud = pud_alloc(&init_mm, dir, address);
pud                86 arch/mips/mm/ioremap.c 		if (!pud)
pud                88 arch/mips/mm/ioremap.c 		pmd = pmd_alloc(&init_mm, pud, address);
pud                59 arch/mips/mm/pgtable-32.c 	pud_t *pud;
pud                85 arch/mips/mm/pgtable-32.c 	pud = pud_offset(pgd, vaddr);
pud                86 arch/mips/mm/pgtable-32.c 	pmd = pmd_offset(pud, vaddr);
pud               362 arch/nds32/mm/fault.c 		pud_t *pud, *pud_k;
pud               372 arch/nds32/mm/fault.c 		pud = pud_offset(pgd, addr);
pud               377 arch/nds32/mm/fault.c 		pmd = pmd_offset(pud, addr);
pud               103 arch/nds32/mm/init.c 	pud_t *pud;
pud               114 arch/nds32/mm/init.c 	pud = pud_offset(pgd, vaddr);
pud               115 arch/nds32/mm/init.c 	pmd = pmd_offset(pud, vaddr);
pud               129 arch/nds32/mm/init.c 	pud = pud_offset(pgd, vaddr);
pud               130 arch/nds32/mm/init.c 	pmd = pmd_offset(pud, vaddr);
pud                35 arch/nds32/mm/proc.c 	pud_t *pud;
pud                41 arch/nds32/mm/proc.c 		pud = pud_offset(pgd, addr);
pud                42 arch/nds32/mm/proc.c 		if (!pud_none(*pud)) {
pud                43 arch/nds32/mm/proc.c 			pmd = pmd_offset(pud, addr);
pud               103 arch/nios2/include/asm/pgtable.h 	pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
pud               248 arch/nios2/mm/fault.c 		pud_t *pud, *pud_k;
pud               259 arch/nios2/mm/fault.c 		pud = pud_offset(pgd, address);
pud               263 arch/nios2/mm/fault.c 		pmd = pmd_offset(pud, address);
pud                89 arch/nios2/mm/ioremap.c 		pud_t *pud;
pud                93 arch/nios2/mm/ioremap.c 		pud = pud_alloc(&init_mm, dir, address);
pud                94 arch/nios2/mm/ioremap.c 		if (!pud)
pud                96 arch/nios2/mm/ioremap.c 		pmd = pmd_alloc(&init_mm, pud, address);
pud               299 arch/openrisc/mm/fault.c 		pud_t *pud, *pud_k;
pud               325 arch/openrisc/mm/fault.c 		pud = pud_offset(pgd, address);
pud               330 arch/openrisc/mm/fault.c 		pmd = pmd_offset(pud, address);
pud               537 arch/parisc/kernel/cache.c 		pud_t *pud = pud_offset(pgd, addr);
pud               538 arch/parisc/kernel/cache.c 		if (!pud_none(*pud)) {
pud               539 arch/parisc/kernel/cache.c 			pmd_t *pmd = pmd_offset(pud, addr);
pud                52 arch/parisc/mm/hugetlbpage.c 	pud_t *pud;
pud                64 arch/parisc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pud                65 arch/parisc/mm/hugetlbpage.c 	if (pud) {
pud                66 arch/parisc/mm/hugetlbpage.c 		pmd = pmd_alloc(mm, pud, addr);
pud                77 arch/parisc/mm/hugetlbpage.c 	pud_t *pud;
pud                85 arch/parisc/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pud                86 arch/parisc/mm/hugetlbpage.c 		if (!pud_none(*pud)) {
pud                87 arch/parisc/mm/hugetlbpage.c 			pmd = pmd_offset(pud, addr);
pud               198 arch/parisc/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud               136 arch/powerpc/include/asm/book3s/64/hash.h #define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
pud                88 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
pud                90 arch/powerpc/include/asm/book3s/64/pgalloc.h 	*pgd =  __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
pud                95 arch/powerpc/include/asm/book3s/64/pgalloc.h 	pud_t *pud;
pud                97 arch/powerpc/include/asm/book3s/64/pgalloc.h 	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
pud               105 arch/powerpc/include/asm/book3s/64/pgalloc.h 	kmemleak_ignore(pud);
pud               107 arch/powerpc/include/asm/book3s/64/pgalloc.h 	return pud;
pud               110 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud               112 arch/powerpc/include/asm/book3s/64/pgalloc.h 	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
pud               115 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud               117 arch/powerpc/include/asm/book3s/64/pgalloc.h 	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
pud               120 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
pud               128 arch/powerpc/include/asm/book3s/64/pgalloc.h 	pgtable_free_tlb(tlb, pud, PUD_INDEX);
pud                19 arch/powerpc/include/asm/book3s/64/pgtable-4k.h static inline int pud_huge(pud_t pud)
pud                25 arch/powerpc/include/asm/book3s/64/pgtable-4k.h 		return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
pud                75 arch/powerpc/include/asm/book3s/64/pgtable-4k.h static inline int pud_huge(pud_t pud) { return 0; }
pud                25 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int pud_huge(pud_t pud)
pud                30 arch/powerpc/include/asm/book3s/64/pgtable-64k.h 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
pud                64 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int pud_huge(pud_t pud) { return 0; }
pud               916 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pud_none(pud_t pud)
pud               918 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !pud_raw(pud);
pud               921 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pud_present(pud_t pud)
pud               923 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
pud               926 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pud_page(pud_t pud);
pud               928 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t pud_pte(pud_t pud)
pud               930 arch/powerpc/include/asm/book3s/64/pgtable.h 	return __pte_raw(pud_raw(pud));
pud               937 arch/powerpc/include/asm/book3s/64/pgtable.h #define pud_write(pud)		pte_write(pud_pte(pud))
pud               939 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pud_bad(pud_t pud)
pud               942 arch/powerpc/include/asm/book3s/64/pgtable.h 		return radix__pud_bad(pud);
pud               943 arch/powerpc/include/asm/book3s/64/pgtable.h 	return hash__pud_bad(pud);
pud               947 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pud_access_permitted(pud_t pud, bool write)
pud               949 arch/powerpc/include/asm/book3s/64/pgtable.h 	return pte_access_permitted(pud_pte(pud), write);
pud               998 arch/powerpc/include/asm/book3s/64/pgtable.h #define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
pud              1316 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pud_devmap(pud_t pud)
pud              1327 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pud_pfn(pud_t pud)
pud              1366 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pud_is_leaf(pud_t pud)
pud              1368 arch/powerpc/include/asm/book3s/64/pgtable.h 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
pud               224 arch/powerpc/include/asm/book3s/64/radix.h static inline int radix__pud_bad(pud_t pud)
pud               226 arch/powerpc/include/asm/book3s/64/radix.h 	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
pud                26 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud                28 arch/powerpc/include/asm/nohash/64/pgalloc.h 	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
pud                31 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud                33 arch/powerpc/include/asm/nohash/64/pgalloc.h 	pud_set(pud, (unsigned long)pmd);
pud                61 arch/powerpc/include/asm/nohash/64/pgalloc.h #define __pud_free_tlb(tlb, pud, addr)		      \
pud                62 arch/powerpc/include/asm/nohash/64/pgalloc.h 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
pud               160 arch/powerpc/include/asm/nohash/64/pgtable.h #define pud_none(pud)		(!pud_val(pud))
pud               161 arch/powerpc/include/asm/nohash/64/pgtable.h #define	pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
pud               162 arch/powerpc/include/asm/nohash/64/pgtable.h 				 || (pud_val(pud) & PUD_BAD_BITS))
pud               163 arch/powerpc/include/asm/nohash/64/pgtable.h #define pud_present(pud)	(pud_val(pud) != 0)
pud               164 arch/powerpc/include/asm/nohash/64/pgtable.h #define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
pud               166 arch/powerpc/include/asm/nohash/64/pgtable.h extern struct page *pud_page(pud_t pud);
pud               168 arch/powerpc/include/asm/nohash/64/pgtable.h static inline pte_t pud_pte(pud_t pud)
pud               170 arch/powerpc/include/asm/nohash/64/pgtable.h 	return __pte(pud_val(pud));
pud               177 arch/powerpc/include/asm/nohash/64/pgtable.h #define pud_write(pud)		pte_write(pud_pte(pud))
pud               282 arch/powerpc/include/asm/nohash/pgtable.h static inline int pud_huge(pud_t pud)
pud                37 arch/powerpc/include/asm/pgtable-be-types.h typedef struct { __be64 pud; } pud_t;
pud                42 arch/powerpc/include/asm/pgtable-be-types.h 	return be64_to_cpu(x.pud);
pud                47 arch/powerpc/include/asm/pgtable-be-types.h 	return x.pud;
pud                27 arch/powerpc/include/asm/pgtable-types.h typedef struct { unsigned long pud; } pud_t;
pud                31 arch/powerpc/include/asm/pgtable-types.h 	return x.pud;
pud               142 arch/powerpc/include/asm/pgtable.h static inline bool pud_is_leaf(pud_t pud)
pud               475 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
pud               479 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_t *p = pud;
pud               494 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_free(kvm->mm, pud);
pud               502 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_t *pud;
pud               506 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = pud_offset(pgd, 0);
pud               507 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_free_pud(kvm, pud, lpid);
pud               538 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
pud               541 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pmd_t *pmd = pmd_offset(pud, 0);
pud               548 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_clear(pud);
pud               569 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_t *pud, *new_pud = NULL;
pud               576 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud = NULL;
pud               578 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = pud_offset(pgd, gpa);
pud               583 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
pud               584 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmd = pmd_offset(pud, gpa);
pud               605 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud = pud_offset(pgd, gpa);
pud               606 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (pud_is_leaf(*pud)) {
pud               611 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (pud_raw(*pud) == pte_raw(pte)) {
pud               616 arch/powerpc/kvm/book3s_64_mmu_radix.c 			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
pud               618 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
pud               632 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
pud               636 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!pud_none(*pud)) {
pud               642 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
pud               644 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
pud               650 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (pud_none(*pud)) {
pud               653 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_populate(kvm->mm, pud, new_pmd);
pud               656 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pmd = pmd_offset(pud, gpa);
pud              1200 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_t pud, *pudp;
pud              1279 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud = READ_ONCE(*pudp);
pud              1280 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (!(pud_val(pud) & _PAGE_PRESENT)) {
pud              1284 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (pud_val(pud) & _PAGE_PTE) {
pud              1285 arch/powerpc/kvm/book3s_64_mmu_radix.c 			pte = pud_val(pud);
pud              1290 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmdp = pmd_offset(&pud, gpa);
pud               677 arch/powerpc/mm/book3s64/radix_pgtable.c static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
pud               689 arch/powerpc/mm/book3s64/radix_pgtable.c 	pud_clear(pud);
pud               820 arch/powerpc/mm/book3s64/radix_pgtable.c 	pud_t *pud;
pud               822 arch/powerpc/mm/book3s64/radix_pgtable.c 	pud = pud_start + pud_index(addr);
pud               823 arch/powerpc/mm/book3s64/radix_pgtable.c 	for (; addr < end; addr = next, pud++) {
pud               826 arch/powerpc/mm/book3s64/radix_pgtable.c 		if (!pud_present(*pud))
pud               829 arch/powerpc/mm/book3s64/radix_pgtable.c 		if (pud_is_leaf(*pud)) {
pud               830 arch/powerpc/mm/book3s64/radix_pgtable.c 			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
pud               834 arch/powerpc/mm/book3s64/radix_pgtable.c 		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
pud               836 arch/powerpc/mm/book3s64/radix_pgtable.c 		free_pmd_table(pmd_base, pud);
pud              1100 arch/powerpc/mm/book3s64/radix_pgtable.c int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
pud              1102 arch/powerpc/mm/book3s64/radix_pgtable.c 	pte_t *ptep = (pte_t *)pud;
pud              1113 arch/powerpc/mm/book3s64/radix_pgtable.c int pud_clear_huge(pud_t *pud)
pud              1115 arch/powerpc/mm/book3s64/radix_pgtable.c 	if (pud_huge(*pud)) {
pud              1116 arch/powerpc/mm/book3s64/radix_pgtable.c 		pud_clear(pud);
pud              1123 arch/powerpc/mm/book3s64/radix_pgtable.c int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pud              1128 arch/powerpc/mm/book3s64/radix_pgtable.c 	pmd = (pmd_t *)pud_page_vaddr(*pud);
pud              1129 arch/powerpc/mm/book3s64/radix_pgtable.c 	pud_clear(pud);
pud                57 arch/powerpc/mm/book3s64/subpage_prot.c 	pud_t *pud;
pud                65 arch/powerpc/mm/book3s64/subpage_prot.c 	pud = pud_offset(pgd, addr);
pud                66 arch/powerpc/mm/book3s64/subpage_prot.c 	if (pud_none(*pud))
pud                68 arch/powerpc/mm/book3s64/subpage_prot.c 	pmd = pmd_offset(pud, addr);
pud               340 arch/powerpc/mm/hugetlbpage.c static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pud               352 arch/powerpc/mm/hugetlbpage.c 		pmd = pmd_offset(pud, addr);
pud               387 arch/powerpc/mm/hugetlbpage.c 	pmd = pmd_offset(pud, start);
pud               388 arch/powerpc/mm/hugetlbpage.c 	pud_clear(pud);
pud               397 arch/powerpc/mm/hugetlbpage.c 	pud_t *pud;
pud               403 arch/powerpc/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pud               405 arch/powerpc/mm/hugetlbpage.c 		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
pud               406 arch/powerpc/mm/hugetlbpage.c 			if (pud_none_or_clear_bad(pud))
pud               408 arch/powerpc/mm/hugetlbpage.c 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
pud               418 arch/powerpc/mm/hugetlbpage.c 			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
pud               422 arch/powerpc/mm/hugetlbpage.c 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
pud               438 arch/powerpc/mm/hugetlbpage.c 	pud = pud_offset(pgd, start);
pud               440 arch/powerpc/mm/hugetlbpage.c 	pud_free_tlb(tlb, pud, start);
pud               268 arch/powerpc/mm/pgtable.c 	pud_t *pud;
pud               275 arch/powerpc/mm/pgtable.c 	pud = pud_offset(pgd, addr);
pud               276 arch/powerpc/mm/pgtable.c 	BUG_ON(pud_none(*pud));
pud               277 arch/powerpc/mm/pgtable.c 	pmd = pmd_offset(pud, addr);
pud               316 arch/powerpc/mm/pgtable.c 	pud_t pud, *pudp;
pud               356 arch/powerpc/mm/pgtable.c 	pud  = READ_ONCE(*pudp);
pud               358 arch/powerpc/mm/pgtable.c 	if (pud_none(pud))
pud               361 arch/powerpc/mm/pgtable.c 	if (pud_is_leaf(pud)) {
pud               366 arch/powerpc/mm/pgtable.c 	if (is_hugepd(__hugepd(pud_val(pud)))) {
pud               367 arch/powerpc/mm/pgtable.c 		hpdp = (hugepd_t *)&pud;
pud               372 arch/powerpc/mm/pgtable.c 	pmdp = pmd_offset(&pud, ea);
pud               136 arch/powerpc/mm/pgtable_32.c 	pud_t	*pud;
pud               143 arch/powerpc/mm/pgtable_32.c 		pud = pud_offset(pgd, addr & PAGE_MASK);
pud               144 arch/powerpc/mm/pgtable_32.c 		if (pud && pud_present(*pud)) {
pud               145 arch/powerpc/mm/pgtable_32.c 			pmd = pmd_offset(pud, addr & PAGE_MASK);
pud               114 arch/powerpc/mm/pgtable_64.c struct page *pud_page(pud_t pud)
pud               116 arch/powerpc/mm/pgtable_64.c 	if (pud_is_leaf(pud)) {
pud               117 arch/powerpc/mm/pgtable_64.c 		VM_WARN_ON(!pud_huge(pud));
pud               118 arch/powerpc/mm/pgtable_64.c 		return pte_page(pud_pte(pud));
pud               120 arch/powerpc/mm/pgtable_64.c 	return virt_to_page(pud_page_vaddr(pud));
pud               406 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
pud               408 arch/powerpc/mm/ptdump/hashpagetable.c 	pmd_t *pmd = pmd_offset(pud, 0);
pud               422 arch/powerpc/mm/ptdump/hashpagetable.c 	pud_t *pud = pud_offset(pgd, 0);
pud               426 arch/powerpc/mm/ptdump/hashpagetable.c 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
pud               428 arch/powerpc/mm/ptdump/hashpagetable.c 		if (!pud_none(*pud))
pud               430 arch/powerpc/mm/ptdump/hashpagetable.c 			walk_pmd(st, pud, addr);
pud               265 arch/powerpc/mm/ptdump/ptdump.c static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
pud               267 arch/powerpc/mm/ptdump/ptdump.c 	pmd_t *pmd = pmd_offset(pud, 0);
pud               283 arch/powerpc/mm/ptdump/ptdump.c 	pud_t *pud = pud_offset(pgd, 0);
pud               287 arch/powerpc/mm/ptdump/ptdump.c 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
pud               289 arch/powerpc/mm/ptdump/ptdump.c 		if (!pud_none(*pud) && !pud_is_leaf(*pud))
pud               291 arch/powerpc/mm/ptdump/ptdump.c 			walk_pmd(st, pud, addr);
pud               293 arch/powerpc/mm/ptdump/ptdump.c 			note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
pud                32 arch/riscv/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud                36 arch/riscv/include/asm/pgalloc.h 	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pud                31 arch/riscv/include/asm/pgtable-64.h static inline int pud_present(pud_t pud)
pud                33 arch/riscv/include/asm/pgtable-64.h 	return (pud_val(pud) & _PAGE_PRESENT);
pud                36 arch/riscv/include/asm/pgtable-64.h static inline int pud_none(pud_t pud)
pud                38 arch/riscv/include/asm/pgtable-64.h 	return (pud_val(pud) == 0);
pud                41 arch/riscv/include/asm/pgtable-64.h static inline int pud_bad(pud_t pud)
pud                43 arch/riscv/include/asm/pgtable-64.h 	return !pud_present(pud);
pud                46 arch/riscv/include/asm/pgtable-64.h static inline void set_pud(pud_t *pudp, pud_t pud)
pud                48 arch/riscv/include/asm/pgtable-64.h 	*pudp = pud;
pud                56 arch/riscv/include/asm/pgtable-64.h static inline unsigned long pud_page_vaddr(pud_t pud)
pud                58 arch/riscv/include/asm/pgtable-64.h 	return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
pud                63 arch/riscv/include/asm/pgtable-64.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
pud                65 arch/riscv/include/asm/pgtable-64.h 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
pud               216 arch/riscv/mm/fault.c 		pud_t *pud, *pud_k;
pud               247 arch/riscv/mm/fault.c 		pud = pud_offset(p4d, addr);
pud               256 arch/riscv/mm/fault.c 		pmd = pmd_offset(pud, addr);
pud                 5 arch/riscv/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                 7 arch/riscv/mm/hugetlbpage.c 	return pud_present(pud) &&
pud                 8 arch/riscv/mm/hugetlbpage.c 		(pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
pud                83 arch/s390/include/asm/page.h typedef struct { unsigned long pud; } pud_t;
pud                92 arch/s390/include/asm/page.h #define pud_val(x)	((x).pud)
pud                74 arch/s390/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud                77 arch/s390/include/asm/pgalloc.h 		crst_table_free(mm, (unsigned long *) pud);
pud               107 arch/s390/include/asm/pgalloc.h static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
pud               109 arch/s390/include/asm/pgalloc.h 	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
pud               112 arch/s390/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud               114 arch/s390/include/asm/pgalloc.h 	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
pud               659 arch/s390/include/asm/pgtable.h static inline int pud_folded(pud_t pud)
pud               661 arch/s390/include/asm/pgtable.h 	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
pud               664 arch/s390/include/asm/pgtable.h static inline int pud_present(pud_t pud)
pud               666 arch/s390/include/asm/pgtable.h 	if (pud_folded(pud))
pud               668 arch/s390/include/asm/pgtable.h 	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
pud               671 arch/s390/include/asm/pgtable.h static inline int pud_none(pud_t pud)
pud               673 arch/s390/include/asm/pgtable.h 	if (pud_folded(pud))
pud               675 arch/s390/include/asm/pgtable.h 	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
pud               678 arch/s390/include/asm/pgtable.h static inline int pud_large(pud_t pud)
pud               680 arch/s390/include/asm/pgtable.h 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
pud               682 arch/s390/include/asm/pgtable.h 	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
pud               685 arch/s390/include/asm/pgtable.h static inline unsigned long pud_pfn(pud_t pud)
pud               690 arch/s390/include/asm/pgtable.h 	if (pud_large(pud))
pud               692 arch/s390/include/asm/pgtable.h 	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
pud               709 arch/s390/include/asm/pgtable.h static inline int pud_bad(pud_t pud)
pud               711 arch/s390/include/asm/pgtable.h 	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
pud               717 arch/s390/include/asm/pgtable.h 	if (pud_large(pud))
pud               718 arch/s390/include/asm/pgtable.h 		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
pud               719 arch/s390/include/asm/pgtable.h 	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
pud               760 arch/s390/include/asm/pgtable.h static inline int pud_write(pud_t pud)
pud               762 arch/s390/include/asm/pgtable.h 	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
pud               902 arch/s390/include/asm/pgtable.h static inline void pud_clear(pud_t *pud)
pud               904 arch/s390/include/asm/pgtable.h 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud               905 arch/s390/include/asm/pgtable.h 		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
pud              1220 arch/s390/include/asm/pgtable.h #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
pud              1221 arch/s390/include/asm/pgtable.h #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
pud              1264 arch/s390/include/asm/pgtable.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
pud              1266 arch/s390/include/asm/pgtable.h 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
pud              1267 arch/s390/include/asm/pgtable.h 		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
pud              1268 arch/s390/include/asm/pgtable.h 	return (pmd_t *) pud;
pud              1292 arch/s390/include/asm/pgtable.h #define pud_page(pud) pfn_to_page(pud_pfn(pud))
pud              1332 arch/s390/include/asm/pgtable.h static inline pud_t pud_wrprotect(pud_t pud)
pud              1334 arch/s390/include/asm/pgtable.h 	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
pud              1335 arch/s390/include/asm/pgtable.h 	pud_val(pud) |= _REGION_ENTRY_PROTECT;
pud              1336 arch/s390/include/asm/pgtable.h 	return pud;
pud              1339 arch/s390/include/asm/pgtable.h static inline pud_t pud_mkwrite(pud_t pud)
pud              1341 arch/s390/include/asm/pgtable.h 	pud_val(pud) |= _REGION3_ENTRY_WRITE;
pud              1342 arch/s390/include/asm/pgtable.h 	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
pud              1343 arch/s390/include/asm/pgtable.h 		return pud;
pud              1344 arch/s390/include/asm/pgtable.h 	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
pud              1345 arch/s390/include/asm/pgtable.h 	return pud;
pud              1348 arch/s390/include/asm/pgtable.h static inline pud_t pud_mkclean(pud_t pud)
pud              1350 arch/s390/include/asm/pgtable.h 	if (pud_large(pud)) {
pud              1351 arch/s390/include/asm/pgtable.h 		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
pud              1352 arch/s390/include/asm/pgtable.h 		pud_val(pud) |= _REGION_ENTRY_PROTECT;
pud              1354 arch/s390/include/asm/pgtable.h 	return pud;
pud              1357 arch/s390/include/asm/pgtable.h static inline pud_t pud_mkdirty(pud_t pud)
pud              1359 arch/s390/include/asm/pgtable.h 	if (pud_large(pud)) {
pud              1360 arch/s390/include/asm/pgtable.h 		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
pud              1362 arch/s390/include/asm/pgtable.h 		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
pud              1363 arch/s390/include/asm/pgtable.h 			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
pud              1365 arch/s390/include/asm/pgtable.h 	return pud;
pud               125 arch/s390/include/asm/tlb.h static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
pud               133 arch/s390/include/asm/tlb.h 	tlb_remove_table(tlb, pud);
pud               151 arch/s390/mm/dump_pagetables.c 			   pud_t *pud, unsigned long addr)
pud               158 arch/s390/mm/dump_pagetables.c 	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
pud               164 arch/s390/mm/dump_pagetables.c 	pmd = pmd_offset(pud, addr);
pud               185 arch/s390/mm/dump_pagetables.c 	pud_t *pud;
pud               195 arch/s390/mm/dump_pagetables.c 	pud = pud_offset(p4d, addr);
pud               196 arch/s390/mm/dump_pagetables.c 	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
pud               198 arch/s390/mm/dump_pagetables.c 		if (!pud_none(*pud))
pud               199 arch/s390/mm/dump_pagetables.c 			if (pud_large(*pud)) {
pud               200 arch/s390/mm/dump_pagetables.c 				prot = pud_val(*pud) &
pud               205 arch/s390/mm/dump_pagetables.c 				walk_pmd_level(m, st, pud, addr);
pud               547 arch/s390/mm/gmap.c 	pud_t *pud;
pud               586 arch/s390/mm/gmap.c 	pud = pud_offset(p4d, vmaddr);
pud               587 arch/s390/mm/gmap.c 	VM_BUG_ON(pud_none(*pud));
pud               589 arch/s390/mm/gmap.c 	if (pud_large(*pud))
pud               591 arch/s390/mm/gmap.c 	pmd = pmd_offset(pud, vmaddr);
pud               242 arch/s390/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud               244 arch/s390/mm/hugetlbpage.c 	return pud_large(pud);
pud               249 arch/s390/mm/hugetlbpage.c 		pud_t *pud, int flags)
pud               254 arch/s390/mm/hugetlbpage.c 	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
pud               104 arch/s390/mm/page-states.c static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
pud               110 arch/s390/mm/page-states.c 	pmd = pmd_offset(pud, addr);
pud               124 arch/s390/mm/page-states.c 	pud_t *pud;
pud               127 arch/s390/mm/page-states.c 	pud = pud_offset(p4d, addr);
pud               130 arch/s390/mm/page-states.c 		if (pud_none(*pud) || pud_large(*pud))
pud               132 arch/s390/mm/page-states.c 		if (!pud_folded(*pud)) {
pud               133 arch/s390/mm/page-states.c 			page = virt_to_page(pud_val(*pud));
pud               137 arch/s390/mm/page-states.c 		mark_kernel_pmd(pud, addr, next);
pud               138 arch/s390/mm/page-states.c 	} while (pud++, addr = next, addr != end);
pud               343 arch/s390/mm/pageattr.c 	pud_t *pud;
pud               351 arch/s390/mm/pageattr.c 		pud = pud_offset(p4d, address);
pud               352 arch/s390/mm/pageattr.c 		pmd = pmd_offset(pud, address);
pud               418 arch/s390/mm/pgtable.c 	pud_t *pud;
pud               425 arch/s390/mm/pgtable.c 	pud = pud_alloc(mm, p4d, addr);
pud               426 arch/s390/mm/pgtable.c 	if (!pud)
pud               428 arch/s390/mm/pgtable.c 	pmd = pmd_alloc(mm, pud, addr);
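The arch/s390/mm/pgtable.c hits directly above show the usual allocate-on-demand descent. A minimal sketch of that pattern, assuming the generic pgd_offset()/p4d_alloc() helpers; the wrapper name is hypothetical:

    /* Walk to the PMD for addr, allocating any missing intermediate level. */
    static pmd_t *walk_or_alloc_pmd(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;

        if (!p4d)
            return NULL;
        pud = pud_alloc(mm, p4d, addr);   /* may allocate a new pud page */
        if (!pud)
            return NULL;
        return pmd_alloc(mm, pud, addr);  /* may allocate a new pmd page */
    }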
pud                36 arch/sh/include/asm/pgtable-3level.h static inline unsigned long pud_page_vaddr(pud_t pud)
pud                38 arch/sh/include/asm/pgtable-3level.h 	return pud_val(pud);
pud                42 arch/sh/include/asm/pgtable-3level.h #define pud_page(pud)		NULL
pud                45 arch/sh/include/asm/pgtable-3level.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
pud                47 arch/sh/include/asm/pgtable-3level.h 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
pud               212 arch/sh/mm/cache-sh4.c 	pud_t *pud;
pud               227 arch/sh/mm/cache-sh4.c 	pud = pud_offset(pgd, address);
pud               228 arch/sh/mm/cache-sh4.c 	pmd = pmd_offset(pud, address);
pud               386 arch/sh/mm/cache-sh5.c 	pud_t *pud;
pud               400 arch/sh/mm/cache-sh5.c 	pud = pud_offset(pgd, addr);
pud               401 arch/sh/mm/cache-sh5.c 	if (pud_none(*pud) || pud_bad(*pud))
pud               404 arch/sh/mm/cache-sh5.c 	pmd = pmd_offset(pud, addr);
pud                56 arch/sh/mm/fault.c 		pud_t *pud;
pud                68 arch/sh/mm/fault.c 		pud = pud_offset(pgd, addr);
pud                70 arch/sh/mm/fault.c 			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
pud                71 arch/sh/mm/fault.c 			       (u64)pud_val(*pud));
pud                73 arch/sh/mm/fault.c 		if (pud_none(*pud))
pud                76 arch/sh/mm/fault.c 		if (pud_bad(*pud)) {
pud                81 arch/sh/mm/fault.c 		pmd = pmd_offset(pud, addr);
pud               110 arch/sh/mm/fault.c 	pud_t *pud, *pud_k;
pud               119 arch/sh/mm/fault.c 	pud = pud_offset(pgd, address);
pud               124 arch/sh/mm/fault.c 	if (!pud_present(*pud))
pud               125 arch/sh/mm/fault.c 	    set_pud(pud, *pud_k);
pud               127 arch/sh/mm/fault.c 	pmd = pmd_offset(pud, address);
pud                29 arch/sh/mm/hugetlbpage.c 	pud_t *pud;
pud                35 arch/sh/mm/hugetlbpage.c 		pud = pud_alloc(mm, pgd, addr);
pud                36 arch/sh/mm/hugetlbpage.c 		if (pud) {
pud                37 arch/sh/mm/hugetlbpage.c 			pmd = pmd_alloc(mm, pud, addr);
pud                50 arch/sh/mm/hugetlbpage.c 	pud_t *pud;
pud                56 arch/sh/mm/hugetlbpage.c 		pud = pud_offset(pgd, addr);
pud                57 arch/sh/mm/hugetlbpage.c 		if (pud) {
pud                58 arch/sh/mm/hugetlbpage.c 			pmd = pmd_offset(pud, addr);
pud                72 arch/sh/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                48 arch/sh/mm/init.c 	pud_t *pud;
pud                57 arch/sh/mm/init.c 	pud = pud_alloc(NULL, pgd, addr);
pud                58 arch/sh/mm/init.c 	if (unlikely(!pud)) {
pud                59 arch/sh/mm/init.c 		pud_ERROR(*pud);
pud                63 arch/sh/mm/init.c 	pmd = pmd_alloc(NULL, pud, addr);
pud               126 arch/sh/mm/init.c static pmd_t * __init one_md_table_init(pud_t *pud)
pud               128 arch/sh/mm/init.c 	if (pud_none(*pud)) {
pud               135 arch/sh/mm/init.c 		pud_populate(&init_mm, pud, pmd);
pud               136 arch/sh/mm/init.c 		BUG_ON(pmd != pmd_offset(pud, 0));
pud               139 arch/sh/mm/init.c 	return pmd_offset(pud, 0);
pud               168 arch/sh/mm/init.c 	pud_t *pud;
pud               181 arch/sh/mm/init.c 		pud = (pud_t *)pgd;
pud               182 arch/sh/mm/init.c 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
pud               183 arch/sh/mm/init.c 			pmd = one_md_table_init(pud);
pud                44 arch/sh/mm/pgtable.c void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud                46 arch/sh/mm/pgtable.c 	set_pud(pud, __pud((unsigned long)pmd));
pud                26 arch/sh/mm/tlbex_32.c 	pud_t *pud;
pud                45 arch/sh/mm/tlbex_32.c 	pud = pud_offset(pgd, address);
pud                46 arch/sh/mm/tlbex_32.c 	if (pud_none_or_clear_bad(pud))
pud                48 arch/sh/mm/tlbex_32.c 	pmd = pmd_offset(pud, address);
pud                47 arch/sh/mm/tlbex_64.c 	pud_t *pud;
pud                61 arch/sh/mm/tlbex_64.c 	pud = pud_offset(pgd, address);
pud                62 arch/sh/mm/tlbex_64.c 	if (pud_none(*pud) || !pud_present(*pud))
pud                65 arch/sh/mm/tlbex_64.c 	pmd = pmd_offset(pud, address);
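The sh fault and TLB-miss paths above walk a three-level table, so pud_offset() is applied to the pgd entry directly. A minimal read-only sketch of that walk, assuming the caller has already validated the pgd entry; the function name is hypothetical:

    /* Descend pud -> pmd -> pte, bailing out on empty or bad entries. */
    static pte_t *walk_pte(pgd_t *pgd, unsigned long address)
    {
        pud_t *pud = pud_offset(pgd, address);
        pmd_t *pmd;

        if (pud_none(*pud) || pud_bad(*pud))
            return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
            return NULL;
        return pte_offset_kernel(pmd, address);
    }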
pud                72 arch/sparc/include/asm/page_64.h typedef struct { unsigned long pud; } pud_t;
pud                79 arch/sparc/include/asm/page_64.h #define pud_val(x)      ((x).pud)
pud                19 arch/sparc/include/asm/pgalloc_64.h static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
pud                21 arch/sparc/include/asm/pgalloc_64.h 	pgd_set(pgd, pud);
pud                36 arch/sparc/include/asm/pgalloc_64.h static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
pud                38 arch/sparc/include/asm/pgalloc_64.h 	pud_set(pud, pmd);
pud                48 arch/sparc/include/asm/pgalloc_64.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud                50 arch/sparc/include/asm/pgalloc_64.h 	kmem_cache_free(pgtable_cache, pud);
pud               112 arch/sparc/include/asm/pgalloc_64.h #define __pud_free_tlb(tlb, pud, addr)		      \
pud               113 arch/sparc/include/asm/pgalloc_64.h 	pgtable_free_tlb(tlb, pud, false)
pud               418 arch/sparc/include/asm/pgtable_64.h static inline bool is_hugetlb_pud(pud_t pud)
pud               420 arch/sparc/include/asm/pgtable_64.h 	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
pud               708 arch/sparc/include/asm/pgtable_64.h #define pud_write(pud)	pte_write(__pte(pud_val(pud)))
pud               809 arch/sparc/include/asm/pgtable_64.h #define pud_none(pud)			(!pud_val(pud))
pud               811 arch/sparc/include/asm/pgtable_64.h #define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)
pud               847 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pud_page_vaddr(pud_t pud)
pud               849 arch/sparc/include/asm/pgtable_64.h 	pte_t pte = __pte(pud_val(pud));
pud               858 arch/sparc/include/asm/pgtable_64.h #define pud_page(pud) 			virt_to_page((void *)pud_page_vaddr(pud))
pud               860 arch/sparc/include/asm/pgtable_64.h #define pud_present(pud)		(pud_val(pud) != 0U)
pud               870 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pud_large(pud_t pud)
pud               872 arch/sparc/include/asm/pgtable_64.h 	pte_t pte = __pte(pud_val(pud));
pud               877 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pud_pfn(pud_t pud)
pud               879 arch/sparc/include/asm/pgtable_64.h 	pte_t pte = __pte(pud_val(pud));
pud              1624 arch/sparc/kernel/smp_64.c 	pud_t *pud;
pud              1636 arch/sparc/kernel/smp_64.c 	pud = pud_offset(pgd, addr);
pud              1637 arch/sparc/kernel/smp_64.c 	if (pud_none(*pud)) {
pud              1643 arch/sparc/kernel/smp_64.c 		pud_populate(&init_mm, pud, new);
pud              1646 arch/sparc/kernel/smp_64.c 	pmd = pmd_offset(pud, addr);
pud               280 arch/sparc/mm/hugetlbpage.c 	pud_t *pud;
pud               284 arch/sparc/mm/hugetlbpage.c 	pud = pud_alloc(mm, pgd, addr);
pud               285 arch/sparc/mm/hugetlbpage.c 	if (!pud)
pud               288 arch/sparc/mm/hugetlbpage.c 		return (pte_t *)pud;
pud               289 arch/sparc/mm/hugetlbpage.c 	pmd = pmd_alloc(mm, pud, addr);
pud               301 arch/sparc/mm/hugetlbpage.c 	pud_t *pud;
pud               307 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, addr);
pud               308 arch/sparc/mm/hugetlbpage.c 	if (pud_none(*pud))
pud               310 arch/sparc/mm/hugetlbpage.c 	if (is_hugetlb_pud(*pud))
pud               311 arch/sparc/mm/hugetlbpage.c 		return (pte_t *)pud;
pud               312 arch/sparc/mm/hugetlbpage.c 	pmd = pmd_offset(pud, addr);
pud               399 arch/sparc/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud               401 arch/sparc/mm/hugetlbpage.c 	return !pud_none(pud) &&
pud               402 arch/sparc/mm/hugetlbpage.c 		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
pud               415 arch/sparc/mm/hugetlbpage.c static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pud               424 arch/sparc/mm/hugetlbpage.c 	pmd = pmd_offset(pud, addr);
pud               446 arch/sparc/mm/hugetlbpage.c 	pmd = pmd_offset(pud, start);
pud               447 arch/sparc/mm/hugetlbpage.c 	pud_clear(pud);
pud               456 arch/sparc/mm/hugetlbpage.c 	pud_t *pud;
pud               461 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, addr);
pud               464 arch/sparc/mm/hugetlbpage.c 		if (pud_none_or_clear_bad(pud))
pud               466 arch/sparc/mm/hugetlbpage.c 		if (is_hugetlb_pud(*pud))
pud               467 arch/sparc/mm/hugetlbpage.c 			pud_clear(pud);
pud               469 arch/sparc/mm/hugetlbpage.c 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
pud               471 arch/sparc/mm/hugetlbpage.c 	} while (pud++, addr = next, addr != end);
pud               484 arch/sparc/mm/hugetlbpage.c 	pud = pud_offset(pgd, start);
pud               486 arch/sparc/mm/hugetlbpage.c 	pud_free_tlb(tlb, pud, start);
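The sparc hugetlb lookups above stop the walk at the PUD when it maps a huge page and hand the entry back as a pte_t *. A minimal sketch of just that short cut, using only accessors shown in this listing; the function name is hypothetical and smaller page sizes are ignored:

    /* Return the PUD-sized huge entry for addr, or NULL if there is none. */
    static pte_t *huge_pud_lookup(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud = pud_offset(pgd, addr);

        if (pud_none(*pud))
            return NULL;
        if (is_hugetlb_pud(*pud))
            return (pte_t *)pud;  /* the PUD entry itself is the huge PTE */
        return NULL;              /* a PMD-sized page would keep walking */
    }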
pud              1656 arch/sparc/mm/init_64.c 	pud_t *pud;
pud              1677 arch/sparc/mm/init_64.c 	pud = pud_offset(pgd, addr);
pud              1678 arch/sparc/mm/init_64.c 	if (pud_none(*pud))
pud              1681 arch/sparc/mm/init_64.c 	if (pud_large(*pud))
pud              1682 arch/sparc/mm/init_64.c 		return pfn_valid(pud_pfn(*pud));
pud              1684 arch/sparc/mm/init_64.c 	pmd = pmd_offset(pud, addr);
pud              1701 arch/sparc/mm/init_64.c 					      pud_t *pud)
pud              1710 arch/sparc/mm/init_64.c 		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
pud              1720 arch/sparc/mm/init_64.c 		pud_val(*pud) = pte_val;
pud              1724 arch/sparc/mm/init_64.c 		pud++;
pud              1803 arch/sparc/mm/init_64.c 		pud_t *pud;
pud              1817 arch/sparc/mm/init_64.c 		pud = pud_offset(pgd, vstart);
pud              1818 arch/sparc/mm/init_64.c 		if (pud_none(*pud)) {
pud              1822 arch/sparc/mm/init_64.c 				vstart = kernel_map_hugepud(vstart, vend, pud);
pud              1830 arch/sparc/mm/init_64.c 			pud_populate(&init_mm, pud, new);
pud              1833 arch/sparc/mm/init_64.c 		pmd = pmd_offset(pud, vstart);
pud              2615 arch/sparc/mm/init_64.c 		pud_t *pud;
pud              2621 arch/sparc/mm/init_64.c 		pud = vmemmap_pud_populate(pgd, vstart, node);
pud              2622 arch/sparc/mm/init_64.c 		if (!pud)
pud              2625 arch/sparc/mm/init_64.c 		pmd = pmd_offset(pud, vstart);
pud                60 arch/um/include/asm/pgtable-3level.h #define pud_populate(mm, pud, pmd) \
pud                61 arch/um/include/asm/pgtable-3level.h 	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
pud                85 arch/um/include/asm/pgtable-3level.h static inline void pud_clear (pud_t *pud)
pud                87 arch/um/include/asm/pgtable-3level.h 	set_pud(pud, __pud(_PAGE_NEWPAGE));
pud                90 arch/um/include/asm/pgtable-3level.h #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
pud                91 arch/um/include/asm/pgtable-3level.h #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
pud                94 arch/um/include/asm/pgtable-3level.h #define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
pud                81 arch/um/kernel/mem.c static void __init one_md_table_init(pud_t *pud)
pud                89 arch/um/kernel/mem.c 	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
pud                90 arch/um/kernel/mem.c 	if (pmd_table != pmd_offset(pud, 0))
pud                99 arch/um/kernel/mem.c 	pud_t *pud;
pud               110 arch/um/kernel/mem.c 		pud = pud_offset(pgd, vaddr);
pud               111 arch/um/kernel/mem.c 		if (pud_none(*pud))
pud               112 arch/um/kernel/mem.c 			one_md_table_init(pud);
pud               113 arch/um/kernel/mem.c 		pmd = pmd_offset(pud, vaddr);
pud               127 arch/um/kernel/mem.c 	pud_t *pud;
pud               147 arch/um/kernel/mem.c 		pud = pud_offset(pgd, vaddr);
pud               148 arch/um/kernel/mem.c 		pmd = pmd_offset(pud, vaddr);
pud                22 arch/um/kernel/skas/mmu.c 	pud_t *pud;
pud                27 arch/um/kernel/skas/mmu.c 	pud = pud_alloc(mm, pgd, proc);
pud                28 arch/um/kernel/skas/mmu.c 	if (!pud)
pud                31 arch/um/kernel/skas/mmu.c 	pmd = pmd_alloc(mm, pud, proc);
pud                46 arch/um/kernel/skas/mmu.c 	pud_free(mm, pud);
pud                20 arch/um/kernel/skas/uaccess.c 	pud_t *pud;
pud                30 arch/um/kernel/skas/uaccess.c 	pud = pud_offset(pgd, addr);
pud                31 arch/um/kernel/skas/uaccess.c 	if (!pud_present(*pud))
pud                34 arch/um/kernel/skas/uaccess.c 	pmd = pmd_offset(pud, addr);
pud               258 arch/um/kernel/tlb.c static inline int update_pmd_range(pud_t *pud, unsigned long addr,
pud               266 arch/um/kernel/tlb.c 	pmd = pmd_offset(pud, addr);
pud               284 arch/um/kernel/tlb.c 	pud_t *pud;
pud               288 arch/um/kernel/tlb.c 	pud = pud_offset(pgd, addr);
pud               291 arch/um/kernel/tlb.c 		if (!pud_present(*pud)) {
pud               292 arch/um/kernel/tlb.c 			if (hvc->force || pud_newpage(*pud)) {
pud               294 arch/um/kernel/tlb.c 				pud_mkuptodate(*pud);
pud               297 arch/um/kernel/tlb.c 		else ret = update_pmd_range(pud, addr, next, hvc);
pud               298 arch/um/kernel/tlb.c 	} while (pud++, addr = next, ((addr < end) && !ret));
pud               341 arch/um/kernel/tlb.c 	pud_t *pud;
pud               367 arch/um/kernel/tlb.c 		pud = pud_offset(pgd, addr);
pud               368 arch/um/kernel/tlb.c 		if (!pud_present(*pud)) {
pud               372 arch/um/kernel/tlb.c 			if (pud_newpage(*pud)) {
pud               383 arch/um/kernel/tlb.c 		pmd = pmd_offset(pud, addr);
pud               427 arch/um/kernel/tlb.c 	pud_t *pud;
pud               440 arch/um/kernel/tlb.c 	pud = pud_offset(pgd, address);
pud               441 arch/um/kernel/tlb.c 	if (!pud_present(*pud))
pud               444 arch/um/kernel/tlb.c 	pmd = pmd_offset(pud, address);
pud               503 arch/um/kernel/tlb.c pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
pud               505 arch/um/kernel/tlb.c 	return pmd_offset(pud, address);
pud               516 arch/um/kernel/tlb.c 	pud_t *pud = pud_offset(pgd, addr);
pud               517 arch/um/kernel/tlb.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud                31 arch/um/kernel/trap.c 	pud_t *pud;
pud               107 arch/um/kernel/trap.c 		pud = pud_offset(pgd, address);
pud               108 arch/um/kernel/trap.c 		pmd = pmd_offset(pud, address);
pud                35 arch/unicore32/kernel/hibernate.c 	pud_t *pud;
pud                38 arch/unicore32/kernel/hibernate.c 	pud = pud_offset(pgd, 0);
pud                39 arch/unicore32/kernel/hibernate.c 	pmd_table = pmd_offset(pud, 0);
pud               361 arch/x86/entry/vsyscall/vsyscall_64.c 	pud_t *pud;
pud               370 arch/x86/entry/vsyscall/vsyscall_64.c 	pud = pud_offset(p4d, VSYSCALL_ADDR);
pud               371 arch/x86/entry/vsyscall/vsyscall_64.c 	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
pud               372 arch/x86/entry/vsyscall/vsyscall_64.c 	pmd = pmd_offset(pud, VSYSCALL_ADDR);
pud               154 arch/x86/include/asm/kexec.h 	pud_t *pud;
pud               489 arch/x86/include/asm/paravirt.h static inline void set_pud(pud_t *pudp, pud_t pud)
pud               491 arch/x86/include/asm/paravirt.h 	pudval_t val = native_pud_val(pud);
pud               508 arch/x86/include/asm/paravirt.h static inline pudval_t pud_val(pud_t pud)
pud               510 arch/x86/include/asm/paravirt.h 	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
pud               124 arch/x86/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud               127 arch/x86/include/asm/pgalloc.h 	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
pud               130 arch/x86/include/asm/pgalloc.h static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud               133 arch/x86/include/asm/pgalloc.h 	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
pud               138 arch/x86/include/asm/pgalloc.h static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
pud               140 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
pud               141 arch/x86/include/asm/pgalloc.h 	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
pud               144 arch/x86/include/asm/pgalloc.h static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
pud               146 arch/x86/include/asm/pgalloc.h 	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
pud               147 arch/x86/include/asm/pgalloc.h 	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
pud               159 arch/x86/include/asm/pgalloc.h static inline void pud_free(struct mm_struct *mm, pud_t *pud)
pud               161 arch/x86/include/asm/pgalloc.h 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
pud               162 arch/x86/include/asm/pgalloc.h 	free_page((unsigned long)pud);
pud               165 arch/x86/include/asm/pgalloc.h extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
pud               167 arch/x86/include/asm/pgalloc.h static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
pud               170 arch/x86/include/asm/pgalloc.h 	___pud_free_tlb(tlb, pud);
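The x86 pgalloc helpers above are normally used to hook a freshly allocated pmd page into an empty PUD slot. A minimal sketch under that assumption; locking and the accounting done by real callers are omitted, and the function name is hypothetical:

    /* Populate an empty PUD slot with a new pmd page. */
    static int fill_pud_slot(struct mm_struct *mm, pud_t *pud, unsigned long addr)
    {
        pmd_t *pmd;

        if (!pud_none(*pud))
            return 0;                 /* already populated */
        pmd = pmd_alloc_one(mm, addr);
        if (!pmd)
            return -ENOMEM;
        pud_populate(mm, pud, pmd);   /* installs _PAGE_TABLE | __pa(pmd) */
        return 0;
    }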
pud                25 arch/x86/include/asm/pgtable-2level.h static inline void native_set_pud(pud_t *pudp, pud_t pud)
pud               101 arch/x86/include/asm/pgtable-3level.h static inline void native_set_pud(pud_t *pudp, pud_t pud)
pud               104 arch/x86/include/asm/pgtable-3level.h 	pud.p4d.pgd = pti_set_user_pgtbl(&pudp->p4d.pgd, pud.p4d.pgd);
pud               106 arch/x86/include/asm/pgtable-3level.h 	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
pud               227 arch/x86/include/asm/pgtable-3level.h 	pud_t pud;
pud               243 arch/x86/include/asm/pgtable-3level.h 	return res.pud;
pud                85 arch/x86/include/asm/pgtable.h # define set_pud(pudp, pud)		native_set_pud(pudp, pud)
pud                89 arch/x86/include/asm/pgtable.h #define pud_clear(pud)			native_pud_clear(pud)
pud               172 arch/x86/include/asm/pgtable.h static inline int pud_dirty(pud_t pud)
pud               174 arch/x86/include/asm/pgtable.h 	return pud_flags(pud) & _PAGE_DIRTY;
pud               177 arch/x86/include/asm/pgtable.h static inline int pud_young(pud_t pud)
pud               179 arch/x86/include/asm/pgtable.h 	return pud_flags(pud) & _PAGE_ACCESSED;
pud               225 arch/x86/include/asm/pgtable.h static inline unsigned long pud_pfn(pud_t pud)
pud               227 arch/x86/include/asm/pgtable.h 	phys_addr_t pfn = pud_val(pud);
pud               229 arch/x86/include/asm/pgtable.h 	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
pud               263 arch/x86/include/asm/pgtable.h static inline int pud_trans_huge(pud_t pud)
pud               265 arch/x86/include/asm/pgtable.h 	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
pud               282 arch/x86/include/asm/pgtable.h static inline int pud_devmap(pud_t pud)
pud               284 arch/x86/include/asm/pgtable.h 	return !!(pud_val(pud) & _PAGE_DEVMAP);
pud               287 arch/x86/include/asm/pgtable.h static inline int pud_devmap(pud_t pud)
pud               433 arch/x86/include/asm/pgtable.h static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
pud               435 arch/x86/include/asm/pgtable.h 	pudval_t v = native_pud_val(pud);
pud               440 arch/x86/include/asm/pgtable.h static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
pud               442 arch/x86/include/asm/pgtable.h 	pudval_t v = native_pud_val(pud);
pud               447 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkold(pud_t pud)
pud               449 arch/x86/include/asm/pgtable.h 	return pud_clear_flags(pud, _PAGE_ACCESSED);
pud               452 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkclean(pud_t pud)
pud               454 arch/x86/include/asm/pgtable.h 	return pud_clear_flags(pud, _PAGE_DIRTY);
pud               457 arch/x86/include/asm/pgtable.h static inline pud_t pud_wrprotect(pud_t pud)
pud               459 arch/x86/include/asm/pgtable.h 	return pud_clear_flags(pud, _PAGE_RW);
pud               462 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkdirty(pud_t pud)
pud               464 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
pud               467 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkdevmap(pud_t pud)
pud               469 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_DEVMAP);
pud               472 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkhuge(pud_t pud)
pud               474 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_PSE);
pud               477 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkyoung(pud_t pud)
pud               479 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_ACCESSED);
pud               482 arch/x86/include/asm/pgtable.h static inline pud_t pud_mkwrite(pud_t pud)
pud               484 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_RW);
pud               498 arch/x86/include/asm/pgtable.h static inline int pud_soft_dirty(pud_t pud)
pud               500 arch/x86/include/asm/pgtable.h 	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
pud               513 arch/x86/include/asm/pgtable.h static inline pud_t pud_mksoft_dirty(pud_t pud)
pud               515 arch/x86/include/asm/pgtable.h 	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
pud               528 arch/x86/include/asm/pgtable.h static inline pud_t pud_clear_soft_dirty(pud_t pud)
pud               530 arch/x86/include/asm/pgtable.h 	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
pud               596 arch/x86/include/asm/pgtable.h static inline pud_t pud_mknotpresent(pud_t pud)
pud               598 arch/x86/include/asm/pgtable.h 	return pfn_pud(pud_pfn(pud),
pud               599 arch/x86/include/asm/pgtable.h 	      __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
pud               854 arch/x86/include/asm/pgtable.h static inline int pud_none(pud_t pud)
pud               856 arch/x86/include/asm/pgtable.h 	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
pud               859 arch/x86/include/asm/pgtable.h static inline int pud_present(pud_t pud)
pud               861 arch/x86/include/asm/pgtable.h 	return pud_flags(pud) & _PAGE_PRESENT;
pud               864 arch/x86/include/asm/pgtable.h static inline unsigned long pud_page_vaddr(pud_t pud)
pud               866 arch/x86/include/asm/pgtable.h 	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
pud               873 arch/x86/include/asm/pgtable.h #define pud_page(pud)	pfn_to_page(pud_pfn(pud))
pud               876 arch/x86/include/asm/pgtable.h static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
pud               878 arch/x86/include/asm/pgtable.h 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
pud               881 arch/x86/include/asm/pgtable.h static inline int pud_large(pud_t pud)
pud               883 arch/x86/include/asm/pgtable.h 	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
pud               887 arch/x86/include/asm/pgtable.h static inline int pud_bad(pud_t pud)
pud               889 arch/x86/include/asm/pgtable.h 	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
pud               892 arch/x86/include/asm/pgtable.h static inline int pud_large(pud_t pud)
pud              1096 arch/x86/include/asm/pgtable.h 			      pud_t *pudp, pud_t pud)
pud              1098 arch/x86/include/asm/pgtable.h 	native_set_pud(pudp, pud);
pud              1207 arch/x86/include/asm/pgtable.h static inline int pud_write(pud_t pud)
pud              1209 arch/x86/include/asm/pgtable.h 	return pud_flags(pud) & _PAGE_RW;
pud              1340 arch/x86/include/asm/pgtable.h 		unsigned long addr, pud_t *pud)
pud              1457 arch/x86/include/asm/pgtable.h static inline bool pud_access_permitted(pud_t pud, bool write)
pud              1459 arch/x86/include/asm/pgtable.h 	return __pte_access_permitted(pud_val(pud), write);
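Together, pfn_pud(), pud_mkhuge() and set_pud() from the x86 entries above are what the pageattr code later in this listing uses to install a 1 GiB leaf entry. A minimal sketch of that combination; the wrapper name and the caller-supplied protection are assumptions:

    /* Install a 1 GiB (PUD-sized) leaf mapping for pfn with the given prot. */
    static void install_huge_pud(pud_t *pudp, unsigned long pfn, pgprot_t prot)
    {
        set_pud(pudp, pud_mkhuge(pfn_pud(pfn, prot)));  /* sets _PAGE_PSE */
    }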
pud               111 arch/x86/include/asm/pgtable_64.h static inline void native_set_pud(pud_t *pudp, pud_t pud)
pud               113 arch/x86/include/asm/pgtable_64.h 	WRITE_ONCE(*pudp, pud);
pud               116 arch/x86/include/asm/pgtable_64.h static inline void native_pud_clear(pud_t *pud)
pud               118 arch/x86/include/asm/pgtable_64.h 	native_set_pud(pud, native_make_pud(0));
pud               124 arch/x86/include/asm/pgtable_64.h 	return native_make_pud(xchg(&xp->pud, 0));
pud               334 arch/x86/include/asm/pgtable_types.h typedef struct { pudval_t pud; } pud_t;
pud               341 arch/x86/include/asm/pgtable_types.h static inline pudval_t native_pud_val(pud_t pud)
pud               343 arch/x86/include/asm/pgtable_types.h 	return pud.pud;
pud               353 arch/x86/include/asm/pgtable_types.h static inline pudval_t native_pud_val(pud_t pud)
pud               355 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(pud.p4d.pgd);
pud               376 arch/x86/include/asm/pgtable_types.h 	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
pud               381 arch/x86/include/asm/pgtable_types.h 	return native_pgd_val(pmd.pud.p4d.pgd);
pud               401 arch/x86/include/asm/pgtable_types.h static inline pudval_t pud_pfn_mask(pud_t pud)
pud               403 arch/x86/include/asm/pgtable_types.h 	if (native_pud_val(pud) & _PAGE_PSE)
pud               409 arch/x86/include/asm/pgtable_types.h static inline pudval_t pud_flags_mask(pud_t pud)
pud               411 arch/x86/include/asm/pgtable_types.h 	return ~pud_pfn_mask(pud);
pud               414 arch/x86/include/asm/pgtable_types.h static inline pudval_t pud_flags(pud_t pud)
pud               416 arch/x86/include/asm/pgtable_types.h 	return native_pud_val(pud) & pud_flags_mask(pud);
pud               344 arch/x86/include/asm/xen/page.h #define pud_val_ma(v) ((v).pud)
pud               135 arch/x86/kernel/espfix_64.c 	pud_t pud, *pud_p;
pud               165 arch/x86/kernel/espfix_64.c 	pud = *pud_p;
pud               166 arch/x86/kernel/espfix_64.c 	if (!pud_present(pud)) {
pud               170 arch/x86/kernel/espfix_64.c 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
pud               173 arch/x86/kernel/espfix_64.c 			set_pud(&pud_p[n], pud);
pud               176 arch/x86/kernel/espfix_64.c 	pmd_p = pmd_offset(&pud, addr);
pud               121 arch/x86/kernel/head64.c 	pudval_t *pud;
pud               165 arch/x86/kernel/head64.c 	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
pud               166 arch/x86/kernel/head64.c 	pud[510] += load_delta;
pud               167 arch/x86/kernel/head64.c 	pud[511] += load_delta;
pud               181 arch/x86/kernel/head64.c 	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
pud               195 arch/x86/kernel/head64.c 		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
pud               196 arch/x86/kernel/head64.c 		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
pud               199 arch/x86/kernel/head64.c 		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
pud               200 arch/x86/kernel/head64.c 		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
pud               204 arch/x86/kernel/head64.c 	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
pud               205 arch/x86/kernel/head64.c 	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
pud               305 arch/x86/kernel/head64.c 	pudval_t pud, *pud_p;
pud               351 arch/x86/kernel/head64.c 	pud = *pud_p;
pud               353 arch/x86/kernel/head64.c 	if (pud)
pud               354 arch/x86/kernel/head64.c 		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
pud               134 arch/x86/kernel/ldt.c 	pud_t *pud;
pud               143 arch/x86/kernel/ldt.c 	pud = pud_offset(p4d, va);
pud               144 arch/x86/kernel/ldt.c 	if (pud_none(*pud))
pud               147 arch/x86/kernel/ldt.c 	return pmd_offset(pud, va);
pud                96 arch/x86/kernel/machine_kexec_32.c 	pud_t *pud;
pud               104 arch/x86/kernel/machine_kexec_32.c 	pud = pud_offset(p4d, vaddr);
pud               105 arch/x86/kernel/machine_kexec_32.c 	pmd = pmd_offset(pud, vaddr);
pud               116 arch/x86/kernel/machine_kexec_64.c 	free_page((unsigned long)image->arch.pud);
pud               117 arch/x86/kernel/machine_kexec_64.c 	image->arch.pud = NULL;
pud               130 arch/x86/kernel/machine_kexec_64.c 	pud_t *pud;
pud               146 arch/x86/kernel/machine_kexec_64.c 		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
pud               147 arch/x86/kernel/machine_kexec_64.c 		if (!pud)
pud               149 arch/x86/kernel/machine_kexec_64.c 		image->arch.pud = pud;
pud               150 arch/x86/kernel/machine_kexec_64.c 		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
pud               152 arch/x86/kernel/machine_kexec_64.c 	pud = pud_offset(p4d, vaddr);
pud               153 arch/x86/kernel/machine_kexec_64.c 	if (!pud_present(*pud)) {
pud               158 arch/x86/kernel/machine_kexec_64.c 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
pud               160 arch/x86/kernel/machine_kexec_64.c 	pmd = pmd_offset(pud, vaddr);
pud               109 arch/x86/kernel/tboot.c 	pud_t *pud;
pud               117 arch/x86/kernel/tboot.c 	pud = pud_alloc(&tboot_mm, p4d, vaddr);
pud               118 arch/x86/kernel/tboot.c 	if (!pud)
pud               120 arch/x86/kernel/tboot.c 	pmd = pmd_alloc(&tboot_mm, pud, vaddr);
pud               170 arch/x86/kernel/vm86_32.c 	pud_t *pud;
pud               182 arch/x86/kernel/vm86_32.c 	pud = pud_offset(p4d, 0xA0000);
pud               183 arch/x86/kernel/vm86_32.c 	if (pud_none_or_clear_bad(pud))
pud               185 arch/x86/kernel/vm86_32.c 	pmd = pmd_offset(pud, 0xA0000);
pud               154 arch/x86/mm/fault.c 	pud_t *pud, *pud_k;
pud               173 arch/x86/mm/fault.c 	pud = pud_offset(p4d, address);
pud               178 arch/x86/mm/fault.c 	pmd = pmd_offset(pud, address);
pud               296 arch/x86/mm/fault.c 	pud_t *pud;
pud               309 arch/x86/mm/fault.c 	pud = pud_offset(p4d, address);
pud               310 arch/x86/mm/fault.c 	pmd = pmd_offset(pud, address);
pud               357 arch/x86/mm/fault.c 	pud_t *pud;
pud               399 arch/x86/mm/fault.c 	pud = pud_offset(p4d, address);
pud               400 arch/x86/mm/fault.c 	if (pud_none(*pud))
pud               403 arch/x86/mm/fault.c 	if (pud_large(*pud))
pud               406 arch/x86/mm/fault.c 	pmd = pmd_offset(pud, address);
pud               451 arch/x86/mm/fault.c 	pud_t *pud;
pud               471 arch/x86/mm/fault.c 	pud = pud_offset(p4d, address);
pud               472 arch/x86/mm/fault.c 	if (bad_address(pud))
pud               475 arch/x86/mm/fault.c 	pr_cont("PUD %lx ", pud_val(*pud));
pud               476 arch/x86/mm/fault.c 	if (!pud_present(*pud) || pud_large(*pud))
pud               479 arch/x86/mm/fault.c 	pmd = pmd_offset(pud, address);
pud              1125 arch/x86/mm/fault.c 	pud_t *pud;
pud              1154 arch/x86/mm/fault.c 	pud = pud_offset(p4d, address);
pud              1155 arch/x86/mm/fault.c 	if (!pud_present(*pud))
pud              1158 arch/x86/mm/fault.c 	if (pud_large(*pud))
pud              1159 arch/x86/mm/fault.c 		return spurious_kernel_fault_check(error_code, (pte_t *) pud);
pud              1161 arch/x86/mm/fault.c 	pmd = pmd_offset(pud, address);
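The x86 fault-path walkers above treat a present PUD with _PAGE_PSE as the final translation. A minimal sketch of that "stop at a large PUD" rule, built only from accessors shown in this listing; the function name is hypothetical:

    /* Return the leaf entry for address, cast to pte_t * as the callers above do. */
    static pte_t *walk_to_leaf(p4d_t *p4d, unsigned long address)
    {
        pud_t *pud = pud_offset(p4d, address);
        pmd_t *pmd;

        if (!pud_present(*pud))
            return NULL;
        if (pud_large(*pud))
            return (pte_t *)pud;   /* 1 GiB mapping: the PUD is the leaf */
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
            return NULL;
        return (pte_t *)pmd;       /* 2 MiB leaf, or walk on to the pte level */
    }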
pud                55 arch/x86/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                73 arch/x86/mm/hugetlbpage.c int pud_huge(pud_t pud)
pud                75 arch/x86/mm/hugetlbpage.c 	return !!(pud_val(pud) & _PAGE_PSE);
pud                27 arch/x86/mm/ident_map.c 		pud_t *pud = pud_page + pud_index(addr);
pud                37 arch/x86/mm/ident_map.c 			if (pud_present(*pud))
pud                42 arch/x86/mm/ident_map.c 			set_pud(pud, pudval);
pud                46 arch/x86/mm/ident_map.c 		if (pud_present(*pud)) {
pud                47 arch/x86/mm/ident_map.c 			pmd = pmd_offset(pud, 0);
pud                55 arch/x86/mm/ident_map.c 		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
pud                68 arch/x86/mm/ident_map.c 		pud_t *pud;
pud                75 arch/x86/mm/ident_map.c 			pud = pud_offset(p4d, 0);
pud                76 arch/x86/mm/ident_map.c 			ident_pud_init(info, pud, addr, next);
pud                79 arch/x86/mm/ident_map.c 		pud = (pud_t *)info->alloc_pgt_page(info->context);
pud                80 arch/x86/mm/ident_map.c 		if (!pud)
pud                82 arch/x86/mm/ident_map.c 		ident_pud_init(info, pud, addr, next);
pud                83 arch/x86/mm/ident_map.c 		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
pud               133 arch/x86/mm/ident_map.c 			pud_t *pud = pud_offset(p4d, 0);
pud               134 arch/x86/mm/ident_map.c 			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
pud                70 arch/x86/mm/init_32.c 	pud_t *pud;
pud                79 arch/x86/mm/init_32.c 		pud = pud_offset(p4d, 0);
pud                80 arch/x86/mm/init_32.c 		BUG_ON(pmd_table != pmd_offset(pud, 0));
pud                86 arch/x86/mm/init_32.c 	pud = pud_offset(p4d, 0);
pud                87 arch/x86/mm/init_32.c 	pmd_table = pmd_offset(pud, 0);
pud               397 arch/x86/mm/init_32.c 	pud_t *pud = pud_offset(p4d, vaddr);
pud               398 arch/x86/mm/init_32.c 	pmd_t *pmd = pmd_offset(pud, vaddr);
pud               419 arch/x86/mm/init_32.c 	pud_t *pud;
pud               428 arch/x86/mm/init_32.c 	pud = pud_offset(p4d, vaddr);
pud               429 arch/x86/mm/init_32.c 	pmd = pmd_offset(pud, vaddr);
pud               476 arch/x86/mm/init_32.c 	pud_t *pud;
pud               496 arch/x86/mm/init_32.c 		pud = pud_offset(p4d, va);
pud               497 arch/x86/mm/init_32.c 		pmd = pmd_offset(pud, va);
pud                72 arch/x86/mm/init_64.c DEFINE_POPULATE(p4d_populate, p4d, pud, init)
pud                74 arch/x86/mm/init_64.c DEFINE_POPULATE(pud_populate, pud, pmd, init)
pud                88 arch/x86/mm/init_64.c DEFINE_ENTRY(pud, pud, init)
pud               258 arch/x86/mm/init_64.c 		pud_t *pud = (pud_t *)spp_getpage();
pud               259 arch/x86/mm/init_64.c 		p4d_populate(&init_mm, p4d, pud);
pud               260 arch/x86/mm/init_64.c 		if (pud != pud_offset(p4d, 0))
pud               262 arch/x86/mm/init_64.c 			       pud, pud_offset(p4d, 0));
pud               267 arch/x86/mm/init_64.c static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
pud               269 arch/x86/mm/init_64.c 	if (pud_none(*pud)) {
pud               271 arch/x86/mm/init_64.c 		pud_populate(&init_mm, pud, pmd);
pud               272 arch/x86/mm/init_64.c 		if (pmd != pmd_offset(pud, 0))
pud               274 arch/x86/mm/init_64.c 			       pmd, pmd_offset(pud, 0));
pud               276 arch/x86/mm/init_64.c 	return pmd_offset(pud, vaddr);
pud               290 arch/x86/mm/init_64.c static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
pud               292 arch/x86/mm/init_64.c 	pmd_t *pmd = fill_pmd(pud, vaddr);
pud               307 arch/x86/mm/init_64.c 	pud_t *pud = fill_pud(p4d, vaddr);
pud               309 arch/x86/mm/init_64.c 	__set_pte_vaddr(pud, vaddr, new_pte);
pud               314 arch/x86/mm/init_64.c 	pud_t *pud = pud_page + pud_index(vaddr);
pud               316 arch/x86/mm/init_64.c 	__set_pte_vaddr(pud, vaddr, new_pte);
pud               341 arch/x86/mm/init_64.c 	pud_t *pud;
pud               345 arch/x86/mm/init_64.c 	pud = fill_pud(p4d, vaddr);
pud               346 arch/x86/mm/init_64.c 	return fill_pmd(pud, vaddr);
pud               365 arch/x86/mm/init_64.c 	pud_t *pud;
pud               381 arch/x86/mm/init_64.c 			pud = (pud_t *) spp_getpage();
pud               382 arch/x86/mm/init_64.c 			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
pud               385 arch/x86/mm/init_64.c 		pud = pud_offset(p4d, (unsigned long)__va(phys));
pud               386 arch/x86/mm/init_64.c 		if (pud_none(*pud)) {
pud               388 arch/x86/mm/init_64.c 			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
pud               391 arch/x86/mm/init_64.c 		pmd = pmd_offset(pud, phys);
pud               596 arch/x86/mm/init_64.c 		pud_t *pud;
pud               601 arch/x86/mm/init_64.c 		pud = pud_page + pud_index(vaddr);
pud               610 arch/x86/mm/init_64.c 				set_pud_init(pud, __pud(0), init);
pud               614 arch/x86/mm/init_64.c 		if (!pud_none(*pud)) {
pud               615 arch/x86/mm/init_64.c 			if (!pud_large(*pud)) {
pud               616 arch/x86/mm/init_64.c 				pmd = pmd_offset(pud, 0);
pud               641 arch/x86/mm/init_64.c 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
pud               647 arch/x86/mm/init_64.c 			set_pte_init((pte_t *)pud,
pud               661 arch/x86/mm/init_64.c 		pud_populate_init(&init_mm, pud, pmd, init);
pud               686 arch/x86/mm/init_64.c 		pud_t *pud;
pud               703 arch/x86/mm/init_64.c 			pud = pud_offset(p4d, 0);
pud               704 arch/x86/mm/init_64.c 			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
pud               709 arch/x86/mm/init_64.c 		pud = alloc_low_page();
pud               710 arch/x86/mm/init_64.c 		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
pud               714 arch/x86/mm/init_64.c 		p4d_populate_init(&init_mm, p4d, pud, init);
pud               921 arch/x86/mm/init_64.c static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
pud               933 arch/x86/mm/init_64.c 	free_pagetable(pud_page(*pud), 0);
pud               935 arch/x86/mm/init_64.c 	pud_clear(pud);
pud               941 arch/x86/mm/init_64.c 	pud_t *pud;
pud               945 arch/x86/mm/init_64.c 		pud = pud_start + i;
pud               946 arch/x86/mm/init_64.c 		if (!pud_none(*pud))
pud              1090 arch/x86/mm/init_64.c 	pud_t *pud;
pud              1093 arch/x86/mm/init_64.c 	pud = pud_start + pud_index(addr);
pud              1094 arch/x86/mm/init_64.c 	for (; addr < end; addr = next, pud++) {
pud              1097 arch/x86/mm/init_64.c 		if (!pud_present(*pud))
pud              1100 arch/x86/mm/init_64.c 		if (pud_large(*pud)) {
pud              1104 arch/x86/mm/init_64.c 					free_pagetable(pud_page(*pud),
pud              1108 arch/x86/mm/init_64.c 				pud_clear(pud);
pud              1115 arch/x86/mm/init_64.c 				page_addr = page_address(pud_page(*pud));
pud              1118 arch/x86/mm/init_64.c 					free_pagetable(pud_page(*pud),
pud              1122 arch/x86/mm/init_64.c 					pud_clear(pud);
pud              1130 arch/x86/mm/init_64.c 		pmd_base = pmd_offset(pud, 0);
pud              1132 arch/x86/mm/init_64.c 		free_pmd_table(pmd_base, pud);
pud              1346 arch/x86/mm/init_64.c 	pud_t *pud;
pud              1361 arch/x86/mm/init_64.c 	pud = pud_offset(p4d, addr);
pud              1362 arch/x86/mm/init_64.c 	if (pud_none(*pud))
pud              1365 arch/x86/mm/init_64.c 	if (pud_large(*pud))
pud              1366 arch/x86/mm/init_64.c 		return pfn_valid(pud_pfn(*pud));
pud              1368 arch/x86/mm/init_64.c 	pmd = pmd_offset(pud, addr);
pud              1456 arch/x86/mm/init_64.c 	pud_t *pud;
pud              1470 arch/x86/mm/init_64.c 		pud = vmemmap_pud_populate(p4d, addr, node);
pud              1471 arch/x86/mm/init_64.c 		if (!pud)
pud              1474 arch/x86/mm/init_64.c 		pmd = pmd_offset(pud, addr);
pud              1543 arch/x86/mm/init_64.c 	pud_t *pud;
pud              1565 arch/x86/mm/init_64.c 		pud = pud_offset(p4d, addr);
pud              1566 arch/x86/mm/init_64.c 		if (pud_none(*pud)) {
pud              1570 arch/x86/mm/init_64.c 		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
pud              1574 arch/x86/mm/init_64.c 			pmd = pmd_offset(pud, addr);
pud              1588 arch/x86/mm/init_64.c 			pmd = pmd_offset(pud, addr);
pud               807 arch/x86/mm/ioremap.c 	pud_t *pud = pud_offset(p4d, addr);
pud               808 arch/x86/mm/ioremap.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud                75 arch/x86/mm/kasan_init_64.c static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
pud                81 arch/x86/mm/kasan_init_64.c 	if (pud_none(*pud)) {
pud                88 arch/x86/mm/kasan_init_64.c 			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
pud                95 arch/x86/mm/kasan_init_64.c 		pud_populate(&init_mm, pud, p);
pud                98 arch/x86/mm/kasan_init_64.c 	pmd = pmd_offset(pud, addr);
pud               109 arch/x86/mm/kasan_init_64.c 	pud_t *pud;
pud               118 arch/x86/mm/kasan_init_64.c 	pud = pud_offset(p4d, addr);
pud               121 arch/x86/mm/kasan_init_64.c 		if (!pud_large(*pud))
pud               122 arch/x86/mm/kasan_init_64.c 			kasan_populate_pud(pud, addr, next, nid);
pud               123 arch/x86/mm/kasan_init_64.c 	} while (pud++, addr = next, addr != end);
pud               153 arch/x86/mm/kaslr.c 	pud_t *pud_page_tramp, *pud, *pud_tramp;
pud               172 arch/x86/mm/kaslr.c 	pud = pud_offset(p4d, vaddr);
pud               175 arch/x86/mm/kaslr.c 	*pud_tramp = *pud;
pud               109 arch/x86/mm/mem_encrypt_identity.c 	pud_t *pud;
pud               122 arch/x86/mm/mem_encrypt_identity.c 		pud = ppd->pgtable_area;
pud               123 arch/x86/mm/mem_encrypt_identity.c 		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
pud               124 arch/x86/mm/mem_encrypt_identity.c 		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
pud               125 arch/x86/mm/mem_encrypt_identity.c 		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
pud               128 arch/x86/mm/mem_encrypt_identity.c 	pud = pud_offset(p4d, ppd->vaddr);
pud               129 arch/x86/mm/mem_encrypt_identity.c 	if (pud_none(*pud)) {
pud               133 arch/x86/mm/mem_encrypt_identity.c 		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
pud               136 arch/x86/mm/mem_encrypt_identity.c 	if (pud_large(*pud))
pud               139 arch/x86/mm/mem_encrypt_identity.c 	return pud;
pud               144 arch/x86/mm/mem_encrypt_identity.c 	pud_t *pud;
pud               147 arch/x86/mm/mem_encrypt_identity.c 	pud = sme_prepare_pgd(ppd);
pud               148 arch/x86/mm/mem_encrypt_identity.c 	if (!pud)
pud               151 arch/x86/mm/mem_encrypt_identity.c 	pmd = pmd_offset(pud, ppd->vaddr);
pud               160 arch/x86/mm/mem_encrypt_identity.c 	pud_t *pud;
pud               164 arch/x86/mm/mem_encrypt_identity.c 	pud = sme_prepare_pgd(ppd);
pud               165 arch/x86/mm/mem_encrypt_identity.c 	if (!pud)
pud               168 arch/x86/mm/mem_encrypt_identity.c 	pmd = pmd_offset(pud, ppd->vaddr);
pud               571 arch/x86/mm/pageattr.c 	pud_t *pud;
pud               587 arch/x86/mm/pageattr.c 	pud = pud_offset(p4d, address);
pud               588 arch/x86/mm/pageattr.c 	if (pud_none(*pud))
pud               592 arch/x86/mm/pageattr.c 	if (pud_large(*pud) || !pud_present(*pud))
pud               593 arch/x86/mm/pageattr.c 		return (pte_t *)pud;
pud               595 arch/x86/mm/pageattr.c 	pmd = pmd_offset(pud, address);
pud               640 arch/x86/mm/pageattr.c 	pud_t *pud;
pud               650 arch/x86/mm/pageattr.c 	pud = pud_offset(p4d, address);
pud               651 arch/x86/mm/pageattr.c 	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
pud               654 arch/x86/mm/pageattr.c 	return pmd_offset(pud, address);
pud               716 arch/x86/mm/pageattr.c 			pud_t *pud;
pud               721 arch/x86/mm/pageattr.c 			pud = pud_offset(p4d, address);
pud               722 arch/x86/mm/pageattr.c 			pmd = pmd_offset(pud, address);
pud              1106 arch/x86/mm/pageattr.c static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
pud              1110 arch/x86/mm/pageattr.c 		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
pud              1111 arch/x86/mm/pageattr.c 			pud_clear(pud);
pud              1114 arch/x86/mm/pageattr.c static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
pud              1116 arch/x86/mm/pageattr.c 	pmd_t *pmd = pmd_offset(pud, start);
pud              1125 arch/x86/mm/pageattr.c 		__unmap_pmd_range(pud, pmd, start, pre_end);
pud              1138 arch/x86/mm/pageattr.c 			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
pud              1148 arch/x86/mm/pageattr.c 		return __unmap_pmd_range(pud, pmd, start, end);
pud              1153 arch/x86/mm/pageattr.c 	if (!pud_none(*pud))
pud              1154 arch/x86/mm/pageattr.c 		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
pud              1155 arch/x86/mm/pageattr.c 			pud_clear(pud);
pud              1160 arch/x86/mm/pageattr.c 	pud_t *pud = pud_offset(p4d, start);
pud              1169 arch/x86/mm/pageattr.c 		unmap_pmd_range(pud, start, pre_end);
pud              1172 arch/x86/mm/pageattr.c 		pud++;
pud              1180 arch/x86/mm/pageattr.c 		if (pud_large(*pud))
pud              1181 arch/x86/mm/pageattr.c 			pud_clear(pud);
pud              1183 arch/x86/mm/pageattr.c 			unmap_pmd_range(pud, start, start + PUD_SIZE);
pud              1186 arch/x86/mm/pageattr.c 		pud++;
pud              1193 arch/x86/mm/pageattr.c 		unmap_pmd_range(pud, start, end);
pud              1211 arch/x86/mm/pageattr.c static int alloc_pmd_page(pud_t *pud)
pud              1217 arch/x86/mm/pageattr.c 	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
pud              1242 arch/x86/mm/pageattr.c 			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
pud              1262 arch/x86/mm/pageattr.c 		pmd = pmd_offset(pud, start);
pud              1285 arch/x86/mm/pageattr.c 		if (pud_none(*pud))
pud              1286 arch/x86/mm/pageattr.c 			if (alloc_pmd_page(pud))
pud              1289 arch/x86/mm/pageattr.c 		pmd = pmd_offset(pud, start);
pud              1303 arch/x86/mm/pageattr.c 		pmd = pmd_offset(pud, start);
pud              1317 arch/x86/mm/pageattr.c 	pud_t *pud;
pud              1336 arch/x86/mm/pageattr.c 		pud = pud_offset(p4d, start);
pud              1341 arch/x86/mm/pageattr.c 		if (pud_none(*pud))
pud              1342 arch/x86/mm/pageattr.c 			if (alloc_pmd_page(pud))
pud              1346 arch/x86/mm/pageattr.c 					 pud, pgprot);
pud              1357 arch/x86/mm/pageattr.c 	pud = pud_offset(p4d, start);
pud              1364 arch/x86/mm/pageattr.c 		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
pud              1370 arch/x86/mm/pageattr.c 		pud++;
pud              1377 arch/x86/mm/pageattr.c 		pud = pud_offset(p4d, start);
pud              1378 arch/x86/mm/pageattr.c 		if (pud_none(*pud))
pud              1379 arch/x86/mm/pageattr.c 			if (alloc_pmd_page(pud))
pud              1383 arch/x86/mm/pageattr.c 				   pud, pgprot);
pud              1399 arch/x86/mm/pageattr.c 	pud_t *pud = NULL;	/* shut up gcc */
pud              1419 arch/x86/mm/pageattr.c 		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
pud              1420 arch/x86/mm/pageattr.c 		if (!pud)
pud              1423 arch/x86/mm/pageattr.c 		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
pud                70 arch/x86/mm/pgtable.c void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
pud                72 arch/x86/mm/pgtable.c 	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
pud                73 arch/x86/mm/pgtable.c 	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
pud               292 arch/x86/mm/pgtable.c 	pud_t *pud;
pud               299 arch/x86/mm/pgtable.c 	pud = pud_offset(p4d, 0);
pud               301 arch/x86/mm/pgtable.c 	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
pud               308 arch/x86/mm/pgtable.c 		pud_populate(mm, pud, pmd);
pud               696 arch/x86/mm/pgtable.c int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
pud               706 arch/x86/mm/pgtable.c 	if (pud_present(*pud) && !pud_huge(*pud))
pud               711 arch/x86/mm/pgtable.c 	set_pte((pte_t *)pud, pfn_pte(
pud               755 arch/x86/mm/pgtable.c int pud_clear_huge(pud_t *pud)
pud               757 arch/x86/mm/pgtable.c 	if (pud_large(*pud)) {
pud               758 arch/x86/mm/pgtable.c 		pud_clear(pud);
pud               799 arch/x86/mm/pgtable.c int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pud               805 arch/x86/mm/pgtable.c 	pmd = (pmd_t *)pud_page_vaddr(*pud);
pud               816 arch/x86/mm/pgtable.c 	pud_clear(pud);
pud               859 arch/x86/mm/pgtable.c int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pud               861 arch/x86/mm/pgtable.c 	return pud_none(*pud);
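pud_set_huge() and pud_clear_huge() above are the x86 backends behind the generic huge ioremap/vmap interface. A minimal usage sketch, assuming a suitably aligned physical range; the wrapper names are hypothetical and the real alignment/MTRR checks live inside pud_set_huge():

    /* Try to map one PUD-sized region as a single huge entry. */
    static int map_huge_region(pud_t *pud, phys_addr_t phys, pgprot_t prot)
    {
        return pud_set_huge(pud, phys, prot);  /* 1 on success, 0 if refused */
    }

    /* Tear the huge entry down again. */
    static void unmap_huge_region(pud_t *pud)
    {
        if (!pud_clear_huge(pud))
            pr_warn("entry was not a huge PUD\n");
    }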
pud                32 arch/x86/mm/pgtable_32.c 	pud_t *pud;
pud                46 arch/x86/mm/pgtable_32.c 	pud = pud_offset(p4d, vaddr);
pud                47 arch/x86/mm/pgtable_32.c 	if (pud_none(*pud)) {
pud                51 arch/x86/mm/pgtable_32.c 	pmd = pmd_offset(pud, vaddr);
pud               204 arch/x86/mm/pti.c 	pud_t *pud;
pud               219 arch/x86/mm/pti.c 	pud = pud_offset(p4d, address);
pud               221 arch/x86/mm/pti.c 	if (pud_large(*pud)) {
pud               225 arch/x86/mm/pti.c 	if (pud_none(*pud)) {
pud               230 arch/x86/mm/pti.c 		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
pud               233 arch/x86/mm/pti.c 	return pmd_offset(pud, address);
pud               318 arch/x86/mm/pti.c 		pud_t *pud;
pud               331 arch/x86/mm/pti.c 		pud = pud_offset(p4d, addr);
pud               332 arch/x86/mm/pti.c 		if (pud_none(*pud)) {
pud               338 arch/x86/mm/pti.c 		pmd = pmd_offset(pud, addr);
pud                80 arch/x86/platform/efi/efi_64.c 	pud_t *pud;
pud               121 arch/x86/platform/efi/efi_64.c 			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
pud               122 arch/x86/platform/efi/efi_64.c 			if (!pud) {
pud               137 arch/x86/platform/efi/efi_64.c 				pud[j] = *pud_offset(p4d_k, vaddr);
pud               159 arch/x86/platform/efi/efi_64.c 	pud_t *pud;
pud               182 arch/x86/platform/efi/efi_64.c 			pud = (pud_t *)p4d_page_vaddr(*p4d);
pud               183 arch/x86/platform/efi/efi_64.c 			pud_free(&init_mm, pud);
pud               211 arch/x86/platform/efi/efi_64.c 	pud_t *pud;
pud               229 arch/x86/platform/efi/efi_64.c 	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
pud               230 arch/x86/platform/efi/efi_64.c 	if (!pud) {
pud               215 arch/x86/power/hibernate.c 	pud_t *pud;
pud               233 arch/x86/power/hibernate.c 	pud = pud_offset(p4d, relocated_restore_code);
pud               234 arch/x86/power/hibernate.c 	if (pud_large(*pud)) {
pud               235 arch/x86/power/hibernate.c 		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
pud               238 arch/x86/power/hibernate.c 	pmd = pmd_offset(pud, relocated_restore_code);
pud                33 arch/x86/power/hibernate_32.c 	pud_t *pud;
pud                43 arch/x86/power/hibernate_32.c 	pud = pud_offset(p4d, 0);
pud                45 arch/x86/power/hibernate_32.c 	BUG_ON(pmd_table != pmd_offset(pud, 0));
pud                48 arch/x86/power/hibernate_32.c 	pud = pud_offset(p4d, 0);
pud                49 arch/x86/power/hibernate_32.c 	pmd_table = pmd_offset(pud, 0);
pud                31 arch/x86/power/hibernate_64.c 	pud_t *pud;
pud                60 arch/x86/power/hibernate_64.c 	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
pud                61 arch/x86/power/hibernate_64.c 	if (!pud)
pud                70 arch/x86/power/hibernate_64.c 	set_pud(pud + pud_index(restore_jump_address),
pud                73 arch/x86/power/hibernate_64.c 		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
pud                80 arch/x86/power/hibernate_64.c 		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
pud               470 arch/x86/xen/mmu_pv.c __visible pudval_t xen_pud_val(pud_t pud)
pud               472 arch/x86/xen/mmu_pv.c 	return pte_mfn_to_pfn(pud.pud);
pud               476 arch/x86/xen/mmu_pv.c __visible pud_t xen_make_pud(pudval_t pud)
pud               478 arch/x86/xen/mmu_pv.c 	pud = pte_pfn_to_mfn(pud);
pud               480 arch/x86/xen/mmu_pv.c 	return native_make_pud(pud);
pud               590 arch/x86/xen/mmu_pv.c static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
pud               600 arch/x86/xen/mmu_pv.c 		if (pud_none(pud[i]))
pud               603 arch/x86/xen/mmu_pv.c 		pmd = pmd_offset(&pud[i], 0);
pud               617 arch/x86/xen/mmu_pv.c 	pud_t *pud;
pud               623 arch/x86/xen/mmu_pv.c 	pud = pud_offset(p4d, 0);
pud               625 arch/x86/xen/mmu_pv.c 		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
pud               626 arch/x86/xen/mmu_pv.c 	flush |= xen_pud_walk(mm, pud, func, last, limit);
pud              1160 arch/x86/xen/mmu_pv.c static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
pud              1166 arch/x86/xen/mmu_pv.c 	if (pud_large(*pud)) {
pud              1167 arch/x86/xen/mmu_pv.c 		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
pud              1172 arch/x86/xen/mmu_pv.c 	pmd_tbl = pmd_offset(pud, 0);
pud              1178 arch/x86/xen/mmu_pv.c 	set_pud(pud, __pud(0));
pud              1914 arch/x86/xen/mmu_pv.c 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
pud              2016 arch/x86/xen/mmu_pv.c 	pud_t pud;
pud              2027 arch/x86/xen/mmu_pv.c 	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
pud              2028 arch/x86/xen/mmu_pv.c 						       sizeof(pud)));
pud              2029 arch/x86/xen/mmu_pv.c 	if (!pud_present(pud))
pud              2031 arch/x86/xen/mmu_pv.c 	pa = pud_val(pud) & PTE_PFN_MASK;
pud              2032 arch/x86/xen/mmu_pv.c 	if (pud_large(pud))
pud              2063 arch/x86/xen/mmu_pv.c 	pud_t *pud;
pud              2096 arch/x86/xen/mmu_pv.c 		pud = early_memremap(pud_phys, PAGE_SIZE);
pud              2097 arch/x86/xen/mmu_pv.c 		clear_page(pud);
pud              2126 arch/x86/xen/mmu_pv.c 			pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
pud              2130 arch/x86/xen/mmu_pv.c 		early_memunmap(pud, PAGE_SIZE);
pud               655 drivers/pinctrl/sh-pfc/core.c 			if (pfc->info->bias_regs[i].pud)
pud               656 drivers/pinctrl/sh-pfc/core.c 				do_reg(pfc, pfc->info->bias_regs[i].pud, n++);
pud              5835 drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c 	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
pud              5856 drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c 	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
pud              5860 drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c 	sh_pfc_write(pfc, reg->pud, updown);
pud              6187 drivers/pinctrl/sh-pfc/pfc-r8a7795.c 	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
pud              6208 drivers/pinctrl/sh-pfc/pfc-r8a7795.c 	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
pud              6212 drivers/pinctrl/sh-pfc/pfc-r8a7795.c 	sh_pfc_write(pfc, reg->pud, updown);
pud              6153 drivers/pinctrl/sh-pfc/pfc-r8a7796.c 	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
pud              6174 drivers/pinctrl/sh-pfc/pfc-r8a7796.c 	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
pud              6178 drivers/pinctrl/sh-pfc/pfc-r8a7796.c 	sh_pfc_write(pfc, reg->pud, updown);
pud              6393 drivers/pinctrl/sh-pfc/pfc-r8a77965.c 	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
pud              6414 drivers/pinctrl/sh-pfc/pfc-r8a77965.c 	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
pud              6418 drivers/pinctrl/sh-pfc/pfc-r8a77965.c 	sh_pfc_write(pfc, reg->pud, updown);
pud              5240 drivers/pinctrl/sh-pfc/pfc-r8a77990.c 	else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
pud              5261 drivers/pinctrl/sh-pfc/pfc-r8a77990.c 	updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
pud              5265 drivers/pinctrl/sh-pfc/pfc-r8a77990.c 	sh_pfc_write(pfc, reg->pud, updown);
pud               184 drivers/pinctrl/sh-pfc/sh_pfc.h 	u32 pud;		/* Pull-up/down control register (optional) */
pud               190 drivers/pinctrl/sh-pfc/sh_pfc.h 	.pud = r2,	\
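Illustrative sketch for the sh-pfc hits above: the *_get_bias()/*_set_bias() lines all share one read-modify-write pattern on the pull-up/down (PUD) register. The helper names sh_pfc_read()/sh_pfc_write() and the reg->pud field are taken from the listing; the function name example_set_pull_up and the "bit set means pull-up" convention are assumptions, and the snippet assumes kernel-internal driver context.

	/* Select pull-up on one pin: clear its bit in the PUD register, then set it. */
	static void example_set_pull_up(struct sh_pfc *pfc,
					const struct pinmux_bias_reg *reg,
					unsigned int bit)
	{
		u32 updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);	/* clear the pin's bit */

		sh_pfc_write(pfc, reg->pud, updown | BIT(bit));		/* write back with the bit set */
	}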
pud               284 fs/userfaultfd.c 	pud_t *pud;
pud               297 fs/userfaultfd.c 	pud = pud_offset(p4d, address);
pud               298 fs/userfaultfd.c 	if (!pud_present(*pud))
pud               300 fs/userfaultfd.c 	pmd = pmd_offset(pud, address);
pud                15 include/asm-generic/4level-fixup.h #define pmd_alloc(mm, pud, address) \
pud                16 include/asm-generic/4level-fixup.h 	((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
pud                17 include/asm-generic/4level-fixup.h  		NULL: pmd_offset(pud, address))
pud                20 include/asm-generic/4level-fixup.h #define pud_none(pud)			0
pud                21 include/asm-generic/4level-fixup.h #define pud_bad(pud)			0
pud                22 include/asm-generic/4level-fixup.h #define pud_present(pud)		1
pud                23 include/asm-generic/4level-fixup.h #define pud_ERROR(pud)			do { } while (0)
pud                24 include/asm-generic/4level-fixup.h #define pud_clear(pud)			pgd_clear(pud)
pud                25 include/asm-generic/4level-fixup.h #define pud_val(pud)			pgd_val(pud)
pud                26 include/asm-generic/4level-fixup.h #define pud_populate(mm, pud, pmd)	pgd_populate(mm, pud, pmd)
pud                27 include/asm-generic/4level-fixup.h #define pud_page(pud)			pgd_page(pud)
pud                28 include/asm-generic/4level-fixup.h #define pud_page_vaddr(pud)		pgd_page_vaddr(pud)
pud                43 include/asm-generic/5level-fixup.h #define p4d_populate(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
pud                44 include/asm-generic/5level-fixup.h #define p4d_populate_safe(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
pud                31 include/asm-generic/pgtable-nop4d-hack.h #define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
pud                33 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate(mm, pgd, pud)		do { } while (0)
pud                34 include/asm-generic/pgtable-nop4d-hack.h #define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
pud                18 include/asm-generic/pgtable-nopmd.h typedef struct { pud_t pud; } pmd_t;
pud                30 include/asm-generic/pgtable-nopmd.h static inline int pud_none(pud_t pud)		{ return 0; }
pud                31 include/asm-generic/pgtable-nopmd.h static inline int pud_bad(pud_t pud)		{ return 0; }
pud                32 include/asm-generic/pgtable-nopmd.h static inline int pud_present(pud_t pud)	{ return 1; }
pud                33 include/asm-generic/pgtable-nopmd.h static inline void pud_clear(pud_t *pud)	{ }
pud                34 include/asm-generic/pgtable-nopmd.h #define pmd_ERROR(pmd)				(pud_ERROR((pmd).pud))
pud                44 include/asm-generic/pgtable-nopmd.h static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
pud                46 include/asm-generic/pgtable-nopmd.h 	return (pmd_t *)pud;
pud                49 include/asm-generic/pgtable-nopmd.h #define pmd_val(x)				(pud_val((x).pud))
pud                52 include/asm-generic/pgtable-nopmd.h #define pud_page(pud)				(pmd_page((pmd_t){ pud }))
pud                53 include/asm-generic/pgtable-nopmd.h #define pud_page_vaddr(pud)			(pmd_page_vaddr((pmd_t){ pud }))
pud                35 include/asm-generic/pgtable-nopud.h #define pud_ERROR(pud)				(p4d_ERROR((pud).p4d))
pud                37 include/asm-generic/pgtable-nopud.h #define p4d_populate(mm, p4d, pud)		do { } while (0)
pud                38 include/asm-generic/pgtable-nopud.h #define p4d_populate_safe(mm, p4d, pud)		do { } while (0)
pud               151 include/asm-generic/pgtable.h 	pud_t pud = *pudp;
pud               154 include/asm-generic/pgtable.h 	return pud;
pud               363 include/asm-generic/pgtable.h #define pud_access_permitted(pud, write) \
pud               364 include/asm-generic/pgtable.h 	(pud_present(pud) && (!(write) || pud_write(pud)))
pud               423 include/asm-generic/pgtable.h #define set_pud_safe(pudp, pud) \
pud               425 include/asm-generic/pgtable.h 	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
pud               426 include/asm-generic/pgtable.h 	set_pud(pudp, pud); \
pud               587 include/asm-generic/pgtable.h static inline int pud_none_or_clear_bad(pud_t *pud)
pud               589 include/asm-generic/pgtable.h 	if (pud_none(*pud))
pud               591 include/asm-generic/pgtable.h 	if (unlikely(pud_bad(*pud))) {
pud               592 include/asm-generic/pgtable.h 		pud_clear_bad(pud);
pud               899 include/asm-generic/pgtable.h static inline int pud_write(pud_t pud)
pud               909 include/asm-generic/pgtable.h static inline int pud_trans_huge(pud_t pud)
pud              1055 include/asm-generic/pgtable.h int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
pud              1057 include/asm-generic/pgtable.h int pud_clear_huge(pud_t *pud);
pud              1060 include/asm-generic/pgtable.h int pud_free_pmd_page(pud_t *pud, unsigned long addr);
pud              1067 include/asm-generic/pgtable.h static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
pud              1079 include/asm-generic/pgtable.h static inline int pud_clear_huge(pud_t *pud)
pud              1091 include/asm-generic/pgtable.h static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
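Minimal sketch of how the asm-generic helpers above are typically used before descending a level: pud_none_or_clear_bad() and pmd_offset() appear in the listing; the wrapper name example_pmd_of is hypothetical and the code assumes kernel-internal context.

	/* Guard a pud entry, then descend to its pmd table. */
	static pmd_t *example_pmd_of(pud_t *pud, unsigned long addr)
	{
		if (pud_none_or_clear_bad(pud))	/* empty or corrupt entry: skip it */
			return NULL;
		return pmd_offset(pud, addr);	/* entry is a valid table pointer */
	}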
pud                40 include/linux/huge_mm.h 			pud_t *pud, unsigned long addr);
pud               188 include/linux/huge_mm.h void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
pud               207 include/linux/huge_mm.h extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
pud               225 include/linux/huge_mm.h static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
pud               229 include/linux/huge_mm.h 	if (pud_trans_huge(*pud) || pud_devmap(*pud))
pud               230 include/linux/huge_mm.h 		return __pud_trans_huge_lock(pud, vma);
pud               244 include/linux/huge_mm.h 		pud_t *pud, int flags, struct dev_pagemap **pgmap);
pud               260 include/linux/huge_mm.h static inline bool is_huge_zero_pud(pud_t pud)
pud               365 include/linux/huge_mm.h static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
pud               382 include/linux/huge_mm.h static inline bool is_huge_zero_pud(pud_t pud)
pud               399 include/linux/huge_mm.h 	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
pud               111 include/linux/hugetlb.h pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
pud               133 include/linux/hugetlb.h 				pud_t *pud, int flags);
pud               138 include/linux/hugetlb.h int pud_huge(pud_t pud);
pud               179 include/linux/hugetlb.h #define follow_huge_pud(mm, addr, pud, flags)	NULL
pud               425 include/linux/mm.h 	pud_t *pud;			/* Pointer to pud entry matching
pud               572 include/linux/mm.h static inline int pud_devmap(pud_t pud)
pud              1798 include/linux/mm.h static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
pud              1808 include/linux/mm.h int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
pud              1882 include/linux/mm.h static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
pud              1884 include/linux/mm.h 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
pud              1885 include/linux/mm.h 		NULL: pmd_offset(pud, address);
pud              2060 include/linux/mm.h static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
pud              2065 include/linux/mm.h static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
pud              2067 include/linux/mm.h 	spinlock_t *ptl = pud_lockptr(mm, pud);
pud              2778 include/linux/mm.h pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pud                29 include/linux/pagewalk.h 	int (*pud_entry)(pud_t *pud, unsigned long addr,
pud               116 include/linux/pfn_t.h pud_t pud_mkdevmap(pud_t pud);
pud               103 lib/ioremap.c  static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
pud               109 lib/ioremap.c  	pmd = pmd_alloc(&init_mm, pud, addr);
pud               124 lib/ioremap.c  static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
pud               140 lib/ioremap.c  	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
pud               143 lib/ioremap.c  	return pud_set_huge(pud, phys_addr, prot);
pud               149 lib/ioremap.c  	pud_t *pud;
pud               152 lib/ioremap.c  	pud = pud_alloc(&init_mm, p4d, addr);
pud               153 lib/ioremap.c  	if (!pud)
pud               158 lib/ioremap.c  		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
pud               161 lib/ioremap.c  		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
pud               163 lib/ioremap.c  	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
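Sketch of the range-walk idiom the ioremap hits above follow (allocate the pud table, then loop entry by entry). pud_alloc() with &init_mm is shown in the listing; pud_addr_end() and the function name example_pud_range are assumptions filled in from context, and error handling for the inner level is elided.

	/* Walk [addr, end) one pud entry at a time. */
	static int example_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
	{
		pud_t *pud;
		unsigned long next;

		pud = pud_alloc(&init_mm, p4d, addr);	/* find or allocate the pud table */
		if (!pud)
			return -ENOMEM;
		do {
			next = pud_addr_end(addr, end);	/* clamp to the next pud boundary */
			/* ... map or walk [addr, next) at the pmd level ... */
		} while (pud++, addr = next, addr != end);
		return 0;
	}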
pud               437 mm/gup.c       	pud_t *pud;
pud               442 mm/gup.c       	pud = pud_offset(p4dp, address);
pud               443 mm/gup.c       	if (pud_none(*pud))
pud               445 mm/gup.c       	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
pud               446 mm/gup.c       		page = follow_huge_pud(mm, address, pud, flags);
pud               451 mm/gup.c       	if (is_hugepd(__hugepd(pud_val(*pud)))) {
pud               453 mm/gup.c       				      __hugepd(pud_val(*pud)), flags,
pud               459 mm/gup.c       	if (pud_devmap(*pud)) {
pud               460 mm/gup.c       		ptl = pud_lock(mm, pud);
pud               461 mm/gup.c       		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
pud               466 mm/gup.c       	if (unlikely(pud_bad(*pud)))
pud               469 mm/gup.c       	return follow_pmd_mask(vma, address, pud, flags, ctx);
pud               575 mm/gup.c       	pud_t *pud;
pud               592 mm/gup.c       	pud = pud_offset(p4d, address);
pud               593 mm/gup.c       	if (pud_none(*pud))
pud               595 mm/gup.c       	pmd = pmd_offset(pud, address);
pud              1974 mm/gup.c       static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
pud              2187 mm/gup.c       static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
pud              2193 mm/gup.c       	pmdp = pmd_offset(&pud, addr);
pud              2238 mm/gup.c       		pud_t pud = READ_ONCE(*pudp);
pud              2241 mm/gup.c       		if (pud_none(pud))
pud              2243 mm/gup.c       		if (unlikely(pud_huge(pud))) {
pud              2244 mm/gup.c       			if (!gup_huge_pud(pud, pudp, addr, next, flags,
pud              2247 mm/gup.c       		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
pud              2248 mm/gup.c       			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
pud              2251 mm/gup.c       		} else if (!gup_pmd_range(pud, addr, next, flags, pages, nr))
pud               647 mm/hmm.c       static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
pud               649 mm/hmm.c       	if (!pud_present(pud))
pud               651 mm/hmm.c       	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
pud               663 mm/hmm.c       	pud_t pud;
pud               667 mm/hmm.c       	pud = READ_ONCE(*pudp);
pud               668 mm/hmm.c       	if (pud_none(pud))
pud               671 mm/hmm.c       	if (pud_huge(pud) && pud_devmap(pud)) {
pud               676 mm/hmm.c       		if (!pud_present(pud))
pud               683 mm/hmm.c       		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
pud               690 mm/hmm.c       		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
pud               851 mm/huge_memory.c static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
pud               854 mm/huge_memory.c 		pud = pud_mkwrite(pud);
pud               855 mm/huge_memory.c 	return pud;
pud               859 mm/huge_memory.c 		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
pud               865 mm/huge_memory.c 	ptl = pud_lock(mm, pud);
pud               866 mm/huge_memory.c 	if (!pud_none(*pud)) {
pud               868 mm/huge_memory.c 			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
pud               869 mm/huge_memory.c 				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
pud               872 mm/huge_memory.c 			entry = pud_mkyoung(*pud);
pud               874 mm/huge_memory.c 			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
pud               875 mm/huge_memory.c 				update_mmu_cache_pud(vma, addr, pud);
pud               887 mm/huge_memory.c 	set_pud_at(mm, addr, pud, entry);
pud               888 mm/huge_memory.c 	update_mmu_cache_pud(vma, addr, pud);
pud               916 mm/huge_memory.c 	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
pud              1069 mm/huge_memory.c 		pud_t *pud, int flags)
pud              1073 mm/huge_memory.c 	_pud = pud_mkyoung(*pud);
pud              1077 mm/huge_memory.c 				pud, _pud, flags & FOLL_WRITE))
pud              1078 mm/huge_memory.c 		update_mmu_cache_pud(vma, addr, pud);
pud              1082 mm/huge_memory.c 		pud_t *pud, int flags, struct dev_pagemap **pgmap)
pud              1084 mm/huge_memory.c 	unsigned long pfn = pud_pfn(*pud);
pud              1088 mm/huge_memory.c 	assert_spin_locked(pud_lockptr(mm, pud));
pud              1090 mm/huge_memory.c 	if (flags & FOLL_WRITE && !pud_write(*pud))
pud              1093 mm/huge_memory.c 	if (pud_present(*pud) && pud_devmap(*pud))
pud              1099 mm/huge_memory.c 		touch_pud(vma, addr, pud, flags);
pud              1123 mm/huge_memory.c 	pud_t pud;
pud              1131 mm/huge_memory.c 	pud = *src_pud;
pud              1132 mm/huge_memory.c 	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
pud              1140 mm/huge_memory.c 	if (is_huge_zero_pud(pud)) {
pud              1145 mm/huge_memory.c 	pud = pud_mkold(pud_wrprotect(pud));
pud              1146 mm/huge_memory.c 	set_pud_at(dst_mm, addr, dst_pud, pud);
pud              1161 mm/huge_memory.c 	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
pud              1162 mm/huge_memory.c 	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
pud              1169 mm/huge_memory.c 	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
pud              1170 mm/huge_memory.c 		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
pud              2029 mm/huge_memory.c spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
pud              2033 mm/huge_memory.c 	ptl = pud_lock(vma->vm_mm, pud);
pud              2034 mm/huge_memory.c 	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
pud              2042 mm/huge_memory.c 		 pud_t *pud, unsigned long addr)
pud              2046 mm/huge_memory.c 	ptl = __pud_trans_huge_lock(pud, vma);
pud              2055 mm/huge_memory.c 	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
pud              2056 mm/huge_memory.c 	tlb_remove_pud_tlb_entry(tlb, pud, addr);
pud              2067 mm/huge_memory.c static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
pud              2073 mm/huge_memory.c 	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
pud              2077 mm/huge_memory.c 	pudp_huge_clear_flush_notify(vma, haddr, pud);
pud              2080 mm/huge_memory.c void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
pud              2090 mm/huge_memory.c 	ptl = pud_lock(vma->vm_mm, pud);
pud              2091 mm/huge_memory.c 	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
pud              2093 mm/huge_memory.c 	__split_huge_pud_locked(vma, pud, range.start);
pud              2349 mm/huge_memory.c 	pud_t *pud;
pud              2360 mm/huge_memory.c 	pud = pud_offset(p4d, address);
pud              2361 mm/huge_memory.c 	if (!pud_present(*pud))
pud              2364 mm/huge_memory.c 	pmd = pmd_offset(pud, address);
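Sketch of the lock-then-recheck pattern used by the huge-pud paths above (compare __pud_trans_huge_lock): pud_lock(), pud_trans_huge() and pud_devmap() come from the listing; spin_unlock() is assumed standard and the function name example_with_locked_pud is hypothetical.

	/* Take the pud lock, then confirm the entry is still a huge or devmap pud. */
	static bool example_with_locked_pud(struct mm_struct *mm, pud_t *pud)
	{
		spinlock_t *ptl = pud_lock(mm, pud);	/* serializes against split/clear */
		bool is_huge = pud_trans_huge(*pud) || pud_devmap(*pud);

		if (is_huge) {
			/* operate on the huge entry while ptl is held */
		}
		spin_unlock(ptl);
		return is_huge;
	}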
pud              4879 mm/hugetlb.c   pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
pud              4892 mm/hugetlb.c   		return (pte_t *)pmd_alloc(mm, pud, addr);
pud              4914 mm/hugetlb.c   	if (pud_none(*pud)) {
pud              4915 mm/hugetlb.c   		pud_populate(mm, pud,
pud              4923 mm/hugetlb.c   	pte = (pte_t *)pmd_alloc(mm, pud, addr);
pud              4944 mm/hugetlb.c   	pud_t *pud = pud_offset(p4d, *addr);
pud              4950 mm/hugetlb.c   	pud_clear(pud);
pud              4958 mm/hugetlb.c   pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
pud              4981 mm/hugetlb.c   	pud_t *pud;
pud              4988 mm/hugetlb.c   	pud = pud_alloc(mm, p4d, addr);
pud              4989 mm/hugetlb.c   	if (pud) {
pud              4991 mm/hugetlb.c   			pte = (pte_t *)pud;
pud              4994 mm/hugetlb.c   			if (want_pmd_share() && pud_none(*pud))
pud              4995 mm/hugetlb.c   				pte = huge_pmd_share(mm, addr, pud);
pud              4997 mm/hugetlb.c   				pte = (pte_t *)pmd_alloc(mm, pud, addr);
pud              5019 mm/hugetlb.c   	pud_t *pud, pud_entry;
pud              5029 mm/hugetlb.c   	pud = pud_offset(p4d, addr);
pud              5030 mm/hugetlb.c   	pud_entry = READ_ONCE(*pud);
pud              5035 mm/hugetlb.c   		return (pte_t *)pud;
pud              5037 mm/hugetlb.c   	pmd = pmd_offset(pud, addr);
pud              5108 mm/hugetlb.c   		pud_t *pud, int flags)
pud              5113 mm/hugetlb.c   	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
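Sketch of the size-based choice visible in the hugetlb hits above: a PUD_SIZE (1G) hugepage uses the pud entry itself as the huge pte, smaller sizes descend to (or share) a pmd. huge_pmd_share(), want_pmd_share(), pud_none() and pmd_alloc() are in the listing; the sz == PUD_SIZE condition and the name example_huge_pte_alloc are assumptions.

	static pte_t *example_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
					     unsigned long sz, pud_t *pud)
	{
		if (sz == PUD_SIZE)			/* 1G page: the pud entry is the huge pte */
			return (pte_t *)pud;
		if (want_pmd_share() && pud_none(*pud))	/* try to share a pmd table */
			return huge_pmd_share(mm, addr, pud);
		return (pte_t *)pmd_alloc(mm, pud, addr);
	}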
pud               643 mm/kasan/common.c 	pud_t *pud;
pud               652 mm/kasan/common.c 	pud = pud_offset(p4d, addr);
pud               653 mm/kasan/common.c 	if (pud_none(*pud))
pud               661 mm/kasan/common.c 	if (pud_bad(*pud))
pud               663 mm/kasan/common.c 	pmd = pmd_offset(pud, addr);
pud                62 mm/kasan/init.c static inline bool kasan_pmd_table(pud_t pud)
pud                64 mm/kasan/init.c 	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
pud                67 mm/kasan/init.c static inline bool kasan_pmd_table(pud_t pud)
pud               113 mm/kasan/init.c static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
pud               116 mm/kasan/init.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud               149 mm/kasan/init.c 	pud_t *pud = pud_offset(p4d, addr);
pud               157 mm/kasan/init.c 			pud_populate(&init_mm, pud,
pud               159 mm/kasan/init.c 			pmd = pmd_offset(pud, addr);
pud               165 mm/kasan/init.c 		if (pud_none(*pud)) {
pud               169 mm/kasan/init.c 				p = pmd_alloc(&init_mm, pud, addr);
pud               173 mm/kasan/init.c 				pud_populate(&init_mm, pud,
pud               177 mm/kasan/init.c 		zero_pmd_populate(pud, addr, next);
pud               178 mm/kasan/init.c 	} while (pud++, addr = next, addr != end);
pud               192 mm/kasan/init.c 			pud_t *pud;
pud               197 mm/kasan/init.c 			pud = pud_offset(p4d, addr);
pud               198 mm/kasan/init.c 			pud_populate(&init_mm, pud,
pud               200 mm/kasan/init.c 			pmd = pmd_offset(pud, addr);
pud               243 mm/kasan/init.c 			pud_t *pud;
pud               270 mm/kasan/init.c 			pud = pud_offset(p4d, addr);
pud               271 mm/kasan/init.c 			pud_populate(&init_mm, pud,
pud               273 mm/kasan/init.c 			pmd = pmd_offset(pud, addr);
pud               312 mm/kasan/init.c static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
pud               323 mm/kasan/init.c 	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
pud               324 mm/kasan/init.c 	pud_clear(pud);
pud               329 mm/kasan/init.c 	pud_t *pud;
pud               333 mm/kasan/init.c 		pud = pud_start + i;
pud               334 mm/kasan/init.c 		if (!pud_none(*pud))
pud               401 mm/kasan/init.c static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
pud               406 mm/kasan/init.c 	for (; addr < end; addr = next, pud++) {
pud               411 mm/kasan/init.c 		if (!pud_present(*pud))
pud               414 mm/kasan/init.c 		if (kasan_pmd_table(*pud)) {
pud               417 mm/kasan/init.c 				pud_clear(pud);
pud               420 mm/kasan/init.c 		pmd = pmd_offset(pud, addr);
pud               421 mm/kasan/init.c 		pmd_base = pmd_offset(pud, 0);
pud               423 mm/kasan/init.c 		kasan_free_pmd(pmd_base, pud);
pud               433 mm/kasan/init.c 		pud_t *pud;
pud               446 mm/kasan/init.c 		pud = pud_offset(p4d, addr);
pud               447 mm/kasan/init.c 		kasan_remove_pud_table(pud, addr, next);
pud               270 mm/memory-failure.c 	pud_t *pud;
pud               280 mm/memory-failure.c 	pud = pud_offset(p4d, address);
pud               281 mm/memory-failure.c 	if (!pud_present(*pud))
pud               283 mm/memory-failure.c 	if (pud_devmap(*pud))
pud               285 mm/memory-failure.c 	pmd = pmd_offset(pud, address);
pud               204 mm/memory.c    static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pud               213 mm/memory.c    	pmd = pmd_offset(pud, addr);
pud               232 mm/memory.c    	pmd = pmd_offset(pud, start);
pud               233 mm/memory.c    	pud_clear(pud);
pud               242 mm/memory.c    	pud_t *pud;
pud               247 mm/memory.c    	pud = pud_offset(p4d, addr);
pud               250 mm/memory.c    		if (pud_none_or_clear_bad(pud))
pud               252 mm/memory.c    		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
pud               253 mm/memory.c    	} while (pud++, addr = next, addr != end);
pud               266 mm/memory.c    	pud = pud_offset(p4d, start);
pud               268 mm/memory.c    	pud_free_tlb(tlb, pud, start);
pud               486 mm/memory.c    	pud_t *pud = pud_offset(p4d, addr);
pud               487 mm/memory.c    	pmd_t *pmd = pmd_offset(pud, addr);
pud              1140 mm/memory.c    				struct vm_area_struct *vma, pud_t *pud,
pud              1147 mm/memory.c    	pmd = pmd_offset(pud, addr);
pud              1179 mm/memory.c    	pud_t *pud;
pud              1182 mm/memory.c    	pud = pud_offset(p4d, addr);
pud              1185 mm/memory.c    		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
pud              1188 mm/memory.c    				split_huge_pud(vma, pud, addr);
pud              1189 mm/memory.c    			} else if (zap_huge_pud(tlb, vma, pud, addr))
pud              1193 mm/memory.c    		if (pud_none_or_clear_bad(pud))
pud              1195 mm/memory.c    		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
pud              1198 mm/memory.c    	} while (pud++, addr = next, addr != end);
pud              1397 mm/memory.c    	pud_t *pud;
pud              1404 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
pud              1405 mm/memory.c    	if (!pud)
pud              1407 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
pud              1817 mm/memory.c    static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
pud              1826 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
pud              1844 mm/memory.c    	pud_t *pud;
pud              1849 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
pud              1850 mm/memory.c    	if (!pud)
pud              1854 mm/memory.c    		err = remap_pmd_range(mm, pud, addr, next,
pud              1858 mm/memory.c    	} while (pud++, addr = next, addr != end);
pud              2035 mm/memory.c    static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
pud              2043 mm/memory.c    	BUG_ON(pud_huge(*pud));
pud              2045 mm/memory.c    	pmd = pmd_alloc(mm, pud, addr);
pud              2061 mm/memory.c    	pud_t *pud;
pud              2065 mm/memory.c    	pud = pud_alloc(mm, p4d, addr);
pud              2066 mm/memory.c    	if (!pud)
pud              2070 mm/memory.c    		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
pud              2073 mm/memory.c    	} while (pud++, addr = next, addr != end);
pud              3930 mm/memory.c    	vmf.pud = pud_alloc(mm, p4d, address);
pud              3931 mm/memory.c    	if (!vmf.pud)
pud              3933 mm/memory.c    	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
pud              3938 mm/memory.c    		pud_t orig_pud = *vmf.pud;
pud              3956 mm/memory.c    	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
pud              4104 mm/memory.c    int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
pud              4113 mm/memory.c    	ptl = pud_lock(mm, pud);
pud              4115 mm/memory.c    	if (!pud_present(*pud)) {
pud              4117 mm/memory.c    		pud_populate(mm, pud, new);
pud              4121 mm/memory.c    	if (!pgd_present(*pud)) {
pud              4123 mm/memory.c    		pgd_populate(mm, pud, new);
pud              4138 mm/memory.c    	pud_t *pud;
pud              4150 mm/memory.c    	pud = pud_offset(p4d, address);
pud              4151 mm/memory.c    	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
pud              4154 mm/memory.c    	pmd = pmd_offset(pud, address);
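Sketch of the canonical lookup descent the mm/memory.c hits above belong to: each level is checked for none/bad before moving down. The pud lines mirror the listing; the pgd/p4d steps are the standard counterparts not shown in these hits, and the function name example_find_pmd is hypothetical.

	/* Walk pgd -> p4d -> pud and return the pmd covering address, or NULL. */
	static pmd_t *example_find_pmd(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		pgd = pgd_offset(mm, address);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			return NULL;
		p4d = p4d_offset(pgd, address);
		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
			return NULL;
		pud = pud_offset(p4d, address);
		if (pud_none(*pud) || unlikely(pud_bad(*pud)))
			return NULL;
		return pmd_offset(pud, address);	/* caller still validates the pmd */
	}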
pud               190 mm/mprotect.c  		pud_t *pud, unsigned long addr, unsigned long end,
pud               201 mm/mprotect.c  	pmd = pmd_offset(pud, addr);
pud               265 mm/mprotect.c  	pud_t *pud;
pud               269 mm/mprotect.c  	pud = pud_offset(p4d, addr);
pud               272 mm/mprotect.c  		if (pud_none_or_clear_bad(pud))
pud               274 mm/mprotect.c  		pages += change_pmd_range(vma, pud, addr, next, newprot,
pud               276 mm/mprotect.c  	} while (pud++, addr = next, addr != end);
pud                37 mm/mremap.c    	pud_t *pud;
pud                48 mm/mremap.c    	pud = pud_offset(p4d, addr);
pud                49 mm/mremap.c    	if (pud_none_or_clear_bad(pud))
pud                52 mm/mremap.c    	pmd = pmd_offset(pud, addr);
pud                64 mm/mremap.c    	pud_t *pud;
pud                71 mm/mremap.c    	pud = pud_alloc(mm, p4d, addr);
pud                72 mm/mremap.c    	if (!pud)
pud                75 mm/mremap.c    	pmd = pmd_alloc(mm, pud, addr);
pud               144 mm/page_vma_mapped.c 	pud_t *pud;
pud               173 mm/page_vma_mapped.c 	pud = pud_offset(p4d, pvmw->address);
pud               174 mm/page_vma_mapped.c 	if (!pud_present(*pud))
pud               176 mm/page_vma_mapped.c 	pvmw->pmd = pmd_offset(pud, pvmw->address);
pud                29 mm/pagewalk.c  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pud                37 mm/pagewalk.c  	pmd = pmd_offset(pud, addr);
pud                78 mm/pagewalk.c  	pud_t *pud;
pud                83 mm/pagewalk.c  	pud = pud_offset(p4d, addr);
pud                87 mm/pagewalk.c  		if (pud_none(*pud) || !walk->vma) {
pud                96 mm/pagewalk.c  			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
pud                99 mm/pagewalk.c  				err = ops->pud_entry(pud, addr, next, walk);
pud               107 mm/pagewalk.c  		split_huge_pud(walk->vma, pud, addr);
pud               108 mm/pagewalk.c  		if (pud_none(*pud))
pud               112 mm/pagewalk.c  			err = walk_pmd_range(pud, addr, next, walk);
pud               115 mm/pagewalk.c  	} while (pud++, addr = next, addr != end);
pud                33 mm/pgtable-generic.c void pud_clear_bad(pud_t *pud)
pud                35 mm/pgtable-generic.c 	pud_ERROR(*pud);
pud                36 mm/pgtable-generic.c 	pud_clear(pud);
pud               140 mm/pgtable-generic.c 	pud_t pud;
pud               144 mm/pgtable-generic.c 	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
pud               146 mm/pgtable-generic.c 	return pud;
pud               715 mm/rmap.c      	pud_t *pud;
pud               727 mm/rmap.c      	pud = pud_offset(p4d, address);
pud               728 mm/rmap.c      	if (!pud_present(*pud))
pud               731 mm/rmap.c      	pmd = pmd_offset(pud, address);
pud               168 mm/sparse-vmemmap.c pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
pud               170 mm/sparse-vmemmap.c 	pmd_t *pmd = pmd_offset(pud, addr);
pud               182 mm/sparse-vmemmap.c 	pud_t *pud = pud_offset(p4d, addr);
pud               183 mm/sparse-vmemmap.c 	if (pud_none(*pud)) {
pud               187 mm/sparse-vmemmap.c 		pud_populate(&init_mm, pud, p);
pud               189 mm/sparse-vmemmap.c 	return pud;
pud               222 mm/sparse-vmemmap.c 	pud_t *pud;
pud               233 mm/sparse-vmemmap.c 		pud = vmemmap_pud_populate(p4d, addr, node);
pud               234 mm/sparse-vmemmap.c 		if (!pud)
pud               236 mm/sparse-vmemmap.c 		pmd = vmemmap_pmd_populate(pud, addr, node);
pud              1977 mm/swapfile.c  static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
pud              1986 mm/swapfile.c  	pmd = pmd_offset(pud, addr);
pud              2005 mm/swapfile.c  	pud_t *pud;
pud              2009 mm/swapfile.c  	pud = pud_offset(p4d, addr);
pud              2012 mm/swapfile.c  		if (pud_none_or_clear_bad(pud))
pud              2014 mm/swapfile.c  		ret = unuse_pmd_range(vma, pud, addr, next, type,
pud              2018 mm/swapfile.c  	} while (pud++, addr = next, addr != end);
pud               151 mm/userfaultfd.c 	pud_t *pud;
pud               157 mm/userfaultfd.c 	pud = pud_alloc(mm, p4d, address);
pud               158 mm/userfaultfd.c 	if (!pud)
pud               165 mm/userfaultfd.c 	return pmd_alloc(mm, pud, address);
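Sketch of the allocating variant of the same descent, as the mm/userfaultfd.c and mm/mremap.c hits above use it: each level either reuses an existing table or allocates one. pud_alloc() and pmd_alloc() are in the listing; p4d_alloc() and the name example_alloc_pmd are assumptions.

	/* Find or allocate the pmd that covers address. */
	static pmd_t *example_alloc_pmd(struct mm_struct *mm, unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);
		p4d_t *p4d = p4d_alloc(mm, pgd, address);
		pud_t *pud;

		if (!p4d)
			return NULL;
		pud = pud_alloc(mm, p4d, address);	/* may allocate a new pud table */
		if (!pud)
			return NULL;
		return pmd_alloc(mm, pud, address);	/* may allocate a new pmd table */
	}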
pud                75 mm/vmalloc.c   static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
pud                80 mm/vmalloc.c   	pmd = pmd_offset(pud, addr);
pud                93 mm/vmalloc.c   	pud_t *pud;
pud                96 mm/vmalloc.c   	pud = pud_offset(p4d, addr);
pud                99 mm/vmalloc.c   		if (pud_clear_huge(pud))
pud               101 mm/vmalloc.c   		if (pud_none_or_clear_bad(pud))
pud               103 mm/vmalloc.c   		vunmap_pmd_range(pud, addr, next);
pud               104 mm/vmalloc.c   	} while (pud++, addr = next, addr != end);
pud               164 mm/vmalloc.c   static int vmap_pmd_range(pud_t *pud, unsigned long addr,
pud               170 mm/vmalloc.c   	pmd = pmd_alloc(&init_mm, pud, addr);
pud               184 mm/vmalloc.c   	pud_t *pud;
pud               187 mm/vmalloc.c   	pud = pud_alloc(&init_mm, p4d, addr);
pud               188 mm/vmalloc.c   	if (!pud)
pud               192 mm/vmalloc.c   		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
pud               194 mm/vmalloc.c   	} while (pud++, addr = next, addr != end);
pud               276 mm/vmalloc.c   	pud_t *pud;
pud               291 mm/vmalloc.c   	pud = pud_offset(p4d, addr);
pud               301 mm/vmalloc.c   	WARN_ON_ONCE(pud_bad(*pud));
pud               302 mm/vmalloc.c   	if (pud_none(*pud) || pud_bad(*pud))
pud               304 mm/vmalloc.c   	pmd = pmd_offset(pud, addr);
pud                82 virt/kvm/arm/mmu.c static void kvm_flush_dcache_pud(pud_t pud)
pud                84 virt/kvm/arm/mmu.c 	__kvm_flush_dcache_pud(pud);
pud               169 virt/kvm/arm/mmu.c static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
pud               171 virt/kvm/arm/mmu.c 	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
pud               172 virt/kvm/arm/mmu.c 	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
pud               173 virt/kvm/arm/mmu.c 	stage2_pud_clear(kvm, pud);
pud               176 virt/kvm/arm/mmu.c 	put_page(virt_to_page(pud));
pud               268 virt/kvm/arm/mmu.c static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
pud               274 virt/kvm/arm/mmu.c 	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
pud               294 virt/kvm/arm/mmu.c 		clear_stage2_pud_entry(kvm, pud, start_addr);
pud               301 virt/kvm/arm/mmu.c 	pud_t *pud, *start_pud;
pud               303 virt/kvm/arm/mmu.c 	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
pud               306 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
pud               307 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud)) {
pud               308 virt/kvm/arm/mmu.c 				pud_t old_pud = *pud;
pud               310 virt/kvm/arm/mmu.c 				stage2_pud_clear(kvm, pud);
pud               313 virt/kvm/arm/mmu.c 				put_page(virt_to_page(pud));
pud               315 virt/kvm/arm/mmu.c 				unmap_stage2_pmds(kvm, pud, addr, next);
pud               318 virt/kvm/arm/mmu.c 	} while (pud++, addr = next, addr != end);
pud               377 virt/kvm/arm/mmu.c static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
pud               383 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
pud               398 virt/kvm/arm/mmu.c 	pud_t *pud;
pud               401 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
pud               404 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
pud               405 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud))
pud               406 virt/kvm/arm/mmu.c 				kvm_flush_dcache_pud(*pud);
pud               408 virt/kvm/arm/mmu.c 				stage2_flush_pmds(kvm, pud, addr, next);
pud               410 virt/kvm/arm/mmu.c 	} while (pud++, addr = next, addr != end);
pud               461 virt/kvm/arm/mmu.c static void clear_hyp_pud_entry(pud_t *pud)
pud               463 virt/kvm/arm/mmu.c 	pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
pud               464 virt/kvm/arm/mmu.c 	VM_BUG_ON(pud_huge(*pud));
pud               465 virt/kvm/arm/mmu.c 	pud_clear(pud);
pud               467 virt/kvm/arm/mmu.c 	put_page(virt_to_page(pud));
pud               495 virt/kvm/arm/mmu.c static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
pud               500 virt/kvm/arm/mmu.c 	start_pmd = pmd = pmd_offset(pud, addr);
pud               509 virt/kvm/arm/mmu.c 		clear_hyp_pud_entry(pud);
pud               515 virt/kvm/arm/mmu.c 	pud_t *pud, *start_pud;
pud               517 virt/kvm/arm/mmu.c 	start_pud = pud = pud_offset(pgd, addr);
pud               521 virt/kvm/arm/mmu.c 		if (!pud_none(*pud))
pud               522 virt/kvm/arm/mmu.c 			unmap_hyp_pmds(pud, addr, next);
pud               523 virt/kvm/arm/mmu.c 	} while (pud++, addr = next, addr != end);
pud               626 virt/kvm/arm/mmu.c static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
pud               636 virt/kvm/arm/mmu.c 		pmd = pmd_offset(pud, addr);
pud               663 virt/kvm/arm/mmu.c 	pud_t *pud;
pud               670 virt/kvm/arm/mmu.c 		pud = pud_offset(pgd, addr);
pud               672 virt/kvm/arm/mmu.c 		if (pud_none_or_clear_bad(pud)) {
pud               678 virt/kvm/arm/mmu.c 			kvm_pud_populate(pud, pmd);
pud               679 virt/kvm/arm/mmu.c 			get_page(virt_to_page(pud));
pud               683 virt/kvm/arm/mmu.c 		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
pud               697 virt/kvm/arm/mmu.c 	pud_t *pud;
pud               708 virt/kvm/arm/mmu.c 			pud = pud_alloc_one(NULL, addr);
pud               709 virt/kvm/arm/mmu.c 			if (!pud) {
pud               714 virt/kvm/arm/mmu.c 			kvm_pgd_populate(pgd, pud);
pud              1020 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              1026 virt/kvm/arm/mmu.c 		pud = mmu_memory_cache_alloc(cache);
pud              1027 virt/kvm/arm/mmu.c 		stage2_pgd_populate(kvm, pgd, pud);
pud              1037 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              1040 virt/kvm/arm/mmu.c 	pud = stage2_get_pud(kvm, cache, addr);
pud              1041 virt/kvm/arm/mmu.c 	if (!pud || stage2_pud_huge(kvm, *pud))
pud              1044 virt/kvm/arm/mmu.c 	if (stage2_pud_none(kvm, *pud)) {
pud              1048 virt/kvm/arm/mmu.c 		stage2_pud_populate(kvm, pud, pmd);
pud              1049 virt/kvm/arm/mmu.c 		get_page(virt_to_page(pud));
pud              1052 virt/kvm/arm/mmu.c 	return stage2_pmd_offset(kvm, pud, addr);
pud              1225 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              1234 virt/kvm/arm/mmu.c 	pud = stage2_get_pud(kvm, cache, addr);
pud              1235 virt/kvm/arm/mmu.c 	if (!pud) {
pud              1248 virt/kvm/arm/mmu.c 		stage2_dissolve_pud(kvm, addr, pud);
pud              1250 virt/kvm/arm/mmu.c 	if (stage2_pud_none(kvm, *pud)) {
pud              1254 virt/kvm/arm/mmu.c 		stage2_pud_populate(kvm, pud, pmd);
pud              1255 virt/kvm/arm/mmu.c 		get_page(virt_to_page(pud));
pud              1258 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
pud              1326 virt/kvm/arm/mmu.c static int stage2_pudp_test_and_clear_young(pud_t *pud)
pud              1328 virt/kvm/arm/mmu.c 	return stage2_ptep_test_and_clear_young((pte_t *)pud);
pud              1449 virt/kvm/arm/mmu.c static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
pud              1455 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
pud              1479 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              1482 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
pud              1485 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
pud              1486 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud)) {
pud              1487 virt/kvm/arm/mmu.c 				if (!kvm_s2pud_readonly(pud))
pud              1488 virt/kvm/arm/mmu.c 					kvm_set_s2pud_readonly(pud);
pud              1490 virt/kvm/arm/mmu.c 				stage2_wp_pmds(kvm, pud, addr, next);
pud              1493 virt/kvm/arm/mmu.c 	} while (pud++, addr = next, addr != end);
pud              1863 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              1873 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
pud              1876 virt/kvm/arm/mmu.c 	if (pud) {		/* HugeTLB */
pud              1877 virt/kvm/arm/mmu.c 		*pud = kvm_s2pud_mkyoung(*pud);
pud              1878 virt/kvm/arm/mmu.c 		pfn = kvm_pud_pfn(*pud);
pud              2103 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              2108 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
pud              2111 virt/kvm/arm/mmu.c 	if (pud)
pud              2112 virt/kvm/arm/mmu.c 		return stage2_pudp_test_and_clear_young(pud);
pud              2121 virt/kvm/arm/mmu.c 	pud_t *pud;
pud              2126 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
pud              2129 virt/kvm/arm/mmu.c 	if (pud)
pud              2130 virt/kvm/arm/mmu.c 		return kvm_s2pud_young(*pud);
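Sketch reconstructing the stage-2 pud walk that the virt/kvm/arm/mmu.c fragments above come from: skip empty entries, act on huge block mappings directly, otherwise descend to the pmd level. stage2_pud_offset(), stage2_pud_none(), stage2_pud_huge(), kvm_flush_dcache_pud() and stage2_flush_pmds() are in the listing; stage2_pud_addr_end() and the name example_stage2_flush_puds are assumptions.

	static void example_stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
					      phys_addr_t addr, phys_addr_t end)
	{
		pud_t *pud = stage2_pud_offset(kvm, pgd, addr);
		phys_addr_t next;

		do {
			next = stage2_pud_addr_end(kvm, addr, end);	/* assumed boundary helper */
			if (!stage2_pud_none(kvm, *pud)) {
				if (stage2_pud_huge(kvm, *pud))
					kvm_flush_dcache_pud(*pud);	/* huge block mapping */
				else
					stage2_flush_pmds(kvm, pud, addr, next);
			}
		} while (pud++, addr = next, addr != end);
	}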