pmd 75 arch/alpha/include/asm/mmzone.h #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
pmd 32 arch/alpha/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t;
pmd 37 arch/alpha/include/asm/page.h #define pmd_val(x) ((x).pmd)
pmd 17 arch/alpha/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
pmd 19 arch/alpha/include/asm/pgalloc.h pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
pmd 21 arch/alpha/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 24 arch/alpha/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd 26 arch/alpha/include/asm/pgalloc.h pmd_set(pmd, pte);
pmd 30 arch/alpha/include/asm/pgalloc.h pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pmd 32 arch/alpha/include/asm/pgalloc.h pgd_set(pgd, pmd);
pmd 51 arch/alpha/include/asm/pgalloc.h pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 53 arch/alpha/include/asm/pgalloc.h free_page((unsigned long)pmd);
pmd 234 arch/alpha/include/asm/pgtable.h pmd_page_vaddr(pmd_t pmd)
pmd 236 arch/alpha/include/asm/pgtable.h return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
pmd 240 arch/alpha/include/asm/pgtable.h #define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
pmd 254 arch/alpha/include/asm/pgtable.h extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
pmd 255 arch/alpha/include/asm/pgtable.h extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
pmd 256 arch/alpha/include/asm/pgtable.h extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
pmd 8 arch/alpha/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
pmd 149 arch/alpha/mm/init.c pmd_t *pmd;
pmd 188 arch/alpha/mm/init.c pmd = pmd_offset(pgd, VMALLOC_START);
pmd 189 arch/alpha/mm/init.c pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
pmd 217 arch/alpha/mm/init.c if (pmd != pmd_offset(pgd, vaddr)) {
pmd 219 arch/alpha/mm/init.c pmd = pmd_offset(pgd, vaddr);
pmd 220 arch/alpha/mm/init.c pmd_set(pmd, (pte_t *)kernel_end);
pmd 223 arch/alpha/mm/init.c set_pte(pte_offset_kernel(pmd, vaddr),
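The alpha entries above cover the full pmd lifecycle: the pmd_t wrapper type, the pmd_val()/pmd_set() accessors, the pmd_none()/pmd_bad()/pmd_present() predicates, and the walk in mm/init.c. A minimal sketch of how these helpers compose into a page-table walk; walk_kernel_pte() and its exact checks are illustrative, not taken from any file indexed here:

/* Illustrative walk from a kernel virtual address down to its pte.
 * All helpers are the in-tree API collected in this index; the
 * function itself and its NULL policy are made up for the sketch. */
static pte_t *walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* init_mm, top level */
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);	/* as in alpha/mm/init.c */

	if (pmd_none(*pmd) || pmd_bad(*pmd))	/* empty or not a table */
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* leaf entry */
}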
pmd 14 arch/arc/include/asm/hugepage.h static inline pte_t pmd_pte(pmd_t pmd)
pmd 16 arch/arc/include/asm/hugepage.h return __pte(pmd_val(pmd));
pmd 24 arch/arc/include/asm/hugepage.h #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
pmd 25 arch/arc/include/asm/hugepage.h #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
pmd 26 arch/arc/include/asm/hugepage.h #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
pmd 27 arch/arc/include/asm/hugepage.h #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
pmd 28 arch/arc/include/asm/hugepage.h #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
pmd 29 arch/arc/include/asm/hugepage.h #define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
pmd 30 arch/arc/include/asm/hugepage.h #define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
pmd 31 arch/arc/include/asm/hugepage.h #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
pmd 33 arch/arc/include/asm/hugepage.h #define pmd_write(pmd) pte_write(pmd_pte(pmd))
pmd 34 arch/arc/include/asm/hugepage.h #define pmd_young(pmd) pte_young(pmd_pte(pmd))
pmd 35 arch/arc/include/asm/hugepage.h #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
pmd 36 arch/arc/include/asm/hugepage.h #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
pmd 40 arch/arc/include/asm/hugepage.h #define pmd_trans_huge(pmd) (pmd_val(pmd) & _PAGE_HW_SZ)
pmd 44 arch/arc/include/asm/hugepage.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 50 arch/arc/include/asm/hugepage.h return __pmd((pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HW_SZ)) | pgprot_val(newprot));
pmd 54 arch/arc/include/asm/hugepage.h pmd_t *pmdp, pmd_t pmd)
pmd 56 arch/arc/include/asm/hugepage.h *pmdp = pmd;
pmd 60 arch/arc/include/asm/hugepage.h pmd_t *pmd);
pmd 36 arch/arc/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd 38 arch/arc/include/asm/pgalloc.h pmd_set(pmd, pte);
pmd 42 arch/arc/include/asm/pgalloc.h pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
pmd 44 arch/arc/include/asm/pgalloc.h pmd_set(pmd, (pte_t *) ptep);
pmd 132 arch/arc/include/asm/pgalloc.h #define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
pmd 259 arch/arc/include/asm/pgtable.h #define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
pmd 262 arch/arc/include/asm/pgtable.h #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
pmd 34 arch/arc/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 47 arch/arc/mm/fault.c pmd = pmd_offset(pud, address);
pmd 52 arch/arc/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 657 arch/arc/mm/tlb.c pmd_t *pmd)
pmd 659 arch/arc/mm/tlb.c pte_t pte = __pte(pmd_val(*pmd));
pmd 80 arch/arm/include/asm/kvm_mmu.h #define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
pmd 140 arch/arm/include/asm/kvm_mmu.h static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
pmd 142 arch/arm/include/asm/kvm_mmu.h pmd_val(pmd) |= L_PMD_S2_RDWR;
pmd 143 arch/arm/include/asm/kvm_mmu.h return pmd;
pmd 152 arch/arm/include/asm/kvm_mmu.h static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
pmd 154 arch/arm/include/asm/kvm_mmu.h pmd_val(pmd) &= ~PMD_SECT_XN;
pmd 155 arch/arm/include/asm/kvm_mmu.h return pmd;
pmd 173 arch/arm/include/asm/kvm_mmu.h static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
pmd 175 arch/arm/include/asm/kvm_mmu.h pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
pmd 178 arch/arm/include/asm/kvm_mmu.h static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
pmd 180 arch/arm/include/asm/kvm_mmu.h return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
pmd 183 arch/arm/include/asm/kvm_mmu.h static inline bool kvm_s2pmd_exec(pmd_t *pmd)
pmd 185 arch/arm/include/asm/kvm_mmu.h return !(pmd_val(*pmd) & PMD_SECT_XN);
pmd 310 arch/arm/include/asm/kvm_mmu.h static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
pmd 313 arch/arm/include/asm/kvm_mmu.h kvm_pfn_t pfn = pmd_pfn(pmd);
pmd 30 arch/arm/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 32 arch/arm/include/asm/pgalloc.h BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
pmd 33 arch/arm/include/asm/pgalloc.h free_page((unsigned long)pmd);
pmd 36 arch/arm/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 38 arch/arm/include/asm/pgalloc.h set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
pmd 47 arch/arm/include/asm/pgalloc.h #define pmd_free(mm, pmd) do { } while (0)
pmd 48 arch/arm/include/asm/pgalloc.h #define pud_populate(mm,pmd,pte) BUG()
pmd 150 arch/arm/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
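The arc hugepage.h macros above show a pattern that recurs throughout this index (arm64 uses it too): pmd flag helpers are not reimplemented, they delegate to the pte helpers through the pmd_pte()/pte_pmd() converters. A sketch of the idiom; my_pmd_mkdirty() is a made-up name for illustration:

/* pmd and pte share a bit layout at THP-capable levels, so a pmd
 * helper is just: convert, reuse the pte helper, convert back. */
static inline pmd_t my_pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}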
pmd 22 arch/arm/include/asm/pgtable-2level-types.h typedef struct { pmdval_t pmd; } pmd_t;
pmd 27 arch/arm/include/asm/pgtable-2level-types.h #define pmd_val(x) ((x).pmd)
pmd 191 arch/arm/include/asm/pgtable-2level.h #define pmd_large(pmd) (pmd_val(pmd) & 2)
pmd 192 arch/arm/include/asm/pgtable-2level.h #define pmd_bad(pmd) (pmd_val(pmd) & 2)
pmd 193 arch/arm/include/asm/pgtable-2level.h #define pmd_present(pmd) (pmd_val(pmd))
pmd 220 arch/arm/include/asm/pgtable-2level.h #define pmd_hugewillfault(pmd) (0)
pmd 221 arch/arm/include/asm/pgtable-2level.h #define pmd_thp_or_huge(pmd) (0)
pmd 25 arch/arm/include/asm/pgtable-3level-types.h typedef struct { pmdval_t pmd; } pmd_t;
pmd 30 arch/arm/include/asm/pgtable-3level-types.h #define pmd_val(x) ((x).pmd)
pmd 132 arch/arm/include/asm/pgtable-3level.h #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
pmd 134 arch/arm/include/asm/pgtable-3level.h #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
pmd 136 arch/arm/include/asm/pgtable-3level.h #define pmd_large(pmd) pmd_sect(pmd)
pmd 162 arch/arm/include/asm/pgtable-3level.h #define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
pmd 198 arch/arm/include/asm/pgtable-3level.h #define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
pmd 199 arch/arm/include/asm/pgtable-3level.h : !!(pmd_val(pmd) & (val)))
pmd 200 arch/arm/include/asm/pgtable-3level.h #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
pmd 202 arch/arm/include/asm/pgtable-3level.h #define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
pmd 203 arch/arm/include/asm/pgtable-3level.h #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
pmd 211 arch/arm/include/asm/pgtable-3level.h #define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
pmd 212 arch/arm/include/asm/pgtable-3level.h #define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
pmd 216 arch/arm/include/asm/pgtable-3level.h #define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
pmd 217 arch/arm/include/asm/pgtable-3level.h #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
pmd 220 arch/arm/include/asm/pgtable-3level.h #define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
pmd 224 arch/arm/include/asm/pgtable-3level.h static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
pmd 233 arch/arm/include/asm/pgtable-3level.h #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
pmd 235 arch/arm/include/asm/pgtable-3level.h #define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
pmd 243 arch/arm/include/asm/pgtable-3level.h static inline pmd_t pmd_mknotpresent(pmd_t pmd)
pmd 245 arch/arm/include/asm/pgtable-3level.h return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
pmd 248 arch/arm/include/asm/pgtable-3level.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 252 arch/arm/include/asm/pgtable-3level.h pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
pmd 253 arch/arm/include/asm/pgtable-3level.h return pmd;
pmd 257 arch/arm/include/asm/pgtable-3level.h pmd_t *pmdp, pmd_t pmd)
pmd 262 arch/arm/include/asm/pgtable-3level.h if (pmd_val(pmd) & L_PMD_SECT_NONE)
pmd 263 arch/arm/include/asm/pgtable-3level.h pmd_val(pmd) &= ~L_PMD_SECT_VALID;
pmd 265 arch/arm/include/asm/pgtable-3level.h if (pmd_write(pmd) && pmd_dirty(pmd))
pmd 266 arch/arm/include/asm/pgtable-3level.h pmd_val(pmd) &= ~PMD_SECT_AP2;
pmd 268 arch/arm/include/asm/pgtable-3level.h pmd_val(pmd) |= PMD_SECT_AP2;
pmd 270 arch/arm/include/asm/pgtable-3level.h *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
pmd 54 arch/arm/include/asm/pgtable.h #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
pmd 187 arch/arm/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 189 arch/arm/include/asm/pgtable.h static inline pte_t *pmd_page_vaddr(pmd_t pmd)
pmd 191 arch/arm/include/asm/pgtable.h return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
pmd 194 arch/arm/include/asm/pgtable.h #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
pmd 197 arch/arm/include/asm/pgtable.h #define __pte_map(pmd) pmd_page_vaddr(*(pmd))
pmd 200 arch/arm/include/asm/pgtable.h #define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
pmd 206 arch/arm/include/asm/pgtable.h #define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
pmd 208 arch/arm/include/asm/pgtable.h #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
pmd 29 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
pmd 31 arch/arm/include/asm/stage2_pgtable.h #define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
pmd 577 arch/arm/include/asm/tlbflush.h static inline void flush_pmd_entry(void *pmd)
pmd 581 arch/arm/include/asm/tlbflush.h tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
pmd 582 arch/arm/include/asm/tlbflush.h tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
pmd 588 arch/arm/include/asm/tlbflush.h static inline void clean_pmd_entry(void *pmd)
pmd 592 arch/arm/include/asm/tlbflush.h tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
pmd 593 arch/arm/include/asm/tlbflush.h tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
pmd 641 arch/arm/include/asm/tlbflush.h #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
pmd 749 arch/arm/kernel/traps.c void __pmd_error(const char *file, int line, pmd_t pmd)
pmd 751 arch/arm/kernel/traps.c pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
pmd 27 arch/arm/lib/uaccess_with_memcpy.c pmd_t *pmd;
pmd 40 arch/arm/lib/uaccess_with_memcpy.c pmd = pmd_offset(pud, addr);
pmd 41 arch/arm/lib/uaccess_with_memcpy.c if (unlikely(pmd_none(*pmd)))
pmd 54 arch/arm/lib/uaccess_with_memcpy.c if (unlikely(pmd_thp_or_huge(*pmd))) {
pmd 57 arch/arm/lib/uaccess_with_memcpy.c if (unlikely(!pmd_thp_or_huge(*pmd)
pmd 58 arch/arm/lib/uaccess_with_memcpy.c || pmd_hugewillfault(*pmd))) {
pmd 68 arch/arm/lib/uaccess_with_memcpy.c if (unlikely(pmd_bad(*pmd)))
pmd 71 arch/arm/lib/uaccess_with_memcpy.c pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
pmd 634 arch/arm/mach-sa1100/assabet.c pmd_t *pmd;
pmd 636 arch/arm/mach-sa1100/assabet.c pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
pmd 637 arch/arm/mach-sa1100/assabet.c *pmd = __pmd(phys | prot);
pmd 638 arch/arm/mach-sa1100/assabet.c flush_pmd_entry(pmd);
pmd 302 arch/arm/mm/dump.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
pmd 305 arch/arm/mm/dump.c pte_t *pte = pte_offset_kernel(pmd, 0);
pmd 315 arch/arm/mm/dump.c static const char *get_domain_name(pmd_t *pmd)
pmd 318 arch/arm/mm/dump.c switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
pmd 336 arch/arm/mm/dump.c pmd_t *pmd = pmd_offset(pud, 0);
pmd 341 arch/arm/mm/dump.c for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
pmd 343 arch/arm/mm/dump.c domain = get_domain_name(pmd);
pmd 344 arch/arm/mm/dump.c if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
pmd 345 arch/arm/mm/dump.c note_page(st, addr, 3, pmd_val(*pmd), domain);
pmd 347 arch/arm/mm/dump.c walk_pte(st, pmd, addr, domain);
pmd 349 arch/arm/mm/dump.c if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
pmd 351 arch/arm/mm/dump.c pmd++;
pmd 352 arch/arm/mm/dump.c domain = get_domain_name(pmd);
pmd 353 arch/arm/mm/dump.c note_page(st, addr, 3, pmd_val(*pmd), domain);
pmd 95 arch/arm/mm/fault-armv.c pmd_t *pmd;
pmd 107 arch/arm/mm/fault-armv.c pmd = pmd_offset(pud, address);
pmd 108 arch/arm/mm/fault-armv.c if (pmd_none_or_clear_bad(pmd))
pmd 116 arch/arm/mm/fault-armv.c ptl = pte_lockptr(vma->vm_mm, pmd);
pmd 117 arch/arm/mm/fault-armv.c pte = pte_offset_map(pmd, address);
pmd 47 arch/arm/mm/fault.c pmd_t *pmd;
pmd 70 arch/arm/mm/fault.c pmd = pmd_offset(pud, addr);
pmd 72 arch/arm/mm/fault.c pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
pmd 74 arch/arm/mm/fault.c if (pmd_none(*pmd))
pmd 77 arch/arm/mm/fault.c if (pmd_bad(*pmd)) {
pmd 83 arch/arm/mm/fault.c if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
pmd 86 arch/arm/mm/fault.c pte = pte_offset_map(pmd, addr);
pmd 412 arch/arm/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 438 arch/arm/mm/fault.c pmd = pmd_offset(pud, addr);
pmd 460 arch/arm/mm/fault.c copy_pmd(pmd, pmd_k);
pmd 32 arch/arm/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 34 arch/arm/mm/hugetlbpage.c return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
pmd 27 arch/arm/mm/idmap.c pmd_t *pmd;
pmd 31 arch/arm/mm/idmap.c pmd = pmd_alloc_one(&init_mm, addr);
pmd 32 arch/arm/mm/idmap.c if (!pmd) {
pmd 41 arch/arm/mm/idmap.c memcpy(pmd, pmd_offset(pud, 0),
pmd 43 arch/arm/mm/idmap.c pud_populate(&init_mm, pud, pmd);
pmd 44 arch/arm/mm/idmap.c pmd += pmd_index(addr);
pmd 46 arch/arm/mm/idmap.c pmd = pmd_offset(pud, addr);
pmd 50 arch/arm/mm/idmap.c *pmd = __pmd((addr & PMD_MASK) | prot);
pmd 51 arch/arm/mm/idmap.c flush_pmd_entry(pmd);
pmd 52 arch/arm/mm/idmap.c } while (pmd++, addr = next, addr != end);
pmd 58 arch/arm/mm/idmap.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 61 arch/arm/mm/idmap.c pmd[0] = __pmd(addr);
pmd 63 arch/arm/mm/idmap.c pmd[1] = __pmd(addr);
pmd 64 arch/arm/mm/idmap.c flush_pmd_entry(pmd);
pmd 571 arch/arm/mm/init.c pmd_t *pmd;
pmd 573 arch/arm/mm/init.c pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
pmd 576 arch/arm/mm/init.c pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
pmd 579 arch/arm/mm/init.c pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
pmd 581 arch/arm/mm/init.c pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
pmd 583 arch/arm/mm/init.c flush_pmd_entry(pmd);
pmd 153 arch/arm/mm/ioremap.c pmd_t pmd = *pmdp;
pmd 155 arch/arm/mm/ioremap.c if (!pmd_none(pmd)) {
pmd 169 arch/arm/mm/ioremap.c if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
pmd 170 arch/arm/mm/ioremap.c pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
pmd 194 arch/arm/mm/ioremap.c pmd_t *pmd;
pmd 204 arch/arm/mm/ioremap.c pmd = pmd_offset(pud, addr);
pmd 206 arch/arm/mm/ioremap.c pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pmd 208 arch/arm/mm/ioremap.c pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pmd 210 arch/arm/mm/ioremap.c flush_pmd_entry(pmd);
pmd 213 arch/arm/mm/ioremap.c pmd += 2;
pmd 226 arch/arm/mm/ioremap.c pmd_t *pmd;
pmd 236 arch/arm/mm/ioremap.c pmd = pmd_offset(pud, addr);
pmd 245 arch/arm/mm/ioremap.c pmd[0] = __pmd(super_pmd_val);
pmd 246 arch/arm/mm/ioremap.c pmd[1] = __pmd(super_pmd_val);
pmd 247 arch/arm/mm/ioremap.c flush_pmd_entry(pmd);
pmd 250 arch/arm/mm/ioremap.c pmd += 2;
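The pmd_offset()/set_pmd() pairs in the fault handlers above, and the matching code in the arc, csky, mips, nios2, and openrisc fault.c entries elsewhere in this index, implement the classic vmalloc-fault fixup: the faulting task's page tables are healed by copying the kernel master entry. A condensed sketch under those assumptions; fixup_vmalloc_fault() is illustrative and omits the architecture-specific entry points:

/* Illustrative fixup: if a kernel-space address faulted only because
 * this task's tables lag behind init_mm, copy the master pmd entry. */
static int fixup_vmalloc_fault(unsigned long address)
{
	pgd_t *pgd = pgd_offset(current->active_mm, address);
	pgd_t *pgd_k = pgd_offset_k(address);	/* init_mm master tables */
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (!pgd_present(*pgd_k))
		return -1;
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return -1;		/* not stale: a genuine bad access */
	set_pmd(pmd, *pmd_k);		/* propagate the kernel mapping */
	return 0;
}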
pmd 76 arch/arm/mm/mmu.c pmdval_t pmd;
pmd 93 arch/arm/mm/mmu.c .pmd = PMD_SECT_UNCACHED,
pmd 99 arch/arm/mm/mmu.c .pmd = PMD_SECT_BUFFERED,
pmd 105 arch/arm/mm/mmu.c .pmd = PMD_SECT_WT,
pmd 111 arch/arm/mm/mmu.c .pmd = PMD_SECT_WB,
pmd 117 arch/arm/mm/mmu.c .pmd = PMD_SECT_WBWA,
pmd 133 arch/arm/mm/mmu.c void __init init_default_cache_policy(unsigned long pmd)
pmd 137 arch/arm/mm/mmu.c initial_pmd_value = pmd;
pmd 139 arch/arm/mm/mmu.c pmd &= PMD_SECT_CACHE_MASK;
pmd 142 arch/arm/mm/mmu.c if (cache_policies[i].pmd == pmd) {
pmd 379 arch/arm/mm/mmu.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 381 arch/arm/mm/mmu.c return pmd;
pmd 386 arch/arm/mm/mmu.c pmd_t *pmd;
pmd 395 arch/arm/mm/mmu.c pmd = fixmap_pmd(FIXADDR_TOP);
pmd 396 arch/arm/mm/mmu.c pmd_populate_kernel(&init_mm, pmd, bm_pte);
pmd 675 arch/arm/mm/mmu.c mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
pmd 677 arch/arm/mm/mmu.c mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
pmd 681 arch/arm/mm/mmu.c mem_types[MT_ROM].prot_sect |= cp->pmd;
pmd 683 arch/arm/mm/mmu.c switch (cp->pmd) {
pmd 739 arch/arm/mm/mmu.c static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
pmd 743 arch/arm/mm/mmu.c if (pmd_none(*pmd)) {
pmd 745 arch/arm/mm/mmu.c __pmd_populate(pmd, __pa(pte), prot);
pmd 747 arch/arm/mm/mmu.c BUG_ON(pmd_bad(*pmd));
pmd 748 arch/arm/mm/mmu.c return pte_offset_kernel(pmd, addr);
pmd 751 arch/arm/mm/mmu.c static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
pmd 754 arch/arm/mm/mmu.c return arm_pte_alloc(pmd, addr, prot, early_alloc);
pmd 757 arch/arm/mm/mmu.c static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pmd 763 arch/arm/mm/mmu.c pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
pmd 771 arch/arm/mm/mmu.c static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
pmd 775 arch/arm/mm/mmu.c pmd_t *p = pmd;
pmd 788 arch/arm/mm/mmu.c pmd++;
pmd 791 arch/arm/mm/mmu.c *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
pmd 793 arch/arm/mm/mmu.c } while (pmd++, addr += SECTION_SIZE, addr != end);
pmd 803 arch/arm/mm/mmu.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 819 arch/arm/mm/mmu.c __map_init_section(pmd, addr, next, phys, type, ng);
pmd 821 arch/arm/mm/mmu.c alloc_init_pte(pmd, addr, next,
pmd 827 arch/arm/mm/mmu.c } while (pmd++, addr = next, addr != end);
pmd 893 arch/arm/mm/mmu.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 897 arch/arm/mm/mmu.c *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
pmd 1062 arch/arm/mm/mmu.c pmd_t *pmd;
pmd 1076 arch/arm/mm/mmu.c pmd = pmd_off_k(addr);
pmd 1077 arch/arm/mm/mmu.c if (pmd_none(*pmd))
pmd 1088 arch/arm/mm/mmu.c pmd = pmd_off_k(addr) + 1;
pmd 1089 arch/arm/mm/mmu.c if (pmd_none(*pmd))
pmd 120 arch/arm/mm/pgd.c pmd_t *pmd;
pmd 134 arch/arm/mm/pgd.c pmd = pmd_offset(pud, 0);
pmd 135 arch/arm/mm/pgd.c if (pmd_none_or_clear_bad(pmd))
pmd 138 arch/arm/mm/pgd.c pte = pmd_pgtable(*pmd);
pmd 139 arch/arm/mm/pgd.c pmd_clear(pmd);
pmd 144 arch/arm/mm/pgd.c pmd_free(mm, pmd);
pmd 162 arch/arm/mm/pgd.c pmd = pmd_offset(pud, 0);
pmd 164 arch/arm/mm/pgd.c pmd_free(mm, pmd);
pmd 185 arch/arm64/include/asm/kvm_mmu.h #define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
pmd 194 arch/arm64/include/asm/kvm_mmu.h static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
pmd 196 arch/arm64/include/asm/kvm_mmu.h pmd_val(pmd) |= PMD_S2_RDWR;
pmd 197 arch/arm64/include/asm/kvm_mmu.h return pmd;
pmd 212 arch/arm64/include/asm/kvm_mmu.h static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
pmd 214 arch/arm64/include/asm/kvm_mmu.h pmd_val(pmd) &= ~PMD_S2_XN;
pmd 215 arch/arm64/include/asm/kvm_mmu.h return pmd;
pmd 349 arch/arm64/include/asm/kvm_mmu.h static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
pmd 352 arch/arm64/include/asm/kvm_mmu.h struct page *page = pmd_page(pmd);
pmd 119 arch/arm64/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 27 arch/arm64/include/asm/pgtable-types.h typedef struct { pmdval_t pmd; } pmd_t;
pmd 28 arch/arm64/include/asm/pgtable-types.h #define pmd_val(x) ((x).pmd)
pmd 121 arch/arm64/include/asm/pgtable.h #define pmd_access_permitted(pmd, write) \
pmd 122 arch/arm64/include/asm/pgtable.h (pte_access_permitted(pmd_pte(pmd), (write)))
pmd 201 arch/arm64/include/asm/pgtable.h static inline pmd_t pmd_mkcont(pmd_t pmd)
pmd 203 arch/arm64/include/asm/pgtable.h return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
pmd 316 arch/arm64/include/asm/pgtable.h static inline pte_t pmd_pte(pmd_t pmd)
pmd 318 arch/arm64/include/asm/pgtable.h return __pte(pmd_val(pmd));
pmd 345 arch/arm64/include/asm/pgtable.h static inline int pmd_protnone(pmd_t pmd)
pmd 347 arch/arm64/include/asm/pgtable.h return pte_protnone(pmd_pte(pmd));
pmd 356 arch/arm64/include/asm/pgtable.h #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
pmd 359 arch/arm64/include/asm/pgtable.h #define pmd_present(pmd) pte_present(pmd_pte(pmd))
pmd 360 arch/arm64/include/asm/pgtable.h #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
pmd 361 arch/arm64/include/asm/pgtable.h #define pmd_young(pmd) pte_young(pmd_pte(pmd))
pmd 362 arch/arm64/include/asm/pgtable.h #define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
pmd 363 arch/arm64/include/asm/pgtable.h #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
pmd 364 arch/arm64/include/asm/pgtable.h #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
pmd 365 arch/arm64/include/asm/pgtable.h #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
pmd 366 arch/arm64/include/asm/pgtable.h #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
pmd 367 arch/arm64/include/asm/pgtable.h #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
pmd 368 arch/arm64/include/asm/pgtable.h #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
pmd 369 arch/arm64/include/asm/pgtable.h #define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
pmd 371 arch/arm64/include/asm/pgtable.h #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
pmd 373 arch/arm64/include/asm/pgtable.h #define pmd_write(pmd) pte_write(pmd_pte(pmd))
pmd 375 arch/arm64/include/asm/pgtable.h #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
pmd 378 arch/arm64/include/asm/pgtable.h #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
pmd 380 arch/arm64/include/asm/pgtable.h static inline pmd_t pmd_mkdevmap(pmd_t pmd)
pmd 382 arch/arm64/include/asm/pgtable.h return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
pmd 385 arch/arm64/include/asm/pgtable.h #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
pmd 387 arch/arm64/include/asm/pgtable.h #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
pmd 402 arch/arm64/include/asm/pgtable.h #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
pmd 436 arch/arm64/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 438 arch/arm64/include/asm/pgtable.h #define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
pmd 440 arch/arm64/include/asm/pgtable.h #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
pmd 442 arch/arm64/include/asm/pgtable.h #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
pmd 469 arch/arm64/include/asm/pgtable.h static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
pmd 473 arch/arm64/include/asm/pgtable.h set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
pmd 478 arch/arm64/include/asm/pgtable.h WRITE_ONCE(*pmdp, pmd);
pmd 480 arch/arm64/include/asm/pgtable.h if (pmd_valid(pmd)) {
pmd 491 arch/arm64/include/asm/pgtable.h static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
pmd 493 arch/arm64/include/asm/pgtable.h return __pmd_to_phys(pmd);
pmd 507 arch/arm64/include/asm/pgtable.h #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
pmd 510 arch/arm64/include/asm/pgtable.h #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))
pmd 523 arch/arm64/include/asm/pgtable.h #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
pmd 669 arch/arm64/include/asm/pgtable.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 671 arch/arm64/include/asm/pgtable.h return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
pmd 806 arch/arm64/include/asm/pgtable.h unsigned long address, pmd_t *pmdp, pmd_t pmd)
pmd 808 arch/arm64/include/asm/pgtable.h return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
pmd 856 arch/arm64/include/asm/pgtable.h #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
pmd 166 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
pmd 169 arch/arm64/include/asm/stage2_pgtable.h pud_populate(NULL, pud, pmd);
pmd 181 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
pmd 184 arch/arm64/include/asm/stage2_pgtable.h free_page((unsigned long)pmd);
pmd 408 arch/arm64/kernel/hibernate.c pmd_t pmd = READ_ONCE(*src_pmdp);
pmd 411 arch/arm64/kernel/hibernate.c if (pmd_none(pmd))
pmd 413 arch/arm64/kernel/hibernate.c if (pmd_table(pmd)) {
pmd 418 arch/arm64/kernel/hibernate.c __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
pmd 313 arch/arm64/mm/dump.c pmd_t pmd = READ_ONCE(*pmdp);
pmd 316 arch/arm64/mm/dump.c if (pmd_none(pmd) || pmd_sect(pmd)) {
pmd 317 arch/arm64/mm/dump.c note_page(st, addr, 3, pmd_val(pmd));
pmd 319 arch/arm64/mm/dump.c BUG_ON(pmd_bad(pmd));
pmd 160 arch/arm64/mm/fault.c pmd_t *pmdp, pmd;
pmd 173 arch/arm64/mm/fault.c pmd = READ_ONCE(*pmdp);
pmd 174 arch/arm64/mm/fault.c pr_cont(", pmd=%016llx", pmd_val(pmd));
pmd 175 arch/arm64/mm/fault.c if (pmd_none(pmd) || pmd_bad(pmd))
pmd 42 arch/arm64/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 44 arch/arm64/mm/hugetlbpage.c return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
pmd 265 arch/arm64/mm/hugetlbpage.c pmd_t *pmdp, pmd;
pmd 284 arch/arm64/mm/hugetlbpage.c pmd = READ_ONCE(*pmdp);
pmd 286 arch/arm64/mm/hugetlbpage.c pmd_none(pmd))
pmd 288 arch/arm64/mm/hugetlbpage.c if (pmd_huge(pmd) || !pmd_present(pmd))
pmd 171 arch/arm64/mm/mmu.c pmd_t pmd = READ_ONCE(*pmdp);
pmd 173 arch/arm64/mm/mmu.c BUG_ON(pmd_sect(pmd));
pmd 174 arch/arm64/mm/mmu.c if (pmd_none(pmd)) {
pmd 179 arch/arm64/mm/mmu.c pmd = READ_ONCE(*pmdp);
pmd 181 arch/arm64/mm/mmu.c BUG_ON(pmd_bad(pmd));
pmd 695 arch/arm64/mm/mmu.c pmd_t *pmdp, pmd;
pmd 714 arch/arm64/mm/mmu.c pmd = READ_ONCE(*pmdp);
pmd 715 arch/arm64/mm/mmu.c if (pmd_none(pmd))
pmd 718 arch/arm64/mm/mmu.c if (pmd_sect(pmd))
pmd 719 arch/arm64/mm/mmu.c return pfn_valid(pmd_pfn(pmd));
pmd 1003 arch/arm64/mm/mmu.c pmd_t pmd;
pmd 1005 arch/arm64/mm/mmu.c pmd = READ_ONCE(*pmdp);
pmd 1007 arch/arm64/mm/mmu.c if (!pmd_table(pmd)) {
pmd 202 arch/arm64/mm/pageattr.c pmd_t *pmdp, pmd;
pmd 221 arch/arm64/mm/pageattr.c pmd = READ_ONCE(*pmdp);
pmd 222 arch/arm64/mm/pageattr.c if (pmd_none(pmd))
pmd 224 arch/arm64/mm/pageattr.c if (pmd_sect(pmd))
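Note how the arm64 call sites above never test *pmdp directly: each takes one READ_ONCE() snapshot and then applies pmd_none()/pmd_sect()/pmd_bad() to that snapshot, so a concurrent table update cannot tear the load or change the answer between checks. A condensed sketch of the pattern; note_block_mapping() and walk_pte_level() are placeholders, not real kernel functions:

/* Snapshot once, then classify the entry from the snapshot. */
pmd_t pmd = READ_ONCE(*pmdp);		/* single tearing-safe load */

if (pmd_none(pmd))			/* nothing mapped here */
	return;
if (pmd_sect(pmd))			/* section (block) mapping: a leaf */
	note_block_mapping(pmd);	/* placeholder */
else					/* a table: descend one level */
	walk_pte_level(pmdp);		/* placeholder */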
pmd 14 arch/csky/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 17 arch/csky/include/asm/pgalloc.h set_pmd(pmd, __pmd(__pa(pte)));
pmd 20 arch/csky/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 23 arch/csky/include/asm/pgalloc.h set_pmd(pmd, __pmd(__pa(page_address(pte))));
pmd 26 arch/csky/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 46 arch/csky/include/asm/pgtable.h #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
pmd 127 arch/csky/include/asm/pgtable.h static inline pte_t *pmd_page_vaddr(pmd_t pmd)
pmd 131 arch/csky/include/asm/pgtable.h ptr = pmd_val(pmd);
pmd 136 arch/csky/include/asm/pgtable.h #define pmd_phys(pmd) pmd_val(pmd)
pmd 138 arch/csky/include/asm/pgtable.h static inline void set_pmd(pmd_t *p, pmd_t pmd)
pmd 140 arch/csky/include/asm/pgtable.h *p = pmd;
pmd 149 arch/csky/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 151 arch/csky/include/asm/pgtable.h return pmd_val(pmd) == __pa(invalid_pte_table);
pmd 154 arch/csky/include/asm/pgtable.h #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
pmd 156 arch/csky/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 158 arch/csky/include/asm/pgtable.h return (pmd_val(pmd) != __pa(invalid_pte_table));
pmd 80 arch/csky/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 98 arch/csky/mm/fault.c pmd = pmd_offset(pud, address);
pmd 102 arch/csky/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 126 arch/csky/mm/highmem.c pmd_t *pmd;
pmd 140 arch/csky/mm/highmem.c pmd = (pmd_t *)pud;
pmd 141 arch/csky/mm/highmem.c for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
pmd 142 arch/csky/mm/highmem.c if (pmd_none(*pmd)) {
pmd 149 arch/csky/mm/highmem.c set_pmd(pmd, __pmd(__pa(pte)));
pmd 150 arch/csky/mm/highmem.c BUG_ON(pte != pte_offset_kernel(pmd, 0));
pmd 167 arch/csky/mm/highmem.c pmd_t *pmd;
pmd 188 arch/csky/mm/highmem.c pmd = pmd_offset(pud, vaddr);
pmd 189 arch/csky/mm/highmem.c pte = pte_offset_kernel(pmd, vaddr);
pmd 49 arch/hexagon/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 56 arch/hexagon/include/asm/pgalloc.h set_pmd(pmd, __pmd(((unsigned long)page_to_pfn(pte) << PAGE_SHIFT) |
pmd 69 arch/hexagon/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 82 arch/hexagon/include/asm/pgalloc.h set_pmd(pmd, __pmd(((unsigned long)__pa(pte)) | HEXAGON_L1_PTE_SIZE));
pmd 88 arch/hexagon/include/asm/pgalloc.h pmdindex = (pgd_t *)pmd - mm->pgd;
pmd 245 arch/hexagon/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 247 arch/hexagon/include/asm/pgtable.h return pmd_val(pmd) == _NULL_PMD;
pmd 256 arch/hexagon/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 258 arch/hexagon/include/asm/pgtable.h return pmd_val(pmd) != (unsigned long)_NULL_PMD;
pmd 266 arch/hexagon/include/asm/pgtable.h static inline int pmd_bad(pmd_t pmd)
pmd 274 arch/hexagon/include/asm/pgtable.h #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
pmd 275 arch/hexagon/include/asm/pgtable.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 422 arch/hexagon/include/asm/pgtable.h #define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
pmd 177 arch/ia64/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t;
pmd 186 arch/ia64/include/asm/page.h # define pmd_val(x) ((x).pmd)
pmd 57 arch/ia64/include/asm/pgalloc.h pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
pmd 59 arch/ia64/include/asm/pgalloc.h pud_val(*pud_entry) = __pa(pmd);
pmd 67 arch/ia64/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 69 arch/ia64/include/asm/pgalloc.h free_page((unsigned long)pmd);
pmd 72 arch/ia64/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
pmd 79 arch/ia64/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 271 arch/ia64/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 272 arch/ia64/include/asm/pgtable.h #define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
pmd 273 arch/ia64/include/asm/pgtable.h #define pmd_present(pmd) (pmd_val(pmd) != 0UL)
pmd 275 arch/ia64/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
pmd 276 arch/ia64/include/asm/pgtable.h #define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))
pmd 978 arch/ia64/kernel/efi.c efi_memory_desc_t *md, *pmd = NULL, *check_md;
pmd 994 arch/ia64/kernel/efi.c for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
pmd 999 arch/ia64/kernel/efi.c if (pmd == NULL || !efi_wb(pmd) ||
pmd 1000 arch/ia64/kernel/efi.c efi_md_end(pmd) != md->phys_addr) {
pmd 1055 arch/ia64/kernel/efi.c efi_memory_desc_t *md, *pmd = NULL, *check_md;
pmd 1065 arch/ia64/kernel/efi.c for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
pmd 1078 arch/ia64/kernel/efi.c if (pmd == NULL || !efi_wb(pmd) ||
pmd 1079 arch/ia64/kernel/efi.c efi_md_end(pmd) != md->phys_addr) {
pmd 33 arch/ia64/mm/fault.c pmd_t *pmd;
pmd 44 arch/ia64/mm/fault.c pmd = pmd_offset(pud, address);
pmd 45 arch/ia64/mm/fault.c if (pmd_none(*pmd) || pmd_bad(*pmd))
pmd 48 arch/ia64/mm/fault.c ptep = pte_offset_kernel(pmd, address);
pmd 34 arch/ia64/mm/hugetlbpage.c pmd_t *pmd;
pmd 40 arch/ia64/mm/hugetlbpage.c pmd = pmd_alloc(mm, pud, taddr);
pmd 41 arch/ia64/mm/hugetlbpage.c if (pmd)
pmd 42 arch/ia64/mm/hugetlbpage.c pte = pte_alloc_map(mm, pmd, taddr);
pmd 53 arch/ia64/mm/hugetlbpage.c pmd_t *pmd;
pmd 60 arch/ia64/mm/hugetlbpage.c pmd = pmd_offset(pud, taddr);
pmd 61 arch/ia64/mm/hugetlbpage.c if (pmd_present(*pmd))
pmd 62 arch/ia64/mm/hugetlbpage.c pte = pte_offset_map(pmd, taddr);
pmd 103 arch/ia64/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 212 arch/ia64/mm/init.c pmd_t *pmd;
pmd 221 arch/ia64/mm/init.c pmd = pmd_alloc(&init_mm, pud, address);
pmd 222 arch/ia64/mm/init.c if (!pmd)
pmd 224 arch/ia64/mm/init.c pte = pte_alloc_kernel(pmd, address);
pmd 386 arch/ia64/mm/init.c pmd_t *pmd;
pmd 401 arch/ia64/mm/init.c pmd = pmd_offset(pud, end_address);
pmd 402 arch/ia64/mm/init.c if (pmd_none(*pmd)) {
pmd 407 arch/ia64/mm/init.c pte = pte_offset_kernel(pmd, end_address);
pmd 434 arch/ia64/mm/init.c pmd_t *pmd;
pmd 455 arch/ia64/mm/init.c pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
pmd 456 arch/ia64/mm/init.c if (!pmd)
pmd 458 arch/ia64/mm/init.c pud_populate(&init_mm, pud, pmd);
pmd 460 arch/ia64/mm/init.c pmd = pmd_offset(pud, address);
pmd 462 arch/ia64/mm/init.c if (pmd_none(*pmd)) {
pmd 466 arch/ia64/mm/init.c pmd_populate_kernel(&init_mm, pmd, pte);
pmd 468 arch/ia64/mm/init.c pte = pte_offset_kernel(pmd, address);
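The ia64 mm/init.c entries above show the standard allocate-then-populate ladder: a missing level is allocated and wired in with pud_populate()/pmd_populate_kernel(), after which the walk continues as if the level had always been there. A sketch using the generic allocators; early_pte_for() is a made-up wrapper and error handling is reduced to NULL returns:

/* Illustrative: ensure all intermediate levels exist for a kernel
 * address, then return its pte slot.  pte_alloc_kernel() allocates
 * the pte table and pmd_populate_kernel()s it in when needed. */
static pte_t *early_pte_for(unsigned long addr)
{
	pgd_t *pgd = pgd_offset(&init_mm, addr);
	pud_t *pud = pud_alloc(&init_mm, pgd, addr);	/* alloc if empty */
	pmd_t *pmd;

	if (!pud)
		return NULL;
	pmd = pmd_alloc(&init_mm, pud, addr);		/* same, pmd level */
	if (!pmd)
		return NULL;
	return pte_alloc_kernel(pmd, addr);
}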
pmd 34 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
pmd 37 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
pmd 39 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 48 arch/m68k/include/asm/mcf_pgalloc.h #define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
pmd 84 arch/m68k/include/asm/mcf_pgalloc.h #define pmd_free(mm, pmd) BUG()
pmd 103 arch/m68k/include/asm/mcf_pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
pmd 173 arch/m68k/include/asm/mcf_pgtable.h #define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
pmd 194 arch/m68k/include/asm/mcf_pgtable.h static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
pmd 195 arch/m68k/include/asm/mcf_pgtable.h #define pmd_none(pmd) pmd_none2(&(pmd))
pmd 196 arch/m68k/include/asm/mcf_pgtable.h static inline int pmd_bad2(pmd_t *pmd) { return 0; }
pmd 197 arch/m68k/include/asm/mcf_pgtable.h #define pmd_bad(pmd) pmd_bad2(&(pmd))
pmd 198 arch/m68k/include/asm/mcf_pgtable.h #define pmd_present(pmd) (!pmd_none2(&(pmd)))
pmd 399 arch/m68k/include/asm/mcf_pgtable.h #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
pmd 103 arch/m68k/include/asm/mmu_context.h pmd_t *pmd;
pmd 130 arch/m68k/include/asm/mmu_context.h pmd = pmd_offset(pgd, mmuar);
pmd 131 arch/m68k/include/asm/mmu_context.h if (pmd_none(*pmd))
pmd 134 arch/m68k/include/asm/mmu_context.h pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
pmd 135 arch/m68k/include/asm/mmu_context.h : pte_offset_map(pmd, mmuar);
pmd 75 arch/m68k/include/asm/motorola_pgalloc.h static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 77 arch/m68k/include/asm/motorola_pgalloc.h return free_pointer_table(pmd);
pmd 80 arch/m68k/include/asm/motorola_pgalloc.h static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
pmd 83 arch/m68k/include/asm/motorola_pgalloc.h return free_pointer_table(pmd);
pmd 98 arch/m68k/include/asm/motorola_pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd 100 arch/m68k/include/asm/motorola_pgalloc.h pmd_set(pmd, pte);
pmd 103 arch/m68k/include/asm/motorola_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
pmd 105 arch/m68k/include/asm/motorola_pgalloc.h pmd_set(pmd, page_address(page));
pmd 107 arch/m68k/include/asm/motorola_pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 109 arch/m68k/include/asm/motorola_pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pmd 111 arch/m68k/include/asm/motorola_pgalloc.h pgd_set(pgd, pmd);
pmd 112 arch/m68k/include/asm/motorola_pgtable.h unsigned long *ptr = pmdp->pmd;
pmd 126 arch/m68k/include/asm/motorola_pgtable.h #define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
pmd 138 arch/m68k/include/asm/motorola_pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 139 arch/m68k/include/asm/motorola_pgtable.h #define pmd_bad(pmd) ((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
pmd 140 arch/m68k/include/asm/motorola_pgtable.h #define pmd_present(pmd) (pmd_val(pmd) & _PAGE_TABLE)
pmd 142 arch/m68k/include/asm/motorola_pgtable.h unsigned long *__ptr = pmdp->pmd; \
pmd 147 arch/m68k/include/asm/motorola_pgtable.h #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
pmd 25 arch/m68k/include/asm/page.h typedef struct { unsigned long pmd[16]; } pmd_t;
pmd 31 arch/m68k/include/asm/page.h #define pmd_val(x) ((&x)->pmd[0])
pmd 28 arch/m68k/include/asm/sun3_pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd 30 arch/m68k/include/asm/sun3_pgalloc.h pmd_val(*pmd) = __pa((unsigned long)pte);
pmd 33 arch/m68k/include/asm/sun3_pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
pmd 35 arch/m68k/include/asm/sun3_pgalloc.h pmd_val(*pmd) = __pa((unsigned long)page_address(page));
pmd 37 arch/m68k/include/asm/sun3_pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 61 arch/m68k/include/asm/sun3_pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
pmd 120 arch/m68k/include/asm/sun3_pgtable.h #define __pmd_page(pmd) \
pmd 121 arch/m68k/include/asm/sun3_pgtable.h ((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
pmd 135 arch/m68k/include/asm/sun3_pgtable.h #define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
pmd 138 arch/m68k/include/asm/sun3_pgtable.h static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
pmd 139 arch/m68k/include/asm/sun3_pgtable.h #define pmd_none(pmd) pmd_none2(&(pmd))
pmd 141 arch/m68k/include/asm/sun3_pgtable.h static inline int pmd_bad2 (pmd_t *pmd) { return 0; }
pmd 142 arch/m68k/include/asm/sun3_pgtable.h #define pmd_bad(pmd) pmd_bad2(&(pmd))
pmd 143 arch/m68k/include/asm/sun3_pgtable.h static inline int pmd_present2 (pmd_t *pmd) { return pmd_val (*pmd) & SUN3_PMD_VALID; }
pmd 145 arch/m68k/include/asm/sun3_pgtable.h #define pmd_present(pmd) (!pmd_none2(&(pmd)))
pmd 205 arch/m68k/include/asm/sun3_pgtable.h #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
pmd 206 arch/m68k/include/asm/sun3_pgtable.h #define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
pmd 468 arch/m68k/kernel/sys_m68k.c pmd_t *pmd;
pmd 477 arch/m68k/kernel/sys_m68k.c pmd = pmd_offset(pgd, (unsigned long)mem);
pmd 478 arch/m68k/kernel/sys_m68k.c if (!pmd_present(*pmd))
pmd 480 arch/m68k/kernel/sys_m68k.c pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
pmd 206 arch/m68k/mm/kmap.c pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
pmd 275 arch/m68k/mm/kmap.c int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
pmd 278 arch/m68k/mm/kmap.c pmd_dir->pmd[pmd_off] = 0;
pmd 354 arch/m68k/mm/kmap.c if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd 355 arch/m68k/mm/kmap.c pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
pmd 95 arch/m68k/mm/mcfmmu.c pmd_t *pmd;
pmd 116 arch/m68k/mm/mcfmmu.c pmd = pmd_offset(pgd, mmuar);
pmd 117 arch/m68k/mm/mcfmmu.c if (pmd_none(*pmd)) {
pmd 122 arch/m68k/mm/mcfmmu.c pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
pmd 123 arch/m68k/mm/mcfmmu.c : pte_offset_map(pmd, mmuar);
pmd 76 arch/m68k/mm/motorola.c unsigned long pmd, last;
pmd 87 arch/m68k/mm/motorola.c pmd = __pgd_page(kernel_pg_dir[i]);
pmd 88 arch/m68k/mm/motorola.c if (pmd > last)
pmd 89 arch/m68k/mm/motorola.c last = pmd;
pmd 166 arch/m68k/mm/motorola.c pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
pmd 175 arch/m68k/mm/motorola.c pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
pmd 95 arch/m68k/sun3x/dvma.c pmd_t *pmd;
pmd 98 arch/m68k/sun3x/dvma.c if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
pmd 112 arch/m68k/sun3x/dvma.c if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
pmd 93 arch/microblaze/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t;
pmd 106 arch/microblaze/include/asm/page.h # define pmd_val(x) ((x).pmd)
pmd 27 arch/microblaze/include/asm/pgalloc.h extern void __bad_pte(pmd_t *pmd);
pmd 42 arch/microblaze/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 55 arch/microblaze/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte) \
pmd 56 arch/microblaze/include/asm/pgalloc.h (pmd_val(*(pmd)) = (unsigned long)page_address(pte))
pmd 58 arch/microblaze/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \
pmd 59 arch/microblaze/include/asm/pgalloc.h (pmd_val(*(pmd)) = (unsigned long) (pte))
pmd 68 arch/microblaze/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
pmd 301 arch/microblaze/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 302 arch/microblaze/include/asm/pgtable.h #define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
pmd 303 arch/microblaze/include/asm/pgtable.h #define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
pmd 470 arch/microblaze/include/asm/pgtable.h #define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
pmd 473 arch/microblaze/include/asm/pgtable.h #define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
pmd 191 arch/microblaze/mm/pgtable.c pmd_t *pmd;
pmd 197 arch/microblaze/mm/pgtable.c pmd = pmd_offset(pgd, addr & PAGE_MASK);
pmd 198 arch/microblaze/mm/pgtable.c if (pmd_present(*pmd)) {
pmd 199 arch/microblaze/mm/pgtable.c pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
pmd 18 arch/mips/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 21 arch/mips/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)pte));
pmd 24 arch/mips/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 27 arch/mips/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
pmd 29 arch/mips/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 38 arch/mips/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 40 arch/mips/include/asm/pgalloc.h set_pud(pud, __pud((unsigned long)pmd));
pmd 65 arch/mips/include/asm/pgalloc.h pmd_t *pmd;
pmd 67 arch/mips/include/asm/pgalloc.h pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
pmd 68 arch/mips/include/asm/pgalloc.h if (pmd)
pmd 69 arch/mips/include/asm/pgalloc.h pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
pmd 70 arch/mips/include/asm/pgalloc.h return pmd;
pmd 73 arch/mips/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 75 arch/mips/include/asm/pgalloc.h free_pages((unsigned long)pmd, PMD_ORDER);
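On mips (and on csky and nios2 elsewhere in this index) an empty pmd entry is not zero: every slot of a fresh pmd table points at the shared invalid_pte_table page, and pmd_none() compares against that sentinel, as the pgalloc.h lines above and the pgtable-32.h lines just below show. A lightly commented restatement of that allocation path:

/* Allocate a pmd table whose slots all read as "none". */
pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

if (pmd)
	/* point every slot at the all-invalid pte page, so a later
	 * pmd_none() test (pmd_val == invalid_pte_table) succeeds */
	pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);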
pmd 127 arch/mips/include/asm/pgtable-32.h static inline int pmd_none(pmd_t pmd)
pmd 129 arch/mips/include/asm/pgtable-32.h return pmd_val(pmd) == (unsigned long) invalid_pte_table;
pmd 132 arch/mips/include/asm/pgtable-32.h static inline int pmd_bad(pmd_t pmd)
pmd 136 arch/mips/include/asm/pgtable-32.h if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
pmd 140 arch/mips/include/asm/pgtable-32.h if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
pmd 146 arch/mips/include/asm/pgtable-32.h static inline int pmd_present(pmd_t pmd)
pmd 148 arch/mips/include/asm/pgtable-32.h return pmd_val(pmd) != (unsigned long) invalid_pte_table;
pmd 241 arch/mips/include/asm/pgtable-64.h typedef struct { unsigned long pmd; } pmd_t;
pmd 242 arch/mips/include/asm/pgtable-64.h #define pmd_val(x) ((x).pmd)
pmd 252 arch/mips/include/asm/pgtable-64.h static inline int pmd_none(pmd_t pmd)
pmd 254 arch/mips/include/asm/pgtable-64.h return pmd_val(pmd) == (unsigned long) invalid_pte_table;
pmd 257 arch/mips/include/asm/pgtable-64.h static inline int pmd_bad(pmd_t pmd)
pmd 261 arch/mips/include/asm/pgtable-64.h if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
pmd 265 arch/mips/include/asm/pgtable-64.h if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
pmd 271 arch/mips/include/asm/pgtable-64.h static inline int pmd_present(pmd_t pmd)
pmd 274 arch/mips/include/asm/pgtable-64.h if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
pmd 275 arch/mips/include/asm/pgtable-64.h return pmd_val(pmd) & _PAGE_PRESENT;
pmd 278 arch/mips/include/asm/pgtable-64.h return pmd_val(pmd) != (unsigned long) invalid_pte_table;
pmd 94 arch/mips/include/asm/pgtable.h #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
pmd 96 arch/mips/include/asm/pgtable.h #define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
pmd 98 arch/mips/include/asm/pgtable.h #define pmd_page(pmd) __pmd_page(pmd)
pmd 101 arch/mips/include/asm/pgtable.h #define pmd_page_vaddr(pmd) pmd_val(pmd)
pmd 517 arch/mips/include/asm/pgtable.h static inline int pmd_trans_huge(pmd_t pmd)
pmd 519 arch/mips/include/asm/pgtable.h return !!(pmd_val(pmd) & _PAGE_HUGE);
pmd 522 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkhuge(pmd_t pmd)
pmd 524 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_HUGE;
pmd 526 arch/mips/include/asm/pgtable.h return pmd;
pmd 530 arch/mips/include/asm/pgtable.h pmd_t *pmdp, pmd_t pmd);
pmd 533 arch/mips/include/asm/pgtable.h static inline int pmd_write(pmd_t pmd)
pmd 535 arch/mips/include/asm/pgtable.h return !!(pmd_val(pmd) & _PAGE_WRITE);
pmd 538 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_wrprotect(pmd_t pmd)
pmd 540 arch/mips/include/asm/pgtable.h pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
pmd 541 arch/mips/include/asm/pgtable.h return pmd;
pmd 544 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkwrite(pmd_t pmd)
pmd 546 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_WRITE;
pmd 547 arch/mips/include/asm/pgtable.h if (pmd_val(pmd) & _PAGE_MODIFIED)
pmd 548 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_SILENT_WRITE;
pmd 550 arch/mips/include/asm/pgtable.h return pmd;
pmd 553 arch/mips/include/asm/pgtable.h static inline int pmd_dirty(pmd_t pmd)
pmd 555 arch/mips/include/asm/pgtable.h return !!(pmd_val(pmd) & _PAGE_MODIFIED);
pmd 558 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkclean(pmd_t pmd)
pmd 560 arch/mips/include/asm/pgtable.h pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
pmd 561 arch/mips/include/asm/pgtable.h return pmd;
pmd 564 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkdirty(pmd_t pmd)
pmd 566 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_MODIFIED;
pmd 567 arch/mips/include/asm/pgtable.h if (pmd_val(pmd) & _PAGE_WRITE)
pmd 568 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_SILENT_WRITE;
pmd 570 arch/mips/include/asm/pgtable.h return pmd;
pmd 573 arch/mips/include/asm/pgtable.h static inline int pmd_young(pmd_t pmd)
pmd 575 arch/mips/include/asm/pgtable.h return !!(pmd_val(pmd) & _PAGE_ACCESSED);
pmd 578 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkold(pmd_t pmd)
pmd 580 arch/mips/include/asm/pgtable.h pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
pmd 582 arch/mips/include/asm/pgtable.h return pmd;
pmd 585 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mkyoung(pmd_t pmd)
pmd 587 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_ACCESSED;
pmd 589 arch/mips/include/asm/pgtable.h if (!(pmd_val(pmd) & _PAGE_NO_READ))
pmd 590 arch/mips/include/asm/pgtable.h pmd_val(pmd) |= _PAGE_SILENT_READ;
pmd 592 arch/mips/include/asm/pgtable.h return pmd;
pmd 598 arch/mips/include/asm/pgtable.h static inline unsigned long pmd_pfn(pmd_t pmd)
pmd 600 arch/mips/include/asm/pgtable.h return pmd_val(pmd) >> _PFN_SHIFT;
pmd 603 arch/mips/include/asm/pgtable.h static inline struct page *pmd_page(pmd_t pmd)
pmd 605 arch/mips/include/asm/pgtable.h if (pmd_trans_huge(pmd))
pmd 606 arch/mips/include/asm/pgtable.h return pfn_to_page(pmd_pfn(pmd));
pmd 608 arch/mips/include/asm/pgtable.h return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
pmd 611 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 613 arch/mips/include/asm/pgtable.h pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
pmd 615 arch/mips/include/asm/pgtable.h return pmd;
pmd 618 arch/mips/include/asm/pgtable.h static inline pmd_t pmd_mknotpresent(pmd_t pmd)
pmd 620 arch/mips/include/asm/pgtable.h pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
pmd 622 arch/mips/include/asm/pgtable.h return pmd;
pmd 140 arch/mips/kvm/mmu.c pmd_t *pmd;
pmd 159 arch/mips/kvm/mmu.c pmd = pmd_offset(pud, addr);
pmd 160 arch/mips/kvm/mmu.c if (pmd_none(*pmd)) {
pmd 167 arch/mips/kvm/mmu.c pmd_populate_kernel(NULL, pmd, new_pte);
pmd 169 arch/mips/kvm/mmu.c return pte_offset(pmd, addr);
pmd 202 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
pmd 213 arch/mips/kvm/mmu.c if (!pmd_present(pmd[i]))
pmd 216 arch/mips/kvm/mmu.c pte = pte_offset(pmd + i, 0);
pmd 221 arch/mips/kvm/mmu.c pmd_clear(pmd + i);
pmd 233 arch/mips/kvm/mmu.c pmd_t *pmd;
pmd 244 arch/mips/kvm/mmu.c pmd = pmd_offset(pud + i, 0);
pmd 248 arch/mips/kvm/mmu.c if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
pmd 250 arch/mips/kvm/mmu.c pmd_free(NULL, pmd);
pmd 331 arch/mips/kvm/mmu.c static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
pmd 342 arch/mips/kvm/mmu.c if (!pmd_present(pmd[i])) \
pmd 345 arch/mips/kvm/mmu.c pte = pte_offset(pmd + i, 0); \
pmd 358 arch/mips/kvm/mmu.c pmd_t *pmd; \
pmd 368 arch/mips/kvm/mmu.c pmd = pmd_offset(pud + i, 0); \
pmd 372 arch/mips/kvm/mmu.c ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
pmd 860 arch/mips/kvm/mmu.c static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
pmd 871 arch/mips/kvm/mmu.c if (!pmd_present(pmd[i]))
pmd 874 arch/mips/kvm/mmu.c pte = pte_offset(pmd + i, 0);
pmd 879 arch/mips/kvm/mmu.c pmd_clear(pmd + i);
pmd 891 arch/mips/kvm/mmu.c pmd_t *pmd;
pmd 902 arch/mips/kvm/mmu.c pmd = pmd_offset(pud + i, 0);
pmd 906 arch/mips/kvm/mmu.c if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
pmd 908 arch/mips/kvm/mmu.c pmd_free(NULL, pmd);
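The mips pmd_mk* helpers listed above keep the hardware-visible _PAGE_SILENT_WRITE/_PAGE_SILENT_READ bits in sync with the software dirty/accessed bits, so callers simply chain the helpers. A sketch of building a huge, dirty, writable entry; the starting pmd is assumed to already carry the pfn and base protection bits, and the variable names are illustrative:

/* Build a huge, writable, dirty pmd and publish it. */
pmd = pmd_mkhuge(pmd);			/* mark as a huge (leaf) entry */
pmd = pmd_mkwrite(pmd_mkdirty(pmd));	/* dirty first, so mkwrite also
					 * sets _PAGE_SILENT_WRITE */
set_pmd_at(mm, addr, pmdp, pmd);	/* install into the page table */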
pmd 568 arch/mips/kvm/trap_emul.c pmd_t *pmd;
pmd 587 arch/mips/kvm/trap_emul.c pmd = pmd_offset(pud + j, 0);
pmd 589 arch/mips/kvm/trap_emul.c if (pmd_none(pmd[k]))
pmd 595 arch/mips/kvm/trap_emul.c pte = pte_offset(pmd + k, 0);
pmd 598 arch/mips/kvm/trap_emul.c pmd_free(NULL, pmd);
pmd 298 arch/mips/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 313 arch/mips/mm/fault.c pmd = pmd_offset(pud, address);
pmd 317 arch/mips/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 44 arch/mips/mm/hugetlbpage.c pmd_t *pmd = NULL;
pmd 50 arch/mips/mm/hugetlbpage.c pmd = pmd_offset(pud, addr);
pmd 52 arch/mips/mm/hugetlbpage.c return (pte_t *) pmd;
pmd 67 arch/mips/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 69 arch/mips/mm/hugetlbpage.c return (pmd_val(pmd) & _PAGE_HUGE) != 0;
pmd 236 arch/mips/mm/init.c pmd_t *pmd;
pmd 250 arch/mips/mm/init.c pmd = (pmd_t *)pud;
pmd 251 arch/mips/mm/init.c for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
pmd 252 arch/mips/mm/init.c if (pmd_none(*pmd)) {
pmd 260 arch/mips/mm/init.c set_pmd(pmd, __pmd((unsigned long)pte));
pmd 261 arch/mips/mm/init.c BUG_ON(pte != pte_offset_kernel(pmd, 0));
pmd 47 arch/mips/mm/ioremap.c static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
pmd 59 arch/mips/mm/ioremap.c pte_t * pte = pte_alloc_kernel(pmd, address);
pmd 64 arch/mips/mm/ioremap.c pmd++;
pmd 82 arch/mips/mm/ioremap.c pmd_t *pmd;
pmd 88 arch/mips/mm/ioremap.c pmd = pmd_alloc(&init_mm, pud, address);
pmd 89 arch/mips/mm/ioremap.c if (!pmd)
pmd 91 arch/mips/mm/ioremap.c if (remap_area_pmd(pmd, address, end - address,
pmd 37 arch/mips/mm/pgtable-32.c pmd_t pmd;
pmd 39 arch/mips/mm/pgtable-32.c pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
pmd 41 arch/mips/mm/pgtable-32.c return pmd;
pmd 46 arch/mips/mm/pgtable-32.c pmd_t *pmdp, pmd_t pmd)
pmd 48 arch/mips/mm/pgtable-32.c *pmdp = pmd;
pmd 60 arch/mips/mm/pgtable-32.c pmd_t *pmd;
pmd 86 arch/mips/mm/pgtable-32.c pmd = pmd_offset(pud, vaddr);
pmd 87 arch/mips/mm/pgtable-32.c pte = pte_offset_kernel(pmd, vaddr);
pmd 93 arch/mips/mm/pgtable-64.c pmd_t pmd;
pmd 95 arch/mips/mm/pgtable-64.c pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
pmd 97 arch/mips/mm/pgtable-64.c return pmd;
pmd 101 arch/mips/mm/pgtable-64.c pmd_t *pmdp, pmd_t pmd)
pmd 103 arch/mips/mm/pgtable-64.c *pmdp = pmd;
pmd 720 arch/mips/mm/tlbex.c unsigned int pmd, int lid)
pmd 722 arch/mips/mm/tlbex.c UASM_i_LW(p, tmp, 0, pmd);
pmd 19 arch/nds32/include/asm/pgalloc.h #define pmd_free(mm, pmd) do { } while (0)
pmd 20 arch/nds32/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
pmd 21 arch/nds32/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 41 arch/nds32/include/asm/pgtable.h #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
pmd 198 arch/nds32/include/asm/pgtable.h #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
pmd 210 arch/nds32/include/asm/pgtable.h static inline void set_pmd(pmd_t * pmdp, pmd_t pmd)
pmd 213 arch/nds32/include/asm/pgtable.h *pmdp = pmd;
pmd 310 arch/nds32/include/asm/pgtable.h #define pmd_none(pmd) (pmd_val(pmd)&0x1)
pmd 311 arch/nds32/include/asm/pgtable.h #define pmd_present(pmd) (!pmd_none(pmd))
pmd 312 arch/nds32/include/asm/pgtable.h #define pmd_bad(pmd) pmd_none(pmd)
pmd 320 arch/nds32/include/asm/pgtable.h pmd_t pmd;
pmd 327 arch/nds32/include/asm/pgtable.h pmd_val(pmd) = __virt_to_phys(ptr) | prot;
pmd 328 arch/nds32/include/asm/pgtable.h return pmd;
pmd 331 arch/nds32/include/asm/pgtable.h #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
pmd 10 arch/nds32/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tln)->mm, pmd)
pmd 34 arch/nds32/mm/fault.c pmd_t *pmd;
pmd 44 arch/nds32/mm/fault.c pmd = pmd_offset(pgd, addr);
pmd 46 arch/nds32/mm/fault.c pr_alert(", *pmd=%08lx", pmd_val(*pmd));
pmd 49 arch/nds32/mm/fault.c if (pmd_none(*pmd))
pmd 52 arch/nds32/mm/fault.c if (pmd_bad(*pmd)) {
pmd 61 arch/nds32/mm/fault.c pte = pte_offset_map(pmd, addr);
pmd 363 arch/nds32/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 377 arch/nds32/mm/fault.c pmd = pmd_offset(pud, addr);
pmd 382 arch/nds32/mm/fault.c if (!pmd_present(*pmd))
pmd 383 arch/nds32/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 385 arch/nds32/mm/fault.c BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
pmd 104 arch/nds32/mm/init.c pmd_t *pmd;
pmd 115 arch/nds32/mm/init.c pmd = pmd_offset(pud, vaddr);
pmd 120 arch/nds32/mm/init.c set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
pmd 130 arch/nds32/mm/init.c pmd = pmd_offset(pud, vaddr);
pmd 135 arch/nds32/mm/init.c set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
pmd 43 arch/nds32/mm/mm-nds32.c pmd_t *pmd;
pmd 49 arch/nds32/mm/mm-nds32.c pmd = (pmd_t *) pgd;
pmd 50 arch/nds32/mm/mm-nds32.c if (pmd_none(*pmd))
pmd 52 arch/nds32/mm/mm-nds32.c if (pmd_bad(*pmd)) {
pmd 53 arch/nds32/mm/mm-nds32.c pmd_ERROR(*pmd);
pmd 54 arch/nds32/mm/mm-nds32.c pmd_clear(pmd);
pmd 58 arch/nds32/mm/mm-nds32.c pte = pmd_page(*pmd);
pmd 59 arch/nds32/mm/mm-nds32.c pmd_clear(pmd);
pmd 63 arch/nds32/mm/mm-nds32.c pmd_free(mm, pmd);
pmd 77 arch/nds32/mm/mm-nds32.c pmd_t *pmd;
pmd 87 arch/nds32/mm/mm-nds32.c pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
pmd 88 arch/nds32/mm/mm-nds32.c set_pmd(pmd, __pmd(pmdval));
pmd 19 arch/nds32/mm/proc.c pmd_t *pmd;
pmd 22 arch/nds32/mm/proc.c pmd = pmd_offset(pgd_offset_k(addr), addr);
pmd 23 arch/nds32/mm/proc.c if (!pmd_none(*pmd)) {
pmd 24 arch/nds32/mm/proc.c ptep = pte_offset_map(pmd, addr);
pmd 36 arch/nds32/mm/proc.c pmd_t *pmd;
pmd 43 arch/nds32/mm/proc.c pmd = pmd_offset(pud, addr);
pmd 44 arch/nds32/mm/proc.c if (!pmd_none(*pmd)) {
pmd 45 arch/nds32/mm/proc.c ptep = pte_offset_map(pmd, addr);
pmd 17 arch/nios2/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 20 arch/nios2/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)pte));
pmd 23 arch/nios2/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 26 arch/nios2/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
pmd 28 arch/nios2/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 187 arch/nios2/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 189 arch/nios2/include/asm/pgtable.h return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
pmd 190 arch/nios2/include/asm/pgtable.h && (pmd_val(pmd) != 0UL);
pmd 219 arch/nios2/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 221 arch/nios2/include/asm/pgtable.h return (pmd_val(pmd) ==
pmd 222 arch/nios2/include/asm/pgtable.h (unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
pmd 225 arch/nios2/include/asm/pgtable.h #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
pmd 249 arch/nios2/include/asm/pgtable.h #define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
pmd 250 arch/nios2/include/asm/pgtable.h #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
pmd 251 arch/nios2/include/asm/pgtable.h #define pmd_page_vaddr(pmd) pmd_val(pmd)
pmd 249 arch/nios2/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 267 arch/nios2/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 50 arch/nios2/mm/ioremap.c static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
pmd 64 arch/nios2/mm/ioremap.c pte_t *pte = pte_alloc_kernel(pmd, address);
pmd 71 arch/nios2/mm/ioremap.c pmd++;
pmd 90 arch/nios2/mm/ioremap.c pmd_t *pmd;
pmd 96 arch/nios2/mm/ioremap.c pmd = pmd_alloc(&init_mm, pud, address);
pmd 97 arch/nios2/mm/ioremap.c if (!pmd)
pmd 99 arch/nios2/mm/ioremap.c if (remap_area_pmd(pmd, address, end - address,
pmd 25 arch/openrisc/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \
pmd 26 arch/openrisc/include/asm/pgalloc.h set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
pmd 28 arch/openrisc/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 31 arch/openrisc/include/asm/pgalloc.h set_pmd(pmd, __pmd(_KERNPG_TABLE +
pmd 102 arch/openrisc/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 368 arch/openrisc/include/asm/pgtable.h #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
pmd 369 arch/openrisc/include/asm/pgtable.h #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
pmd 300 arch/openrisc/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 330 arch/openrisc/mm/fault.c pmd = pmd_offset(pud, address);
pmd 336 arch/openrisc/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 45 arch/parisc/include/asm/page.h typedef struct { __u32 pmd; } pmd_t;
pmd 51 arch/parisc/include/asm/page.h #define pmd_val(x) ((x).pmd + 0)
pmd 60 arch/parisc/include/asm/page.h #define __pmd_val_set(x,n) (x).pmd = (n)
pmd 62 arch/parisc/include/asm/pgalloc.h static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
pmd 65 arch/parisc/include/asm/pgalloc.h (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
pmd 70 arch/parisc/include/asm/pgalloc.h pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
pmd 71 arch/parisc/include/asm/pgalloc.h if (pmd)
pmd 72 arch/parisc/include/asm/pgalloc.h memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
pmd 73 arch/parisc/include/asm/pgalloc.h return pmd;
pmd 76 arch/parisc/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 78 arch/parisc/include/asm/pgalloc.h if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
pmd 88 arch/parisc/include/asm/pgalloc.h free_pages((unsigned long)pmd, PMD_ORDER);
pmd 102 arch/parisc/include/asm/pgalloc.h #define pgd_populate(mm, pmd, pte) BUG()
pmd 107 arch/parisc/include/asm/pgalloc.h pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd 112 arch/parisc/include/asm/pgalloc.h if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
pmd 113 arch/parisc/include/asm/pgalloc.h __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
pmd 119 arch/parisc/include/asm/pgalloc.h __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
pmd 123 arch/parisc/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte_page) \
pmd 124 arch/parisc/include/asm/pgalloc.h pmd_populate_kernel(mm, pmd, page_address(pte_page))
pmd 125 arch/parisc/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 332 arch/parisc/include/asm/pgtable.h static inline void pmd_clear(pmd_t *pmd) {
pmd 334 arch/parisc/include/asm/pgtable.h if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
pmd 337 arch/parisc/include/asm/pgtable.h __pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
pmd 340 arch/parisc/include/asm/pgtable.h __pmd_val_set(*pmd, 0);
pmd 436 arch/parisc/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
pmd 438 arch/parisc/include/asm/pgtable.h #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
pmd 439 arch/parisc/include/asm/pgtable.h #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
pmd 462 arch/parisc/include/asm/pgtable.h #define pte_offset_kernel(pmd, address) \
pmd 463 arch/parisc/include/asm/pgtable.h ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
pmd 464 arch/parisc/include/asm/pgtable.h #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
pmd 7 arch/parisc/include/asm/tlb.h #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
pmd 539 arch/parisc/kernel/cache.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 540 arch/parisc/kernel/cache.c if (!pmd_none(*pmd))
pmd 541 arch/parisc/kernel/cache.c ptep = pte_offset_map(pmd, addr);
pmd 105 arch/parisc/kernel/pci-dma.c static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
pmd 116 arch/parisc/kernel/pci-dma.c pte_t * pte = pte_alloc_kernel(pmd, vaddr);
pmd 123 arch/parisc/kernel/pci-dma.c pmd++;
pmd 136 arch/parisc/kernel/pci-dma.c pmd_t *pmd;
pmd 138 arch/parisc/kernel/pci-dma.c pmd = pmd_alloc(NULL, dir, vaddr);
pmd 139 arch/parisc/kernel/pci-dma.c if (!pmd)
pmd 141 arch/parisc/kernel/pci-dma.c if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
pmd 149 arch/parisc/kernel/pci-dma.c static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
pmd 156 arch/parisc/kernel/pci-dma.c if (pmd_none(*pmd))
pmd 158 arch/parisc/kernel/pci-dma.c if (pmd_bad(*pmd)) {
pmd 159 arch/parisc/kernel/pci-dma.c pmd_ERROR(*pmd);
pmd 160 arch/parisc/kernel/pci-dma.c pmd_clear(pmd);
pmd 163 arch/parisc/kernel/pci-dma.c pte = pte_offset_map(pmd, vaddr);
pmd 188 arch/parisc/kernel/pci-dma.c pmd_t * pmd;
pmd 199 arch/parisc/kernel/pci-dma.c pmd = pmd_offset(dir, vaddr);
pmd 205 arch/parisc/kernel/pci-dma.c unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
pmd 208 arch/parisc/kernel/pci-dma.c pmd++;
pmd 17 arch/parisc/mm/fixmap.c pmd_t *pmd = pmd_offset(pgd, vaddr);
pmd 20 arch/parisc/mm/fixmap.c if (pmd_none(*pmd))
pmd 21 arch/parisc/mm/fixmap.c pmd = pmd_alloc(NULL, pgd, vaddr);
pmd 23 arch/parisc/mm/fixmap.c pte = pte_offset_kernel(pmd, vaddr);
pmd 25 arch/parisc/mm/fixmap.c pte = pte_alloc_kernel(pmd, vaddr);
pmd 35 arch/parisc/mm/fixmap.c pmd_t *pmd = pmd_offset(pgd, vaddr);
pmd 36 arch/parisc/mm/fixmap.c pte_t *pte = pte_offset_kernel(pmd, vaddr);
pmd 53 arch/parisc/mm/hugetlbpage.c pmd_t *pmd;
pmd 66 arch/parisc/mm/hugetlbpage.c pmd = pmd_alloc(mm, pud, addr);
pmd 67 arch/parisc/mm/hugetlbpage.c if (pmd)
pmd 68 arch/parisc/mm/hugetlbpage.c pte = pte_alloc_map(mm, pmd, addr);
pmd 78 arch/parisc/mm/hugetlbpage.c pmd_t *pmd;
pmd 87 arch/parisc/mm/hugetlbpage.c pmd = pmd_offset(pud, addr);
pmd 88 arch/parisc/mm/hugetlbpage.c if (!pmd_none(*pmd))
pmd 89 arch/parisc/mm/hugetlbpage.c pte = pte_offset_map(pmd, addr);
pmd 193 arch/parisc/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 355 arch/parisc/mm/init.c pmd_t *pmd;
pmd 388 arch/parisc/mm/init.c pmd = (pmd_t *)__pa(pg_dir);
pmd 390 arch/parisc/mm/init.c pmd = (pmd_t *)pgd_address(*pg_dir);
pmd 396 arch/parisc/mm/init.c if (!pmd) {
pmd 397 arch/parisc/mm/init.c pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
pmd 399 arch/parisc/mm/init.c if (!pmd)
pmd 401 arch/parisc/mm/init.c pmd = (pmd_t *) __pa(pmd);
pmd 404 arch/parisc/mm/init.c pgd_populate(NULL, pg_dir, __va(pmd));
pmd 410 arch/parisc/mm/init.c pmd = (pmd_t *)__va(pmd) + start_pmd;
pmd 411 arch/parisc/mm/init.c for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
pmd 417 arch/parisc/mm/init.c pg_table = (pte_t *)pmd_address(*pmd);
pmd 426 arch/parisc/mm/init.c pmd_populate_kernel(NULL, pmd, __va(pg_table));
pmd 218 arch/powerpc/include/asm/book3s/32/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 219 arch/powerpc/include/asm/book3s/32/pgtable.h #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
pmd 220 arch/powerpc/include/asm/book3s/32/pgtable.h #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
pmd 347 arch/powerpc/include/asm/book3s/32/pgtable.h #define pmd_page_vaddr(pmd) \
pmd 348 arch/powerpc/include/asm/book3s/32/pgtable.h ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
pmd 349 arch/powerpc/include/asm/book3s/32/pgtable.h #define pmd_page(pmd) \
pmd 350 arch/powerpc/include/asm/book3s/32/pgtable.h pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
pmd 129 arch/powerpc/include/asm/book3s/64/hash-4k.h static inline int hash__pmd_trans_huge(pmd_t pmd)
pmd 140 arch/powerpc/include/asm/book3s/64/hash-4k.h static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
pmd 143 arch/powerpc/include/asm/book3s/64/hash-4k.h return pmd;
pmd 159 arch/powerpc/include/asm/book3s/64/hash-4k.h static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
pmd 162 arch/powerpc/include/asm/book3s/64/hash-4k.h return pmd;
pmd 247 arch/powerpc/include/asm/book3s/64/hash-64k.h static inline int hash__pmd_trans_huge(pmd_t pmd)
pmd 249 arch/powerpc/include/asm/book3s/64/hash-64k.h return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
pmd 258 arch/powerpc/include/asm/book3s/64/hash-64k.h static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
pmd 260 arch/powerpc/include/asm/book3s/64/hash-64k.h return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
pmd 276 arch/powerpc/include/asm/book3s/64/hash-64k.h static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
pmd 278 arch/powerpc/include/asm/book3s/64/hash-64k.h return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
pmd 135 arch/powerpc/include/asm/book3s/64/hash.h #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS)
pmd 115 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 117 arch/powerpc/include/asm/book3s/64/pgalloc.h *pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
pmd 136 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 138 arch/powerpc/include/asm/book3s/64/pgalloc.h pmd_fragment_free((unsigned long *)pmd);
pmd 141 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
pmd 149 arch/powerpc/include/asm/book3s/64/pgalloc.h return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
pmd 152 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 155 arch/powerpc/include/asm/book3s/64/pgalloc.h *pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
pmd 158 arch/powerpc/include/asm/book3s/64/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 161 arch/powerpc/include/asm/book3s/64/pgalloc.h *pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
pmd 9 arch/powerpc/include/asm/book3s/64/pgtable-4k.h static inline int pmd_huge(pmd_t pmd)
pmd 15 arch/powerpc/include/asm/book3s/64/pgtable-4k.h return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
pmd 74 arch/powerpc/include/asm/book3s/64/pgtable-4k.h static inline int pmd_huge(pmd_t pmd) { return 0; }
pmd 17 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int pmd_huge(pmd_t pmd)
pmd 22 arch/powerpc/include/asm/book3s/64/pgtable-64k.h return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
pmd 63 arch/powerpc/include/asm/book3s/64/pgtable-64k.h static inline int pmd_huge(pmd_t pmd) { return 0; }
pmd 744 arch/powerpc/include/asm/book3s/64/pgtable.h #define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
pmd 868 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 870 arch/powerpc/include/asm/book3s/64/pgtable.h return !pmd_raw(pmd);
pmd 873 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 881 arch/powerpc/include/asm/book3s/64/pgtable.h if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
pmd 887 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_is_serializing(pmd_t pmd)
pmd 897 arch/powerpc/include/asm/book3s/64/pgtable.h if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
pmd 904 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_bad(pmd_t pmd)
pmd 907 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__pmd_bad(pmd);
pmd 908 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__pmd_bad(pmd);
pmd 927 arch/powerpc/include/asm/book3s/64/pgtable.h extern struct page *pmd_page(pmd_t pmd);
pmd 997 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
pmd 1068 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t pmd_pte(pmd_t pmd)
pmd 1070 arch/powerpc/include/asm/book3s/64/pgtable.h return __pte_raw(pmd_raw(pmd));
pmd 1078 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pte_t *pmdp_ptep(pmd_t *pmd)
pmd 1080 arch/powerpc/include/asm/book3s/64/pgtable.h return (pte_t *)pmd;
pmd 1082 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
pmd 1083 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
pmd 1084 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_young(pmd) pte_young(pmd_pte(pmd))
pmd 1085 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
pmd 1086 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
pmd 1087 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
pmd 1088 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
pmd 1089 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
pmd 1090 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
pmd 1091 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mk_savedwrite(pmd) pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
pmd 1092 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_clear_savedwrite(pmd) pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
pmd 1095 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
pmd 1096 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
pmd 1097 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
pmd 1100 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_swp_mksoft_dirty(pmd) pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
pmd 1101 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_swp_soft_dirty(pmd) pte_swp_soft_dirty(pmd_pte(pmd))
pmd 1102 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_swp_clear_soft_dirty(pmd) pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
pmd 1107 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_protnone(pmd_t pmd)
pmd 1109 arch/powerpc/include/asm/book3s/64/pgtable.h return pte_protnone(pmd_pte(pmd));
pmd 1113 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_write(pmd) pte_write(pmd_pte(pmd))
pmd 1114 arch/powerpc/include/asm/book3s/64/pgtable.h #define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
pmd 1115 arch/powerpc/include/asm/book3s/64/pgtable.h #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
pmd 1118 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pmd_access_permitted(pmd_t pmd, bool write)
pmd 1130 arch/powerpc/include/asm/book3s/64/pgtable.h if (pmd_is_serializing(pmd))
pmd 1133 arch/powerpc/include/asm/book3s/64/pgtable.h return pte_access_permitted(pmd_pte(pmd), write);
pmd 1139 arch/powerpc/include/asm/book3s/64/pgtable.h extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
pmd 1141 arch/powerpc/include/asm/book3s/64/pgtable.h pmd_t *pmdp, pmd_t pmd);
pmd 1143 arch/powerpc/include/asm/book3s/64/pgtable.h pmd_t *pmd);
pmd 1166 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_large(pmd_t pmd)
pmd 1168 arch/powerpc/include/asm/book3s/64/pgtable.h return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
pmd 1171 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pmd_t pmd_mknotpresent(pmd_t pmd)
pmd 1173 arch/powerpc/include/asm/book3s/64/pgtable.h return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
pmd 1211 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_trans_huge(pmd_t pmd)
pmd 1213 arch/powerpc/include/asm/book3s/64/pgtable.h if (!pmd_present(pmd))
pmd 1217 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__pmd_trans_huge(pmd);
pmd 1218 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__pmd_trans_huge(pmd);
pmd 1229 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pmd_t pmd_mkhuge(pmd_t pmd)
pmd 1232 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__pmd_mkhuge(pmd);
pmd 1233 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__pmd_mkhuge(pmd);
pmd 1304 arch/powerpc/include/asm/book3s/64/pgtable.h static inline pmd_t pmd_mkdevmap(pmd_t pmd)
pmd 1307 arch/powerpc/include/asm/book3s/64/pgtable.h return radix__pmd_mkdevmap(pmd);
pmd 1308 arch/powerpc/include/asm/book3s/64/pgtable.h return hash__pmd_mkdevmap(pmd);
pmd 1311 arch/powerpc/include/asm/book3s/64/pgtable.h static inline int pmd_devmap(pmd_t pmd)
pmd 1313 arch/powerpc/include/asm/book3s/64/pgtable.h return pte_devmap(pmd_pte(pmd));
pmd 1360 arch/powerpc/include/asm/book3s/64/pgtable.h static inline bool pmd_is_leaf(pmd_t pmd)
pmd 1362 arch/powerpc/include/asm/book3s/64/pgtable.h return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
pmd 214 arch/powerpc/include/asm/book3s/64/radix.h static inline int radix__pmd_bad(pmd_t pmd)
pmd 216 arch/powerpc/include/asm/book3s/64/radix.h return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
pmd 237 arch/powerpc/include/asm/book3s/64/radix.h static inline int radix__pmd_trans_huge(pmd_t pmd)
pmd 239 arch/powerpc/include/asm/book3s/64/radix.h return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
pmd 242 arch/powerpc/include/asm/book3s/64/radix.h static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
pmd 244 arch/powerpc/include/asm/book3s/64/radix.h return __pmd(pmd_val(pmd) | _PAGE_PTE);
pmd 266 arch/powerpc/include/asm/book3s/64/radix.h static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
pmd 268 arch/powerpc/include/asm/book3s/64/radix.h return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
pmd 118 arch/powerpc/include/asm/book3s/64/tlbflush-hash.h extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
pmd 195 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 196 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
pmd 197 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
pmd 346 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_page_vaddr(pmd) \
pmd 347 arch/powerpc/include/asm/nohash/32/pgtable.h ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
pmd 348 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_page(pmd) \
pmd 349 arch/powerpc/include/asm/nohash/32/pgtable.h pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
pmd 351 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_page_vaddr(pmd) \
pmd 352 arch/powerpc/include/asm/nohash/32/pgtable.h ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
pmd 353 arch/powerpc/include/asm/nohash/32/pgtable.h #define pmd_page(pmd) \
pmd 354 arch/powerpc/include/asm/nohash/32/pgtable.h pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
pmd 31 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 33 arch/powerpc/include/asm/nohash/64/pgalloc.h pud_set(pud, (unsigned long)pmd);
pmd 36 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 39 arch/powerpc/include/asm/nohash/64/pgalloc.h pmd_set(pmd, (unsigned long)pte);
pmd 42 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 45 arch/powerpc/include/asm/nohash/64/pgalloc.h pmd_set(pmd, (unsigned long)pte_page);
pmd 54 arch/powerpc/include/asm/nohash/64/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 56 arch/powerpc/include/asm/nohash/64/pgalloc.h kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
pmd 59 arch/powerpc/include/asm/nohash/64/pgalloc.h #define __pmd_free_tlb(tlb, pmd, addr) \
pmd 60 arch/powerpc/include/asm/nohash/64/pgalloc.h pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
pmd 138 arch/powerpc/include/asm/nohash/64/pgtable.h static inline pte_t pmd_pte(pmd_t pmd)
pmd 140 arch/powerpc/include/asm/nohash/64/pgtable.h return __pte(pmd_val(pmd));
pmd 143 arch/powerpc/include/asm/nohash/64/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 144 arch/powerpc/include/asm/nohash/64/pgtable.h #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
pmd 145 arch/powerpc/include/asm/nohash/64/pgtable.h || (pmd_val(pmd) & PMD_BAD_BITS))
pmd 146 arch/powerpc/include/asm/nohash/64/pgtable.h #define pmd_present(pmd) (!pmd_none(pmd))
pmd 147 arch/powerpc/include/asm/nohash/64/pgtable.h #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
pmd 148 arch/powerpc/include/asm/nohash/64/pgtable.h extern struct page *pmd_page(pmd_t pmd);
pmd 67 arch/powerpc/include/asm/nohash/pgtable.h static inline int pmd_protnone(pmd_t pmd)
pmd 69 arch/powerpc/include/asm/nohash/pgtable.h return pte_protnone(pmd_pte(pmd));
pmd 277 arch/powerpc/include/asm/nohash/pgtable.h static inline int pmd_huge(pmd_t pmd)
pmd 73 arch/powerpc/include/asm/pgalloc.h static inline pgtable_t pmd_pgtable(pmd_t pmd)
pmd 75 arch/powerpc/include/asm/pgalloc.h return (pgtable_t)pmd_page_vaddr(pmd);
pmd 23 arch/powerpc/include/asm/pgtable-be-types.h typedef struct { __be64 pmd; } pmd_t;
pmd 28 arch/powerpc/include/asm/pgtable-be-types.h return be64_to_cpu(x.pmd);
pmd 33 arch/powerpc/include/asm/pgtable-be-types.h return x.pmd;
pmd 19 arch/powerpc/include/asm/pgtable-types.h typedef struct { unsigned long pmd; } pmd_t;
pmd 23 arch/powerpc/include/asm/pgtable-types.h return x.pmd;
pmd 83 arch/powerpc/include/asm/pgtable.h #define pmd_large(pmd) 0
pmd 134 arch/powerpc/include/asm/pgtable.h static inline bool pmd_is_leaf(pmd_t pmd)
pmd 446 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
pmd 450 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t *p = pmd;
pmd 472 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_pmd_free(pmd);
pmd 487 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t *pmd;
pmd 489 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd = pmd_offset(p, 0);
pmd 490 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
pmd 522 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
pmd 525 arch/powerpc/kvm/book3s_64_mmu_radix.c pte_t *pte = pte_offset_kernel(pmd, 0);
pmd 532 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_clear(pmd);
pmd 541 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t *pmd = pmd_offset(pud, 0);
pmd 551 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
pmd 570 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t *pmd, *new_pmd = NULL;
pmd 582 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd = NULL;
pmd 584 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd = pmd_offset(pud, gpa);
pmd 588 arch/powerpc/kvm/book3s_64_mmu_radix.c if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
pmd 656 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd = pmd_offset(pud, gpa);
pmd 657 arch/powerpc/kvm/book3s_64_mmu_radix.c if (pmd_is_leaf(*pmd)) {
pmd 662 arch/powerpc/kvm/book3s_64_mmu_radix.c if (pmd_raw(*pmd) == pte_raw(pte)) {
pmd 667 arch/powerpc/kvm/book3s_64_mmu_radix.c WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
pmd 669 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
pmd 684 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
pmd 688 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!pmd_none(*pmd)) {
pmd 694 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
pmd 696 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
pmd 702 arch/powerpc/kvm/book3s_64_mmu_radix.c if (pmd_none(*pmd)) {
pmd 705 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_populate(kvm->mm, pmd, new_ptep);
pmd 708 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = pte_offset_kernel(pmd, gpa);
pmd 1201 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t pmd, *pmdp;
pmd 1291 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd = READ_ONCE(*pmdp);
pmd 1292 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
pmd 1296 arch/powerpc/kvm/book3s_64_mmu_radix.c if (pmd_val(pmd) & _PAGE_PTE) {
pmd 1297 arch/powerpc/kvm/book3s_64_mmu_radix.c pte = pmd_val(pmd);
pmd 1302 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = pte_offset_kernel(&pmd, gpa);
pmd 302 arch/powerpc/mm/book3s32/mmu.c pmd_t *pmd;
pmd 306 arch/powerpc/mm/book3s32/mmu.c pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
pmd 307 arch/powerpc/mm/book3s32/mmu.c if (!pmd_none(*pmd))
pmd 308 arch/powerpc/mm/book3s32/mmu.c add_hash_page(mm->context.id, ea, pmd_val(*pmd));
pmd 77 arch/powerpc/mm/book3s32/tlb.c pmd_t *pmd;
pmd 90 arch/powerpc/mm/book3s32/tlb.c pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
pmd 95 arch/powerpc/mm/book3s32/tlb.c if (!pmd_none(*pmd)) {
pmd 97 arch/powerpc/mm/book3s32/tlb.c flush_hash_pages(ctx, start, pmd_val(*pmd), count);
pmd 102 arch/powerpc/mm/book3s32/tlb.c ++pmd;
pmd 141 arch/powerpc/mm/book3s32/tlb.c pmd_t *pmd;
pmd 148 arch/powerpc/mm/book3s32/tlb.c pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
pmd 149 arch/powerpc/mm/book3s32/tlb.c if (!pmd_none(*pmd))
pmd 150 arch/powerpc/mm/book3s32/tlb.c flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
pmd 36 arch/powerpc/mm/book3s64/hash_hugepage.c pmd_t pmd = READ_ONCE(*pmdp);
pmd 38 arch/powerpc/mm/book3s64/hash_hugepage.c old_pmd = pmd_val(pmd);
pmd 225 arch/powerpc/mm/book3s64/hash_pgtable.c pmd_t pmd;
pmd 231 arch/powerpc/mm/book3s64/hash_pgtable.c pmd = *pmdp;
pmd 258 arch/powerpc/mm/book3s64/hash_pgtable.c flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
pmd 259 arch/powerpc/mm/book3s64/hash_pgtable.c return pmd;
pmd 235 arch/powerpc/mm/book3s64/hash_tlb.c void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
pmd 252 arch/powerpc/mm/book3s64/hash_tlb.c start_pte = pte_offset_map(pmd, addr);
pmd 65 arch/powerpc/mm/book3s64/pgtable.c pmd_t *pmdp, pmd_t pmd)
pmd 75 arch/powerpc/mm/book3s64/pgtable.c WARN_ON(!(pmd_large(pmd)));
pmd 77 arch/powerpc/mm/book3s64/pgtable.c trace_hugepage_set_pmd(addr, pmd_val(pmd));
pmd 78 arch/powerpc/mm/book3s64/pgtable.c return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
pmd 123 arch/powerpc/mm/book3s64/pgtable.c static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
pmd 125 arch/powerpc/mm/book3s64/pgtable.c return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
pmd 141 arch/powerpc/mm/book3s64/pgtable.c pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 145 arch/powerpc/mm/book3s64/pgtable.c pmdv = pmd_val(pmd);
pmd 157 arch/powerpc/mm/book3s64/pgtable.c pmd_t *pmd)
pmd 331 arch/powerpc/mm/book3s64/pgtable.c pmd_t *pmd;
pmd 333 arch/powerpc/mm/book3s64/pgtable.c pmd = get_pmd_from_cache(mm);
pmd 334 arch/powerpc/mm/book3s64/pgtable.c if (pmd)
pmd 335 arch/powerpc/mm/book3s64/pgtable.c return pmd;
pmd 340 arch/powerpc/mm/book3s64/pgtable.c void pmd_fragment_free(unsigned long *pmd)
pmd 342 arch/powerpc/mm/book3s64/pgtable.c struct page *page = virt_to_page(pmd);
pmd 662 arch/powerpc/mm/book3s64/radix_pgtable.c static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
pmd 674 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_clear(pmd);
pmd 679 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t *pmd;
pmd 683 arch/powerpc/mm/book3s64/radix_pgtable.c pmd = pmd_start + i;
pmd 684 arch/powerpc/mm/book3s64/radix_pgtable.c if (!pmd_none(*pmd))
pmd 795 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t *pmd;
pmd 797 arch/powerpc/mm/book3s64/radix_pgtable.c pmd = pmd_start + pmd_index(addr);
pmd 798 arch/powerpc/mm/book3s64/radix_pgtable.c for (; addr < end; addr = next, pmd++) {
pmd 801 arch/powerpc/mm/book3s64/radix_pgtable.c if (!pmd_present(*pmd))
pmd 804 arch/powerpc/mm/book3s64/radix_pgtable.c if (pmd_is_leaf(*pmd)) {
pmd 805 arch/powerpc/mm/book3s64/radix_pgtable.c split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
pmd 809 arch/powerpc/mm/book3s64/radix_pgtable.c pte_base = (pte_t *)pmd_page_vaddr(*pmd);
pmd 811 arch/powerpc/mm/book3s64/radix_pgtable.c free_pte_table(pte_base, pmd);
pmd 944 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t pmd;
pmd 952 arch/powerpc/mm/book3s64/radix_pgtable.c pmd = *pmdp;
pmd 960 arch/powerpc/mm/book3s64/radix_pgtable.c return pmd;
pmd 1125 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_t *pmd;
pmd 1128 arch/powerpc/mm/book3s64/radix_pgtable.c pmd = (pmd_t *)pud_page_vaddr(*pud);
pmd 1134 arch/powerpc/mm/book3s64/radix_pgtable.c if (!pmd_none(pmd[i])) {
pmd 1136 arch/powerpc/mm/book3s64/radix_pgtable.c pte = (pte_t *)pmd_page_vaddr(pmd[i]);
pmd 1142 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_free(&init_mm, pmd);
pmd 1147 arch/powerpc/mm/book3s64/radix_pgtable.c int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
pmd 1149 arch/powerpc/mm/book3s64/radix_pgtable.c pte_t *ptep = (pte_t *)pmd;
pmd 1160 arch/powerpc/mm/book3s64/radix_pgtable.c int pmd_clear_huge(pmd_t *pmd)
pmd 1162 arch/powerpc/mm/book3s64/radix_pgtable.c if (pmd_huge(*pmd)) {
pmd 1163 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_clear(pmd);
pmd 1170 arch/powerpc/mm/book3s64/radix_pgtable.c int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
pmd 1174 arch/powerpc/mm/book3s64/radix_pgtable.c pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd 1175 arch/powerpc/mm/book3s64/radix_pgtable.c pmd_clear(pmd);
pmd 58 arch/powerpc/mm/book3s64/subpage_prot.c pmd_t *pmd;
pmd 68 arch/powerpc/mm/book3s64/subpage_prot.c pmd = pmd_offset(pud, addr);
pmd 69 arch/powerpc/mm/book3s64/subpage_prot.c if (pmd_none(*pmd))
pmd 71 arch/powerpc/mm/book3s64/subpage_prot.c pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 134 arch/powerpc/mm/book3s64/subpage_prot.c static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
pmd 138 arch/powerpc/mm/book3s64/subpage_prot.c split_huge_pmd(vma, pmd, addr);
pmd 344 arch/powerpc/mm/hugetlbpage.c pmd_t *pmd;
pmd 352 arch/powerpc/mm/hugetlbpage.c pmd = pmd_offset(pud, addr);
pmd 354 arch/powerpc/mm/hugetlbpage.c if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
pmd 359 arch/powerpc/mm/hugetlbpage.c WARN_ON(!pmd_none_or_clear_bad(pmd));
pmd 368 arch/powerpc/mm/hugetlbpage.c more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
pmd 372 arch/powerpc/mm/hugetlbpage.c free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
pmd 387 arch/powerpc/mm/hugetlbpage.c pmd = pmd_offset(pud, start);
pmd 389 arch/powerpc/mm/hugetlbpage.c pmd_free_tlb(tlb, pmd, start);
pmd 35 arch/powerpc/mm/kasan/kasan_init_32.c pmd_t *pmd;
pmd 39 arch/powerpc/mm/kasan/kasan_init_32.c pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
pmd 41 arch/powerpc/mm/kasan/kasan_init_32.c for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
pmd 45 arch/powerpc/mm/kasan/kasan_init_32.c if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
pmd 61 arch/powerpc/mm/kasan/kasan_init_32.c if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
pmd 62 arch/powerpc/mm/kasan/kasan_init_32.c pmd_populate_kernel(&init_mm, pmd, new);
pmd 97 arch/powerpc/mm/kasan/kasan_init_32.c pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
pmd 104 arch/powerpc/mm/kasan/kasan_init_32.c __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
pmd 121 arch/powerpc/mm/kasan/kasan_init_32.c pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
pmd 122 arch/powerpc/mm/kasan/kasan_init_32.c pte_t *ptep = pte_offset_kernel(pmd, k_cur);
pmd 208 arch/powerpc/mm/kasan/kasan_init_32.c pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
pmd 216 arch/powerpc/mm/kasan/kasan_init_32.c pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
pmd 217 arch/powerpc/mm/kasan/kasan_init_32.c } while (pmd++, addr = next, addr != end);
pmd 269 arch/powerpc/mm/pgtable.c pmd_t *pmd;
pmd 277 arch/powerpc/mm/pgtable.c pmd = pmd_offset(pud, addr);
pmd 284 arch/powerpc/mm/pgtable.c if (pmd_none(*pmd))
pmd 286 arch/powerpc/mm/pgtable.c BUG_ON(!pmd_present(*pmd));
pmd 287 arch/powerpc/mm/pgtable.c assert_spin_locked(pte_lockptr(mm, pmd));
pmd 317 arch/powerpc/mm/pgtable.c pmd_t pmd, *pmdp;
pmd 373 arch/powerpc/mm/pgtable.c pmd = READ_ONCE(*pmdp);
pmd 379 arch/powerpc/mm/pgtable.c if (pmd_none(pmd))
pmd 389 arch/powerpc/mm/pgtable.c if (pmd_is_serializing(pmd))
pmd 393 arch/powerpc/mm/pgtable.c if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
pmd 400 arch/powerpc/mm/pgtable.c if (pmd_is_leaf(pmd)) {
pmd 405 arch/powerpc/mm/pgtable.c if (is_hugepd(__hugepd(pmd_val(pmd)))) {
pmd 406 arch/powerpc/mm/pgtable.c hpdp = (hugepd_t *)&pmd;
pmd 410 arch/powerpc/mm/pgtable.c return pte_offset_kernel(&pmd, ea);
pmd 137 arch/powerpc/mm/pgtable_32.c pmd_t *pmd;
pmd 145 arch/powerpc/mm/pgtable_32.c pmd = pmd_offset(pud, addr & PAGE_MASK);
pmd 146 arch/powerpc/mm/pgtable_32.c if (pmd_present(*pmd)) {
pmd 147 arch/powerpc/mm/pgtable_32.c pte = pte_offset_map(pmd, addr & PAGE_MASK);
pmd 152 arch/powerpc/mm/pgtable_32.c *pmdp = pmd;
pmd 127 arch/powerpc/mm/pgtable_64.c struct page *pmd_page(pmd_t pmd)
pmd 129 arch/powerpc/mm/pgtable_64.c if (pmd_is_leaf(pmd)) {
pmd 130 arch/powerpc/mm/pgtable_64.c VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
pmd 131 arch/powerpc/mm/pgtable_64.c return pte_page(pmd_pte(pmd));
pmd 133 arch/powerpc/mm/pgtable_64.c return virt_to_page(pmd_page_vaddr(pmd));
pmd 372 arch/powerpc/mm/ptdump/hashpagetable.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
pmd 374 arch/powerpc/mm/ptdump/hashpagetable.c pte_t *pte = pte_offset_kernel(pmd, 0);
pmd 408 arch/powerpc/mm/ptdump/hashpagetable.c pmd_t *pmd = pmd_offset(pud, 0);
pmd 412 arch/powerpc/mm/ptdump/hashpagetable.c for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
pmd 414 arch/powerpc/mm/ptdump/hashpagetable.c if (!pmd_none(*pmd))
pmd 416 arch/powerpc/mm/ptdump/hashpagetable.c walk_pte(st, pmd, addr);
pmd 252 arch/powerpc/mm/ptdump/ptdump.c static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
pmd 254 arch/powerpc/mm/ptdump/ptdump.c pte_t *pte = pte_offset_kernel(pmd, 0);
pmd 267 arch/powerpc/mm/ptdump/ptdump.c pmd_t *pmd = pmd_offset(pud, 0);
pmd 271 arch/powerpc/mm/ptdump/ptdump.c for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
pmd 273 arch/powerpc/mm/ptdump/ptdump.c if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
pmd 275 arch/powerpc/mm/ptdump/ptdump.c walk_pte(st, pmd, addr);
pmd 277 arch/powerpc/mm/ptdump/ptdump.c note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
pmd 16 arch/riscv/include/asm/pgalloc.h pmd_t *pmd, pte_t *pte)
pmd 20 arch/riscv/include/asm/pgalloc.h set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pmd 24 arch/riscv/include/asm/pgalloc.h pmd_t *pmd, pgtable_t pte)
pmd 28 arch/riscv/include/asm/pgalloc.h set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
pmd 32 arch/riscv/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 34 arch/riscv/include/asm/pgalloc.h unsigned long pfn = virt_to_pfn(pmd);
pmd 40 arch/riscv/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 70 arch/riscv/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 72 arch/riscv/include/asm/pgalloc.h free_page((unsigned long)pmd);
pmd 75 arch/riscv/include/asm/pgalloc.h #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
pmd 23 arch/riscv/include/asm/pgtable-64.h unsigned long pmd;
pmd 26 arch/riscv/include/asm/pgtable-64.h #define pmd_val(x) ((x).pmd)
pmd 73 arch/riscv/include/asm/pgtable-64.h static inline unsigned long _pmd_pfn(pmd_t pmd)
pmd 75 arch/riscv/include/asm/pgtable-64.h return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
pmd 123 arch/riscv/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 125 arch/riscv/include/asm/pgtable.h return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
pmd 128 arch/riscv/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 130 arch/riscv/include/asm/pgtable.h return (pmd_val(pmd) == 0);
pmd 133 arch/riscv/include/asm/pgtable.h static inline int pmd_bad(pmd_t pmd)
pmd 135 arch/riscv/include/asm/pgtable.h return !pmd_present(pmd);
pmd 138 arch/riscv/include/asm/pgtable.h static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
pmd 140 arch/riscv/include/asm/pgtable.h *pmdp = pmd;
pmd 168 arch/riscv/include/asm/pgtable.h static inline struct page *pmd_page(pmd_t pmd)
pmd 170 arch/riscv/include/asm/pgtable.h return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
pmd 173 arch/riscv/include/asm/pgtable.h static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pmd 175 arch/riscv/include/asm/pgtable.h return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
pmd 196 arch/riscv/include/asm/pgtable.h static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
pmd 198 arch/riscv/include/asm/pgtable.h return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
pmd 218 arch/riscv/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 256 arch/riscv/mm/fault.c pmd = pmd_offset(pud, addr);
pmd 260 arch/riscv/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 11 arch/riscv/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 13 arch/riscv/mm/hugetlbpage.c return pmd_present(pmd) &&
pmd 14 arch/riscv/mm/hugetlbpage.c (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
pmd 82 arch/s390/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t;
pmd 91 arch/s390/include/asm/page.h #define pmd_val(x) ((x).pmd)
pmd 94 arch/s390/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 98 arch/s390/include/asm/pgalloc.h pgtable_pmd_page_dtor(virt_to_page(pmd));
pmd 99 arch/s390/include/asm/pgalloc.h crst_table_free(mm, (unsigned long *) pmd);
pmd 112 arch/s390/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 114 arch/s390/include/asm/pgalloc.h pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
pmd 141 arch/s390/include/asm/pgalloc.h pmd_t *pmd, pgtable_t pte)
pmd 143 arch/s390/include/asm/pgalloc.h pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
pmd 146 arch/s390/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
pmd 148 arch/s390/include/asm/pgalloc.h #define pmd_pgtable(pmd) \
pmd 149 arch/s390/include/asm/pgalloc.h (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
pmd 695 arch/s390/include/asm/pgtable.h static inline int pmd_large(pmd_t pmd)
pmd 697 arch/s390/include/asm/pgtable.h return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
pmd 700 arch/s390/include/asm/pgtable.h static inline int pmd_bad(pmd_t pmd)
pmd 702 arch/s390/include/asm/pgtable.h if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
pmd 704 arch/s390/include/asm/pgtable.h if (pmd_large(pmd))
pmd 705 arch/s390/include/asm/pgtable.h return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
pmd 706 arch/s390/include/asm/pgtable.h return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
pmd 733 arch/s390/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd)
pmd 735 arch/s390/include/asm/pgtable.h return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
pmd 738 arch/s390/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd)
pmd 740 arch/s390/include/asm/pgtable.h return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
pmd 743 arch/s390/include/asm/pgtable.h static inline unsigned long pmd_pfn(pmd_t pmd)
pmd 748 arch/s390/include/asm/pgtable.h if (pmd_large(pmd))
pmd 750 arch/s390/include/asm/pgtable.h return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
pmd 754 arch/s390/include/asm/pgtable.h static inline int pmd_write(pmd_t pmd)
pmd 756 arch/s390/include/asm/pgtable.h return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
pmd 765 arch/s390/include/asm/pgtable.h static inline int pmd_dirty(pmd_t pmd)
pmd 768 arch/s390/include/asm/pgtable.h if (pmd_large(pmd))
pmd 769 arch/s390/include/asm/pgtable.h dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
pmd 773 arch/s390/include/asm/pgtable.h static inline int pmd_young(pmd_t pmd)
pmd 776 arch/s390/include/asm/pgtable.h if (pmd_large(pmd))
pmd 777 arch/s390/include/asm/pgtable.h young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
pmd 817 arch/s390/include/asm/pgtable.h static inline int pmd_protnone(pmd_t pmd)
pmd 820 arch/s390/include/asm/pgtable.h return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
pmd 844 arch/s390/include/asm/pgtable.h static inline int pmd_soft_dirty(pmd_t pmd)
pmd 846 arch/s390/include/asm/pgtable.h return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
pmd 849 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
pmd 851 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
pmd 852 arch/s390/include/asm/pgtable.h return pmd;
pmd 855 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
pmd 857 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
pmd 858 arch/s390/include/asm/pgtable.h return pmd;
pmd 1219 arch/s390/include/asm/pgtable.h #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
pmd 1271 arch/s390/include/asm/pgtable.h static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
pmd 1273 arch/s390/include/asm/pgtable.h return (pte_t *) pmd_deref(*pmd) + pte_index(address);
pmd 1276 arch/s390/include/asm/pgtable.h #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
pmd 1277 arch/s390/include/asm/pgtable.h #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
pmd 1291 arch/s390/include/asm/pgtable.h #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
pmd 1296 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_wrprotect(pmd_t pmd)
pmd 1298 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
pmd 1299 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
pmd 1300 arch/s390/include/asm/pgtable.h return pmd;
pmd 1303 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkwrite(pmd_t pmd)
pmd 1305 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
pmd 1306 arch/s390/include/asm/pgtable.h if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
pmd 1307 arch/s390/include/asm/pgtable.h return pmd;
pmd 1308 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
pmd 1309 arch/s390/include/asm/pgtable.h return pmd;
pmd 1312 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkclean(pmd_t pmd)
pmd 1314 arch/s390/include/asm/pgtable.h if (pmd_large(pmd)) {
pmd 1315 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
pmd 1316 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
pmd 1318 arch/s390/include/asm/pgtable.h return pmd;
pmd 1321 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkdirty(pmd_t pmd)
pmd 1323 arch/s390/include/asm/pgtable.h if (pmd_large(pmd)) {
pmd 1324 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
pmd 1326 arch/s390/include/asm/pgtable.h if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
pmd 1327 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
pmd 1329 arch/s390/include/asm/pgtable.h return pmd;
pmd 1386 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkyoung(pmd_t pmd)
pmd 1388 arch/s390/include/asm/pgtable.h if (pmd_large(pmd)) {
pmd 1389 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
pmd 1390 arch/s390/include/asm/pgtable.h if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
pmd 1391 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
pmd 1393 arch/s390/include/asm/pgtable.h return pmd;
pmd 1396 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkold(pmd_t pmd)
pmd 1398 arch/s390/include/asm/pgtable.h if (pmd_large(pmd)) {
pmd 1399 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
pmd 1400 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
pmd 1402 arch/s390/include/asm/pgtable.h return pmd;
pmd 1405 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 1407 arch/s390/include/asm/pgtable.h if (pmd_large(pmd)) {
pmd 1408 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
pmd 1411 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= massage_pgprot_pmd(newprot);
pmd 1412 arch/s390/include/asm/pgtable.h if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
pmd 1413 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
pmd 1414 arch/s390/include/asm/pgtable.h if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
pmd 1415 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
pmd 1416 arch/s390/include/asm/pgtable.h return pmd;
pmd 1418 arch/s390/include/asm/pgtable.h pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
pmd 1419 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= massage_pgprot_pmd(newprot);
pmd 1420 arch/s390/include/asm/pgtable.h return pmd;
pmd 1531 arch/s390/include/asm/pgtable.h pmd_t pmd = *pmdp;
pmd 1533 arch/s390/include/asm/pgtable.h pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
pmd 1534 arch/s390/include/asm/pgtable.h return pmd_young(pmd);
pmd 1553 arch/s390/include/asm/pgtable.h static inline pmd_t pmd_mkhuge(pmd_t pmd)
pmd 1555 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
pmd 1556 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
pmd 1557 arch/s390/include/asm/pgtable.h pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
pmd 1558 arch/s390/include/asm/pgtable.h return pmd;
pmd 1574 arch/s390/include/asm/pgtable.h pmd_t pmd = *pmdp;
pmd 1576 arch/s390/include/asm/pgtable.h return pmd;
pmd 1592 arch/s390/include/asm/pgtable.h pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
pmd 1594 arch/s390/include/asm/pgtable.h return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
pmd 1601 arch/s390/include/asm/pgtable.h pmd_t pmd = *pmdp;
pmd 1603 arch/s390/include/asm/pgtable.h if (pmd_write(pmd))
pmd 1604 arch/s390/include/asm/pgtable.h pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
pmd 1618 arch/s390/include/asm/pgtable.h static inline int pmd_trans_huge(pmd_t pmd)
pmd 1620 arch/s390/include/asm/pgtable.h return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
pmd 86 arch/s390/include/asm/tlb.h static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
pmd 91 arch/s390/include/asm/tlb.h pgtable_pmd_page_dtor(virt_to_page(pmd));
pmd 96 arch/s390/include/asm/tlb.h tlb_remove_table(tlb, pmd);
pmd 134 arch/s390/mm/dump_pagetables.c pmd_t *pmd, unsigned long addr)
pmd 142 arch/s390/mm/dump_pagetables.c pte = pte_offset_kernel(pmd, addr);
pmd 154 arch/s390/mm/dump_pagetables.c pmd_t *pmd;
pmd 164 arch/s390/mm/dump_pagetables.c pmd = pmd_offset(pud, addr);
pmd 165 arch/s390/mm/dump_pagetables.c for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
pmd 167 arch/s390/mm/dump_pagetables.c if (!pmd_none(*pmd)) {
pmd 168 arch/s390/mm/dump_pagetables.c if (pmd_large(*pmd)) {
pmd 169 arch/s390/mm/dump_pagetables.c prot = pmd_val(*pmd) &
pmd 174 arch/s390/mm/dump_pagetables.c walk_pte_level(m, st, pmd, addr);
pmd 548 arch/s390/mm/gmap.c pmd_t *pmd;
pmd 591 arch/s390/mm/gmap.c pmd = pmd_offset(pud, vmaddr);
pmd 592 arch/s390/mm/gmap.c VM_BUG_ON(pmd_none(*pmd));
pmd 594 arch/s390/mm/gmap.c if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
pmd 600 arch/s390/mm/gmap.c ptl = pmd_lock(mm, pmd);
pmd 606 arch/s390/mm/gmap.c if (pmd_large(*pmd)) {
pmd 607 arch/s390/mm/gmap.c *table = (pmd_val(*pmd) &
pmd 611 arch/s390/mm/gmap.c *table = pmd_val(*pmd) &
pmd 615 arch/s390/mm/gmap.c !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
pmd 2512 arch/s390/mm/gmap.c static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
pmd 2521 arch/s390/mm/gmap.c ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
pmd 2572 arch/s390/mm/gmap.c pmd_t *pmd = (pmd_t *)pte;
pmd 2574 arch/s390/mm/gmap.c struct page *page = pmd_page(*pmd);
pmd 2582 arch/s390/mm/gmap.c if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
pmd 2583 arch/s390/mm/gmap.c !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
pmd 2586 arch/s390/mm/gmap.c start = pmd_val(*pmd) & HPAGE_MASK;
pmd 237 arch/s390/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 239 arch/s390/mm/hugetlbpage.c return pmd_large(pmd);
pmd 108 arch/s390/mm/page-states.c pmd_t *pmd;
pmd 110 arch/s390/mm/page-states.c pmd = pmd_offset(pud, addr);
pmd 113 arch/s390/mm/page-states.c if (pmd_none(*pmd) || pmd_large(*pmd))
pmd 115 arch/s390/mm/page-states.c page = virt_to_page(pmd_val(*pmd));
pmd 117 arch/s390/mm/page-states.c } while (pmd++, addr = next, addr != end);
pmd 344 arch/s390/mm/pageattr.c pmd_t *pmd;
pmd 352 arch/s390/mm/pageattr.c pmd = pmd_offset(pud, address);
pmd 353 arch/s390/mm/pageattr.c pte = pte_offset_kernel(pmd, address);
pmd 419 arch/s390/mm/pgtable.c pmd_t *pmd;
pmd 428 arch/s390/mm/pgtable.c pmd = pmd_alloc(mm, pud, addr);
pmd 429 arch/s390/mm/pgtable.c return pmd;
pmd 12 arch/sh/include/asm/pgalloc.h extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
pmd 14 arch/sh/include/asm/pgalloc.h extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
pmd 17 arch/sh/include/asm/pgalloc.h static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pmd 20 arch/sh/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)pte));
pmd 23 arch/sh/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pmd 26 arch/sh/include/asm/pgalloc.h set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
pmd 28 arch/sh/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 32 arch/sh/include/asm/pgtable-3level.h typedef struct { unsigned long long pmd; } pmd_t;
pmd 33 arch/sh/include/asm/pgtable-3level.h #define pmd_val(x) ((x).pmd)
pmd 404 arch/sh/include/asm/pgtable_32.h #define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
pmd 405 arch/sh/include/asm/pgtable_32.h #define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
pmd 71 arch/sh/include/asm/pgtable_64.h #define pmd_page(pmd) \
pmd 72 arch/sh/include/asm/pgtable_64.h (virt_to_page(pmd_val(pmd)))
pmd 213 arch/sh/mm/cache-sh4.c pmd_t *pmd;
pmd 228 arch/sh/mm/cache-sh4.c pmd = pmd_offset(pud, address);
pmd 229 arch/sh/mm/cache-sh4.c pte = pte_offset_kernel(pmd, address);
pmd 387 arch/sh/mm/cache-sh5.c pmd_t *pmd;
pmd 404 arch/sh/mm/cache-sh5.c pmd = pmd_offset(pud, addr);
pmd 405 arch/sh/mm/cache-sh5.c if (pmd_none(*pmd) || pmd_bad(*pmd))
pmd 408 arch/sh/mm/cache-sh5.c pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 57 arch/sh/mm/fault.c pmd_t *pmd;
pmd 81 arch/sh/mm/fault.c pmd = pmd_offset(pud, addr);
pmd 83 arch/sh/mm/fault.c printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
pmd 84 arch/sh/mm/fault.c (u64)pmd_val(*pmd));
pmd 86 arch/sh/mm/fault.c if (pmd_none(*pmd))
pmd 89 arch/sh/mm/fault.c if (pmd_bad(*pmd)) {
pmd 95 arch/sh/mm/fault.c if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
pmd 98 arch/sh/mm/fault.c pte = pte_offset_kernel(pmd, addr);
pmd 111 arch/sh/mm/fault.c pmd_t *pmd, *pmd_k;
pmd 127 arch/sh/mm/fault.c pmd = pmd_offset(pud, address);
pmd 132 arch/sh/mm/fault.c if (!pmd_present(*pmd))
pmd 133 arch/sh/mm/fault.c set_pmd(pmd, *pmd_k);
pmd 140 arch/sh/mm/fault.c BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
pmd 30 arch/sh/mm/hugetlbpage.c pmd_t *pmd;
pmd 37 arch/sh/mm/hugetlbpage.c pmd = pmd_alloc(mm, pud, addr);
pmd 38 arch/sh/mm/hugetlbpage.c if (pmd)
pmd 39 arch/sh/mm/hugetlbpage.c pte = pte_alloc_map(mm, pmd, addr);
pmd 51 arch/sh/mm/hugetlbpage.c pmd_t *pmd;
pmd 58 arch/sh/mm/hugetlbpage.c pmd = pmd_offset(pud, addr);
pmd 59 arch/sh/mm/hugetlbpage.c if (pmd)
pmd 60 arch/sh/mm/hugetlbpage.c pte = pte_offset_map(pmd, addr);
pmd 67 arch/sh/mm/hugetlbpage.c int pmd_huge(pmd_t pmd)
pmd 49 arch/sh/mm/init.c pmd_t *pmd;
pmd 63 arch/sh/mm/init.c pmd = pmd_alloc(NULL, pud, addr);
pmd 64 arch/sh/mm/init.c if (unlikely(!pmd)) {
pmd 65 arch/sh/mm/init.c pmd_ERROR(*pmd);
pmd 69 arch/sh/mm/init.c return pte_offset_kernel(pmd, addr);
pmd 129 arch/sh/mm/init.c pmd_t *pmd;
pmd 131 arch/sh/mm/init.c pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
pmd 132 arch/sh/mm/init.c if (!pmd)
pmd 135 arch/sh/mm/init.c pud_populate(&init_mm, pud, pmd);
pmd 136 arch/sh/mm/init.c BUG_ON(pmd != pmd_offset(pud, 0));
pmd 142 arch/sh/mm/init.c static pte_t * __init one_page_table_init(pmd_t *pmd)
pmd 144 arch/sh/mm/init.c if (pmd_none(*pmd)) {
pmd 151 arch/sh/mm/init.c pmd_populate_kernel(&init_mm, pmd, pte);
pmd 152 arch/sh/mm/init.c BUG_ON(pte != pte_offset_kernel(pmd, 0));
pmd 155 arch/sh/mm/init.c return pte_offset_kernel(pmd, 0);
pmd 158 arch/sh/mm/init.c static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
pmd 169 arch/sh/mm/init.c pmd_t *pmd;
pmd 183 arch/sh/mm/init.c pmd = one_md_table_init(pud);
pmd 185 arch/sh/mm/init.c pmd += k;
pmd 187 arch/sh/mm/init.c for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
pmd 188 arch/sh/mm/init.c pte = page_table_kmap_check(one_page_table_init(pmd),
pmd 189 arch/sh/mm/init.c pmd, vaddr, pte);
pmd 44 arch/sh/mm/pgtable.c void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pmd 46 arch/sh/mm/pgtable.c set_pud(pud, __pud((unsigned long)pmd));
pmd 54 arch/sh/mm/pgtable.c void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 56 arch/sh/mm/pgtable.c kmem_cache_free(pmd_cachep, pmd);
pmd 27 arch/sh/mm/tlbex_32.c pmd_t *pmd;
pmd 48 arch/sh/mm/tlbex_32.c pmd = pmd_offset(pud, address);
pmd 49 arch/sh/mm/tlbex_32.c if (pmd_none_or_clear_bad(pmd))
pmd 51 arch/sh/mm/tlbex_32.c pte = pte_offset_kernel(pmd, address);
pmd 48 arch/sh/mm/tlbex_64.c pmd_t *pmd;
pmd 65 arch/sh/mm/tlbex_64.c pmd = pmd_offset(pud, address);
pmd 66 arch/sh/mm/tlbex_64.c if (pmd_none(*pmd) || !pmd_present(*pmd))
pmd 69 arch/sh/mm/tlbex_64.c pte = pte_offset_kernel(pmd, address);
pmd 71 arch/sparc/include/asm/page_64.h typedef struct { unsigned long pmd; } pmd_t;
pmd 78 arch/sparc/include/asm/page_64.h #define pmd_val(x) ((x).pmd)
pmd 45 arch/sparc/include/asm/pgalloc_32.h static inline void free_pmd_fast(pmd_t * pmd)
pmd 47 arch/sparc/include/asm/pgalloc_32.h srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
pmd 50 arch/sparc/include/asm/pgalloc_32.h #define pmd_free(mm, pmd) free_pmd_fast(pmd)
pmd 51 arch/sparc/include/asm/pgalloc_32.h #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
pmd 54 arch/sparc/include/asm/pgalloc_32.h #define pmd_pgtable(pmd) pmd_page(pmd)
pmd 36 arch/sparc/include/asm/pgalloc_64.h static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
pmd 38 arch/sparc/include/asm/pgalloc_64.h pud_set(pud, pmd);
pmd 58 arch/sparc/include/asm/pgalloc_64.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
pmd 60 arch/sparc/include/asm/pgalloc_64.h kmem_cache_free(pgtable_cache, pmd);
pmd 109 arch/sparc/include/asm/pgalloc_64.h #define __pmd_free_tlb(tlb, pmd, addr) \
pmd 110 arch/sparc/include/asm/pgalloc_64.h pgtable_free_tlb(tlb, pmd, false)
pmd 128 arch/sparc/include/asm/pgtable_32.h static inline struct page *pmd_page(pmd_t pmd)
pmd 130 arch/sparc/include/asm/pgtable_32.h if (srmmu_device_memory(pmd_val(pmd)))
pmd 132 arch/sparc/include/asm/pgtable_32.h return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
pmd 165 arch/sparc/include/asm/pgtable_32.h static inline int pmd_bad(pmd_t pmd)
pmd 167 arch/sparc/include/asm/pgtable_32.h return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
pmd 170 arch/sparc/include/asm/pgtable_32.h static inline int pmd_present(pmd_t pmd)
pmd 172 arch/sparc/include/asm/pgtable_32.h return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
pmd 175 arch/sparc/include/asm/pgtable_32.h static inline int pmd_none(pmd_t pmd)
pmd 177 arch/sparc/include/asm/pgtable_32.h return !pmd_val(pmd);
pmd 339 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmd 341 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 413 arch/sparc/include/asm/pgtable_64.h static inline bool is_hugetlb_pmd(pmd_t pmd)
pmd 415 arch/sparc/include/asm/pgtable_64.h return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
pmd 424 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkhuge(pmd_t pmd)
pmd 426 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 686 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_large(pmd_t pmd)
pmd 688 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 693 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_pfn(pmd_t pmd)
pmd 695 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 701 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_write(pmd_t pmd)
pmd 703 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 711 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_dirty(pmd_t pmd)
pmd 713 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 718 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_young(pmd_t pmd)
pmd 720 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 725 arch/sparc/include/asm/pgtable_64.h static inline unsigned long pmd_trans_huge(pmd_t pmd)
pmd 727 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 732 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkold(pmd_t pmd)
pmd 734 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 741 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_wrprotect(pmd_t pmd)
pmd 743 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 750 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkdirty(pmd_t pmd)
pmd 752 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 759 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkclean(pmd_t pmd)
pmd 761 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 768 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkyoung(pmd_t pmd)
pmd 770 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 777 arch/sparc/include/asm/pgtable_64.h static inline pmd_t pmd_mkwrite(pmd_t pmd)
pmd 779 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 794 arch/sparc/include/asm/pgtable_64.h static inline int pmd_present(pmd_t pmd)
pmd 796 arch/sparc/include/asm/pgtable_64.h return pmd_val(pmd) != 0UL;
pmd 799 arch/sparc/include/asm/pgtable_64.h #define pmd_none(pmd) (!pmd_val(pmd))
pmd 807 arch/sparc/include/asm/pgtable_64.h #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
pmd 819 arch/sparc/include/asm/pgtable_64.h pmd_t *pmdp, pmd_t pmd);
pmd 822 arch/sparc/include/asm/pgtable_64.h pmd_t *pmdp, pmd_t pmd)
pmd 824 arch/sparc/include/asm/pgtable_64.h *pmdp = pmd;
pmd 837 arch/sparc/include/asm/pgtable_64.h static inline unsigned long __pmd_page(pmd_t pmd)
pmd 839 arch/sparc/include/asm/pgtable_64.h pte_t pte = __pte(pmd_val(pmd));
pmd 857 arch/sparc/include/asm/pgtable_64.h #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
pmd 942 arch/sparc/include/asm/pgtable_64.h pmd_t pmd = *pmdp;
pmd 944 arch/sparc/include/asm/pgtable_64.h return pmd;
pmd 995 arch/sparc/include/asm/pgtable_64.h pmd_t *pmd);
pmd 1625 arch/sparc/kernel/smp_64.c pmd_t *pmd;
pmd 1646 arch/sparc/kernel/smp_64.c pmd = pmd_offset(pud, addr);
pmd 1647 arch/sparc/kernel/smp_64.c if (!pmd_present(*pmd)) {
pmd 1653 arch/sparc/kernel/smp_64.c pmd_populate_kernel(&init_mm, pmd, new);
pmd 354 arch/sparc/mm/fault_32.c pmd_t *pmd, *pmd_k;
pmd 366 arch/sparc/mm/fault_32.c pmd = pmd_offset(pgd, address);
pmd 369 arch/sparc/mm/fault_32.c if (pmd_present(*pmd) || !pmd_present(*pmd_k))
pmd 372 arch/sparc/mm/fault_32.c *pmd = *pmd_k;
pmd 281 arch/sparc/mm/hugetlbpage.c pmd_t *pmd;
pmd 289 arch/sparc/mm/hugetlbpage.c pmd = pmd_alloc(mm, pud, addr);
pmd 290 arch/sparc/mm/hugetlbpage.c if (!pmd)
pmd 293 arch/sparc/mm/hugetlbpage.c return (pte_t *)pmd;
pmd 294 arch/sparc/mm/hugetlbpage.c return pte_alloc_map(mm, pmd, addr);
pmd 302 arch/sparc/mm/hugetlbpage.c pmd_t *pmd;
pmd 312 arch/sparc/mm/hugetlbpage.c pmd = pmd_offset(pud, addr);
pmd 313 arch/sparc/mm/hugetlbpage.c if (pmd_none(*pmd))
pmd 315 arch/sparc/mm/hugetlbpage.c if (is_hugetlb_pmd(*pmd))
pmd 315 arch/sparc/mm/hugetlbpage.c if (is_hugetlb_pmd(*pmd)) pmd 316 arch/sparc/mm/hugetlbpage.c return (pte_t *)pmd; pmd 317 arch/sparc/mm/hugetlbpage.c return pte_offset_map(pmd, addr); pmd 393 arch/sparc/mm/hugetlbpage.c int pmd_huge(pmd_t pmd) pmd 395 arch/sparc/mm/hugetlbpage.c return !pmd_none(pmd) && pmd 396 arch/sparc/mm/hugetlbpage.c (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID; pmd 405 arch/sparc/mm/hugetlbpage.c static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, pmd 408 arch/sparc/mm/hugetlbpage.c pgtable_t token = pmd_pgtable(*pmd); pmd 410 arch/sparc/mm/hugetlbpage.c pmd_clear(pmd); pmd 419 arch/sparc/mm/hugetlbpage.c pmd_t *pmd; pmd 424 arch/sparc/mm/hugetlbpage.c pmd = pmd_offset(pud, addr); pmd 427 arch/sparc/mm/hugetlbpage.c if (pmd_none(*pmd)) pmd 429 arch/sparc/mm/hugetlbpage.c if (is_hugetlb_pmd(*pmd)) pmd 430 arch/sparc/mm/hugetlbpage.c pmd_clear(pmd); pmd 432 arch/sparc/mm/hugetlbpage.c hugetlb_free_pte_range(tlb, pmd, addr); pmd 433 arch/sparc/mm/hugetlbpage.c } while (pmd++, addr = next, addr != end); pmd 446 arch/sparc/mm/hugetlbpage.c pmd = pmd_offset(pud, start); pmd 448 arch/sparc/mm/hugetlbpage.c pmd_free_tlb(tlb, pmd, start); pmd 1657 arch/sparc/mm/init_64.c pmd_t *pmd; pmd 1684 arch/sparc/mm/init_64.c pmd = pmd_offset(pud, addr); pmd 1685 arch/sparc/mm/init_64.c if (pmd_none(*pmd)) pmd 1688 arch/sparc/mm/init_64.c if (pmd_large(*pmd)) pmd 1689 arch/sparc/mm/init_64.c return pfn_valid(pmd_pfn(*pmd)); pmd 1691 arch/sparc/mm/init_64.c pte = pte_offset_kernel(pmd, addr); pmd 1740 arch/sparc/mm/init_64.c pmd_t *pmd) pmd 1750 arch/sparc/mm/init_64.c pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; pmd 1767 arch/sparc/mm/init_64.c pmd_val(*pmd) = pte_val; pmd 1771 arch/sparc/mm/init_64.c pmd++; pmd 1804 arch/sparc/mm/init_64.c pmd_t *pmd; pmd 1833 arch/sparc/mm/init_64.c pmd = pmd_offset(pud, vstart); pmd 1834 arch/sparc/mm/init_64.c if (pmd_none(*pmd)) { pmd 1838 arch/sparc/mm/init_64.c vstart = kernel_map_hugepmd(vstart, vend, pmd); pmd 1846 arch/sparc/mm/init_64.c pmd_populate_kernel(&init_mm, pmd, new); pmd 1849 arch/sparc/mm/init_64.c pte = pte_offset_kernel(pmd, vstart); pmd 2616 arch/sparc/mm/init_64.c pmd_t *pmd; pmd 2625 arch/sparc/mm/init_64.c pmd = pmd_offset(pud, vstart); pmd 2626 arch/sparc/mm/init_64.c pte = pmd_val(*pmd); pmd 2633 arch/sparc/mm/init_64.c pmd_val(*pmd) = pte_base | __pa(block); pmd 2941 arch/sparc/mm/init_64.c pmd_t *pmd) pmd 2945 arch/sparc/mm/init_64.c pmd_t entry = *pmd; pmd 40 arch/sparc/mm/leon_mm.c unsigned int pgd, pmd, ped; pmd 93 arch/sparc/mm/leon_mm.c pmd = LEON_BYPASS_LOAD_PA(ptr); pmd 94 arch/sparc/mm/leon_mm.c if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { pmd 98 arch/sparc/mm/leon_mm.c pte = pmd; pmd 99 arch/sparc/mm/leon_mm.c paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; pmd 102 arch/sparc/mm/leon_mm.c if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { pmd 109 arch/sparc/mm/leon_mm.c printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd); pmd 111 arch/sparc/mm/leon_mm.c ptr = (pmd & SRMMU_PTD_PMASK) << 4; pmd 106 arch/sparc/mm/srmmu.c static inline int srmmu_pmd_none(pmd_t pmd) pmd 107 arch/sparc/mm/srmmu.c { return !(pmd_val(pmd) & 0xFFFFFFF); } pmd 299 arch/sparc/mm/srmmu.c pmd_t *pmd; pmd 332 arch/sparc/mm/srmmu.c pmd = pmd_offset(__nocache_fix(pgd), vaddr); pmd 333 arch/sparc/mm/srmmu.c pte = pte_offset_kernel(__nocache_fix(pmd), vaddr); pmd 909 arch/sparc/mm/srmmu.c pmd_t *pmd; pmd 970 arch/sparc/mm/srmmu.c pmd = pmd_offset(pgd, PKMAP_BASE); pmd 971 arch/sparc/mm/srmmu.c pte = pte_offset_kernel(pmd,
PKMAP_BASE); pmd 147 arch/sparc/mm/tlb.c pmd_t pmd) pmd 152 arch/sparc/mm/tlb.c pte = pte_offset_map(&pmd, vaddr); pmd 168 arch/sparc/mm/tlb.c pmd_t orig, pmd_t pmd) pmd 173 arch/sparc/mm/tlb.c if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { pmd 182 arch/sparc/mm/tlb.c if (pmd_val(pmd) & _PAGE_PMD_HUGE) { pmd 183 arch/sparc/mm/tlb.c if (is_huge_zero_page(pmd_page(pmd))) pmd 220 arch/sparc/mm/tlb.c pmd_t *pmdp, pmd_t pmd) pmd 224 arch/sparc/mm/tlb.c *pmdp = pmd; pmd 225 arch/sparc/mm/tlb.c __set_pmd_acct(mm, addr, orig, pmd); pmd 229 arch/sparc/mm/tlb.c unsigned long address, pmd_t *pmdp, pmd_t pmd) pmd 235 arch/sparc/mm/tlb.c } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); pmd 236 arch/sparc/mm/tlb.c __set_pmd_acct(vma->vm_mm, address, old, pmd); pmd 38 arch/um/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t; pmd 50 arch/um/include/asm/page.h #define pmd_val(x) ((x).pmd) pmd 61 arch/um/include/asm/page.h typedef struct { unsigned long pmd; } pmd_t; pmd 62 arch/um/include/asm/page.h #define pmd_val(x) ((x).pmd) pmd 15 arch/um/include/asm/pgalloc.h #define pmd_populate_kernel(mm, pmd, pte) \ pmd 16 arch/um/include/asm/pgalloc.h set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte))) pmd 18 arch/um/include/asm/pgalloc.h #define pmd_populate(mm, pmd, pte) \ pmd 19 arch/um/include/asm/pgalloc.h set_pmd(pmd, __pmd(_PAGE_TABLE + \ pmd 22 arch/um/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd) pmd 38 arch/um/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd 40 arch/um/include/asm/pgalloc.h free_page((unsigned long)pmd); pmd 60 arch/um/include/asm/pgtable-3level.h #define pud_populate(mm, pud, pmd) \ pmd 61 arch/um/include/asm/pgtable-3level.h set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) pmd 109 arch/um/include/asm/pgtable.h #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK) pmd 327 arch/um/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) pmd 330 arch/um/include/asm/pgtable.h #define pmd_page_vaddr(pmd) \ pmd 331 arch/um/include/asm/pgtable.h ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) pmd 65 arch/um/kernel/mem.c static void __init one_page_table_init(pmd_t *pmd) pmd 67 arch/um/kernel/mem.c if (pmd_none(*pmd)) { pmd 74 arch/um/kernel/mem.c set_pmd(pmd, __pmd(_KERNPG_TABLE + pmd 76 arch/um/kernel/mem.c if (pte != pte_offset_kernel(pmd, 0)) pmd 100 arch/um/kernel/mem.c pmd_t *pmd; pmd 113 arch/um/kernel/mem.c pmd = pmd_offset(pud, vaddr); pmd 114 arch/um/kernel/mem.c for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) { pmd 115 arch/um/kernel/mem.c one_page_table_init(pmd); pmd 128 arch/um/kernel/mem.c pmd_t *pmd; pmd 148 arch/um/kernel/mem.c pmd = pmd_offset(pud, vaddr); pmd 149 arch/um/kernel/mem.c pte = pte_offset_kernel(pmd, vaddr); pmd 215 arch/um/kernel/mem.c pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL); pmd 217 arch/um/kernel/mem.c if (pmd) pmd 218 arch/um/kernel/mem.c memset(pmd, 0, PAGE_SIZE); pmd 220 arch/um/kernel/mem.c return pmd; pmd 23 arch/um/kernel/skas/mmu.c pmd_t *pmd; pmd 31 arch/um/kernel/skas/mmu.c pmd = pmd_alloc(mm, pud, proc); pmd 32 arch/um/kernel/skas/mmu.c if (!pmd) pmd 35 arch/um/kernel/skas/mmu.c pte = pte_alloc_map(mm, pmd, proc); pmd 44 arch/um/kernel/skas/mmu.c pmd_free(mm, pmd); pmd 21 arch/um/kernel/skas/uaccess.c pmd_t *pmd; pmd 34 arch/um/kernel/skas/uaccess.c pmd = pmd_offset(pud, addr); pmd 35 arch/um/kernel/skas/uaccess.c if (!pmd_present(*pmd)) pmd 38 arch/um/kernel/skas/uaccess.c return 
pte_offset_kernel(pmd, addr); pmd 221 arch/um/kernel/tlb.c static inline int update_pte_range(pmd_t *pmd, unsigned long addr, pmd 228 arch/um/kernel/tlb.c pte = pte_offset_kernel(pmd, addr); pmd 262 arch/um/kernel/tlb.c pmd_t *pmd; pmd 266 arch/um/kernel/tlb.c pmd = pmd_offset(pud, addr); pmd 269 arch/um/kernel/tlb.c if (!pmd_present(*pmd)) { pmd 270 arch/um/kernel/tlb.c if (hvc->force || pmd_newpage(*pmd)) { pmd 272 arch/um/kernel/tlb.c pmd_mkuptodate(*pmd); pmd 275 arch/um/kernel/tlb.c else ret = update_pte_range(pmd, addr, next, hvc); pmd 276 arch/um/kernel/tlb.c } while (pmd++, addr = next, ((addr < end) && !ret)); pmd 342 arch/um/kernel/tlb.c pmd_t *pmd; pmd 383 arch/um/kernel/tlb.c pmd = pmd_offset(pud, addr); pmd 384 arch/um/kernel/tlb.c if (!pmd_present(*pmd)) { pmd 388 arch/um/kernel/tlb.c if (pmd_newpage(*pmd)) { pmd 399 arch/um/kernel/tlb.c pte = pte_offset_kernel(pmd, addr); pmd 428 arch/um/kernel/tlb.c pmd_t *pmd; pmd 444 arch/um/kernel/tlb.c pmd = pmd_offset(pud, address); pmd 445 arch/um/kernel/tlb.c if (!pmd_present(*pmd)) pmd 448 arch/um/kernel/tlb.c pte = pte_offset_kernel(pmd, address); pmd 508 arch/um/kernel/tlb.c pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address) pmd 510 arch/um/kernel/tlb.c return pte_offset_kernel(pmd, address); pmd 517 arch/um/kernel/tlb.c pmd_t *pmd = pmd_offset(pud, addr); pmd 519 arch/um/kernel/tlb.c return pte_offset_map(pmd, addr); pmd 32 arch/um/kernel/trap.c pmd_t *pmd; pmd 108 arch/um/kernel/trap.c pmd = pmd_offset(pud, address); pmd 109 arch/um/kernel/trap.c pte = pte_offset_kernel(pmd, address); pmd 85 arch/unicore32/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd) pmd 202 arch/unicore32/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd)) pmd 203 arch/unicore32/include/asm/pgtable.h #define pmd_present(pmd) (pmd_val(pmd) & PMD_PRESENT) pmd 204 arch/unicore32/include/asm/pgtable.h #define pmd_bad(pmd) (((pmd_val(pmd) & \ pmd 219 arch/unicore32/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK)) pmd 220 arch/unicore32/include/asm/pgtable.h #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) pmd 132 arch/unicore32/include/asm/tlbflush.h static inline void flush_pmd_entry(pmd_t *pmd) pmd 143 arch/unicore32/include/asm/tlbflush.h : : "r" (pmd) : "r1", "r2"); pmd 147 arch/unicore32/include/asm/tlbflush.h : : "r" (pmd) : "cc"); pmd 151 arch/unicore32/include/asm/tlbflush.h static inline void clean_pmd_entry(pmd_t *pmd) pmd 156 arch/unicore32/include/asm/tlbflush.h : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc"); pmd 160 arch/unicore32/include/asm/tlbflush.h : : "r" (pmd) : "cc"); pmd 48 arch/unicore32/kernel/hibernate.c static pte_t *resume_one_page_table_init(pmd_t *pmd) pmd 50 arch/unicore32/kernel/hibernate.c if (pmd_none(*pmd)) { pmd 55 arch/unicore32/kernel/hibernate.c set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE)); pmd 57 arch/unicore32/kernel/hibernate.c BUG_ON(page_table != pte_offset_kernel(pmd, 0)); pmd 62 arch/unicore32/kernel/hibernate.c return pte_offset_kernel(pmd, 0); pmd 74 arch/unicore32/kernel/hibernate.c pmd_t *pmd; pmd 83 arch/unicore32/kernel/hibernate.c pmd = resume_one_md_table_init(pgd); pmd 84 arch/unicore32/kernel/hibernate.c if (!pmd) pmd 90 arch/unicore32/kernel/hibernate.c for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { pmd 99 arch/unicore32/kernel/hibernate.c pte = resume_one_page_table_init(pmd); pmd 50 arch/unicore32/mm/fault.c pmd_t *pmd;
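
The um and unicore32 fault paths indexed above all repeat one descending idiom: pmd = pmd_offset(pud, address); bail out if the entry fails the pmd_present()/pmd_none() check; otherwise pte = pte_offset_kernel(pmd, address). Below is a self-contained model of that walk with two toy levels; the table sizes, shifts, and the NULL-means-absent encoding are inventions of the sketch, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    #define PTRS_PER_PMD 8   /* toy sizes, not the kernel's */
    #define PTRS_PER_PTE 8
    #define PTE_SHIFT    12  /* 4 KiB toy pages */
    #define PMD_SHIFT    (PTE_SHIFT + 3)

    typedef unsigned long pte_t;
    typedef pte_t *pmd_t;            /* a pmd slot points at a pte table, or is NULL */

    /* pmd_offset/pte_offset_kernel analogues: pure index arithmetic. */
    static pmd_t *pmd_offset(pmd_t *pmd_table, unsigned long addr)
    {
            return &pmd_table[(addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];
    }

    static pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
    {
            return &(*pmd)[(addr >> PTE_SHIFT) & (PTRS_PER_PTE - 1)];
    }

    /* The walk the fault handlers repeat: a presence check at each level. */
    static pte_t *walk(pmd_t *pmd_table, unsigned long addr)
    {
            pmd_t *pmd = pmd_offset(pmd_table, addr);

            if (*pmd == NULL)        /* pmd_none()/!pmd_present() analogue */
                    return NULL;
            return pte_offset_kernel(pmd, addr);
    }

    int main(void)
    {
            pmd_t pmd_table[PTRS_PER_PMD] = { 0 };
            unsigned long addr = 0x9000;

            /* populate one pte table, as pmd_populate_kernel() would */
            pmd_table[(addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)] =
                    calloc(PTRS_PER_PTE, sizeof(pte_t));

            printf("pte slot for %#lx:  %p\n", addr, (void *)walk(pmd_table, addr));
            printf("pte slot for %#lx: %p\n", 0xff000UL, (void *)walk(pmd_table, 0xff000UL));
            return 0;
    }

The second lookup lands in an unpopulated slot and returns NULL, which is exactly the point where the real fault handlers either bail out or allocate.
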
pmd 61 arch/unicore32/mm/fault.c pmd = pmd_offset((pud_t *) pgd, addr); pmd 63 arch/unicore32/mm/fault.c printk(", *pmd=%08lx", pmd_val(*pmd)); pmd 65 arch/unicore32/mm/fault.c if (pmd_none(*pmd)) pmd 68 arch/unicore32/mm/fault.c if (pmd_bad(*pmd)) { pmd 74 arch/unicore32/mm/fault.c if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) pmd 77 arch/unicore32/mm/fault.c pte = pte_offset_map(pmd, addr); pmd 340 arch/unicore32/mm/fault.c pmd_t *pmd, *pmd_k; pmd 357 arch/unicore32/mm/fault.c pmd = pmd_offset((pud_t *) pgd, addr); pmd 362 arch/unicore32/mm/fault.c set_pmd(pmd, *pmd_k); pmd 363 arch/unicore32/mm/fault.c flush_pmd_entry(pmd); pmd 70 arch/unicore32/mm/ioremap.c pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr); pmd 72 arch/unicore32/mm/ioremap.c pmd = *pmdp; pmd 73 arch/unicore32/mm/ioremap.c if (!pmd_none(pmd)) { pmd 86 arch/unicore32/mm/ioremap.c if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) pmd 87 arch/unicore32/mm/ioremap.c pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); pmd 112 arch/unicore32/mm/ioremap.c pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); pmd 114 arch/unicore32/mm/ioremap.c set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect)); pmd 116 arch/unicore32/mm/ioremap.c flush_pmd_entry(pmd); pmd 141 arch/unicore32/mm/mmu.c static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, pmd 144 arch/unicore32/mm/mmu.c if (pmd_none(*pmd)) { pmd 152 arch/unicore32/mm/mmu.c __pmd_populate(pmd, __pa(pte) | prot); pmd 154 arch/unicore32/mm/mmu.c BUG_ON(pmd_bad(*pmd)); pmd 155 arch/unicore32/mm/mmu.c return pte_offset_kernel(pmd, addr); pmd 158 arch/unicore32/mm/mmu.c static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, pmd 162 arch/unicore32/mm/mmu.c pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); pmd 173 arch/unicore32/mm/mmu.c pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); pmd 180 arch/unicore32/mm/mmu.c pmd_t *p = pmd; pmd 183 arch/unicore32/mm/mmu.c set_pmd(pmd, __pmd(phys | type->prot_sect)); pmd 185 arch/unicore32/mm/mmu.c } while (pmd++, addr += SECTION_SIZE, addr != end); pmd 193 arch/unicore32/mm/mmu.c alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); pmd 466 arch/unicore32/mm/mmu.c pmd_t *pmd; pmd 468 arch/unicore32/mm/mmu.c pmd = pmd_off(pgd, i << PGDIR_SHIFT); pmd 469 arch/unicore32/mm/mmu.c set_pmd(pmd, __pmd(pmdval)); pmd 470 arch/unicore32/mm/mmu.c flush_pmd_entry(pmd); pmd 78 arch/unicore32/mm/pgd.c pmd_t *pmd; pmd 85 arch/unicore32/mm/pgd.c pmd = pmd_off(pgd, 0); pmd 86 arch/unicore32/mm/pgd.c if (pmd_none(*pmd)) pmd 88 arch/unicore32/mm/pgd.c if (pmd_bad(*pmd)) { pmd 89 arch/unicore32/mm/pgd.c pmd_ERROR(*pmd); pmd 90 arch/unicore32/mm/pgd.c pmd_clear(pmd); pmd 94 arch/unicore32/mm/pgd.c pte = pmd_pgtable(*pmd); pmd 95 arch/unicore32/mm/pgd.c pmd_clear(pmd); pmd 98 arch/unicore32/mm/pgd.c pmd_free(mm, pmd); pmd 362 arch/x86/entry/vsyscall/vsyscall_64.c pmd_t *pmd; pmd 372 arch/x86/entry/vsyscall/vsyscall_64.c pmd = pmd_offset(pud, VSYSCALL_ADDR); pmd 373 arch/x86/entry/vsyscall/vsyscall_64.c set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); pmd 155 arch/x86/include/asm/kexec.h pmd_t *pmd; pmd 453 arch/x86/include/asm/paravirt.h static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) pmd 455 arch/x86/include/asm/paravirt.h pmdval_t val = native_pmd_val(pmd); pmd 476 arch/x86/include/asm/paravirt.h static inline pmdval_t pmd_val(pmd_t pmd) pmd 482 arch/x86/include/asm/paravirt.h pmd.pmd, (u64)pmd.pmd >> 32); pmd 484 arch/x86/include/asm/paravirt.h ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd); pmd 64 arch/x86/include/asm/pgalloc.h pmd_t *pmd, pte_t *pte) pmd 67
arch/x86/include/asm/pgalloc.h set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); pmd 71 arch/x86/include/asm/pgalloc.h pmd_t *pmd, pte_t *pte) pmd 74 arch/x86/include/asm/pgalloc.h set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); pmd 77 arch/x86/include/asm/pgalloc.h static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pmd 83 arch/x86/include/asm/pgalloc.h set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); pmd 86 arch/x86/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd) pmd 106 arch/x86/include/asm/pgalloc.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd 108 arch/x86/include/asm/pgalloc.h BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); pmd 109 arch/x86/include/asm/pgalloc.h pgtable_pmd_page_dtor(virt_to_page(pmd)); pmd 110 arch/x86/include/asm/pgalloc.h free_page((unsigned long)pmd); pmd 113 arch/x86/include/asm/pgalloc.h extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); pmd 115 arch/x86/include/asm/pgalloc.h static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, pmd 118 arch/x86/include/asm/pgalloc.h ___pmd_free_tlb(tlb, pmd); pmd 122 arch/x86/include/asm/pgalloc.h extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); pmd 124 arch/x86/include/asm/pgalloc.h static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pmd 126 arch/x86/include/asm/pgalloc.h paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pmd 127 arch/x86/include/asm/pgalloc.h set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); pmd 130 arch/x86/include/asm/pgalloc.h static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) pmd 132 arch/x86/include/asm/pgalloc.h paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pmd 133 arch/x86/include/asm/pgalloc.h set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd))); pmd 20 arch/x86/include/asm/pgtable-2level.h static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) pmd 22 arch/x86/include/asm/pgtable-2level.h *pmdp = pmd; pmd 96 arch/x86/include/asm/pgtable-3level.h static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) pmd 98 arch/x86/include/asm/pgtable-3level.h set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); pmd 122 arch/x86/include/asm/pgtable-3level.h static inline void native_pmd_clear(pmd_t *pmd) pmd 124 arch/x86/include/asm/pgtable-3level.h u32 *tmp = (u32 *)pmd; pmd 168 arch/x86/include/asm/pgtable-3level.h pmd_t pmd; pmd 181 arch/x86/include/asm/pgtable-3level.h return res.pmd; pmd 190 arch/x86/include/asm/pgtable-3level.h unsigned long address, pmd_t *pmdp, pmd_t pmd) pmd 199 arch/x86/include/asm/pgtable-3level.h if (!(pmd_val(pmd) & _PAGE_PRESENT)) { pmd 204 arch/x86/include/asm/pgtable-3level.h new.pmd = pmd; pmd 210 arch/x86/include/asm/pgtable-3level.h return old.pmd; pmd 215 arch/x86/include/asm/pgtable-3level.h } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); pmd 30 arch/x86/include/asm/pgtable.h int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); pmd 69 arch/x86/include/asm/pgtable.h #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) pmd 93 arch/x86/include/asm/pgtable.h #define pmd_clear(pmd) native_pmd_clear(pmd) pmd 162 arch/x86/include/asm/pgtable.h static inline int pmd_dirty(pmd_t pmd) pmd 164 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & _PAGE_DIRTY; pmd 167 arch/x86/include/asm/pgtable.h static inline int pmd_young(pmd_t pmd) pmd 169 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & _PAGE_ACCESSED; pmd 218 arch/x86/include/asm/pgtable.h static inline unsigned long 
pmd_pfn(pmd_t pmd) pmd 220 arch/x86/include/asm/pgtable.h phys_addr_t pfn = pmd_val(pmd); pmd 222 arch/x86/include/asm/pgtable.h return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; pmd 257 arch/x86/include/asm/pgtable.h static inline int pmd_trans_huge(pmd_t pmd) pmd 259 arch/x86/include/asm/pgtable.h return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; pmd 276 arch/x86/include/asm/pgtable.h static inline int pmd_devmap(pmd_t pmd) pmd 278 arch/x86/include/asm/pgtable.h return !!(pmd_val(pmd) & _PAGE_DEVMAP); pmd 379 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) pmd 381 arch/x86/include/asm/pgtable.h pmdval_t v = native_pmd_val(pmd); pmd 386 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) pmd 388 arch/x86/include/asm/pgtable.h pmdval_t v = native_pmd_val(pmd); pmd 393 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkold(pmd_t pmd) pmd 395 arch/x86/include/asm/pgtable.h return pmd_clear_flags(pmd, _PAGE_ACCESSED); pmd 398 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkclean(pmd_t pmd) pmd 400 arch/x86/include/asm/pgtable.h return pmd_clear_flags(pmd, _PAGE_DIRTY); pmd 403 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_wrprotect(pmd_t pmd) pmd 405 arch/x86/include/asm/pgtable.h return pmd_clear_flags(pmd, _PAGE_RW); pmd 408 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkdirty(pmd_t pmd) pmd 410 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); pmd 413 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkdevmap(pmd_t pmd) pmd 415 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_DEVMAP); pmd 418 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkhuge(pmd_t pmd) pmd 420 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_PSE); pmd 423 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkyoung(pmd_t pmd) pmd 425 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_ACCESSED); pmd 428 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mkwrite(pmd_t pmd) pmd 430 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_RW); pmd 493 arch/x86/include/asm/pgtable.h static inline int pmd_soft_dirty(pmd_t pmd) pmd 495 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & _PAGE_SOFT_DIRTY; pmd 508 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) pmd 510 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); pmd 523 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) pmd 525 arch/x86/include/asm/pgtable.h return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); pmd 590 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_mknotpresent(pmd_t pmd) pmd 592 arch/x86/include/asm/pgtable.h return pfn_pmd(pmd_pfn(pmd), pmd 593 arch/x86/include/asm/pgtable.h __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE))); pmd 618 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmd 620 arch/x86/include/asm/pgtable.h pmdval_t val = pmd_val(pmd), oldval = val; pmd 759 arch/x86/include/asm/pgtable.h static inline int pmd_present(pmd_t pmd) pmd 767 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); pmd 781 arch/x86/include/asm/pgtable.h static inline int pmd_protnone(pmd_t pmd) pmd 783 arch/x86/include/asm/pgtable.h return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
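
The x86 block above shows a different construction from the sparc64 round-trip: two primitives, pmd_set_flags() and pmd_clear_flags(), OR bits into or mask bits out of the raw pmdval_t, and every pmd_mk*()/pmd_wrprotect() variant is a one-line application of one of them. A standalone sketch of that layering, using illustrative bit values rather than x86's actual ones:

    #include <assert.h>

    typedef unsigned long pmdval_t;
    typedef struct { pmdval_t pmd; } pmd_t;

    #define _PAGE_RW       0x002UL  /* illustrative values, not x86's */
    #define _PAGE_ACCESSED 0x020UL
    #define _PAGE_DIRTY    0x040UL

    static pmdval_t native_pmd_val(pmd_t pmd) { return pmd.pmd; }
    static pmd_t native_make_pmd(pmdval_t v) { pmd_t p = { v }; return p; }

    /* The two primitives everything else is built from. */
    static pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
    {
            pmdval_t v = native_pmd_val(pmd);

            return native_make_pmd(v | set);
    }

    static pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
    {
            pmdval_t v = native_pmd_val(pmd);

            return native_make_pmd(v & ~clear);
    }

    /* Each modifier is then a single call, as in the entries above. */
    static pmd_t pmd_mkdirty(pmd_t pmd)   { return pmd_set_flags(pmd, _PAGE_DIRTY); }
    static pmd_t pmd_mkold(pmd_t pmd)     { return pmd_clear_flags(pmd, _PAGE_ACCESSED); }
    static pmd_t pmd_wrprotect(pmd_t pmd) { return pmd_set_flags(pmd, 0), pmd_clear_flags(pmd, _PAGE_RW); }

    int main(void)
    {
            pmd_t pmd = native_make_pmd(_PAGE_RW | _PAGE_ACCESSED);

            pmd = pmd_wrprotect(pmd_mkold(pmd_mkdirty(pmd)));
            assert(native_pmd_val(pmd) == _PAGE_DIRTY);
            return 0;
    }

Funneling every modifier through two primitives keeps the flag manipulation in one place, which is also why the soft-dirty and devmap variants above are one-liners.
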
pmd 788 arch/x86/include/asm/pgtable.h static inline int pmd_none(pmd_t pmd) pmd 792 arch/x86/include/asm/pgtable.h unsigned long val = native_pmd_val(pmd); pmd 796 arch/x86/include/asm/pgtable.h static inline unsigned long pmd_page_vaddr(pmd_t pmd) pmd 798 arch/x86/include/asm/pgtable.h return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd)); pmd 805 arch/x86/include/asm/pgtable.h #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) pmd 838 arch/x86/include/asm/pgtable.h static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) pmd 840 arch/x86/include/asm/pgtable.h return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); pmd 843 arch/x86/include/asm/pgtable.h static inline int pmd_bad(pmd_t pmd) pmd 845 arch/x86/include/asm/pgtable.h return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; pmd 1090 arch/x86/include/asm/pgtable.h pmd_t *pmdp, pmd_t pmd) pmd 1092 arch/x86/include/asm/pgtable.h set_pmd(pmdp, pmd); pmd 1180 arch/x86/include/asm/pgtable.h static inline int pmd_write(pmd_t pmd) pmd 1182 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & _PAGE_RW; pmd 1215 arch/x86/include/asm/pgtable.h unsigned long address, pmd_t *pmdp, pmd_t pmd) pmd 1218 arch/x86/include/asm/pgtable.h return xchg(pmdp, pmd); pmd 1221 arch/x86/include/asm/pgtable.h WRITE_ONCE(*pmdp, pmd); pmd 1336 arch/x86/include/asm/pgtable.h unsigned long addr, pmd_t *pmd) pmd 1361 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) pmd 1363 arch/x86/include/asm/pgtable.h return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY); pmd 1366 arch/x86/include/asm/pgtable.h static inline int pmd_swp_soft_dirty(pmd_t pmd) pmd 1368 arch/x86/include/asm/pgtable.h return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY; pmd 1371 arch/x86/include/asm/pgtable.h static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) pmd 1373 arch/x86/include/asm/pgtable.h return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY); pmd 1451 arch/x86/include/asm/pgtable.h static inline bool pmd_access_permitted(pmd_t pmd, bool write) pmd 1453 arch/x86/include/asm/pgtable.h return __pte_access_permitted(pmd_val(pmd), write); pmd 75 arch/x86/include/asm/pgtable_64.h static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) pmd 77 arch/x86/include/asm/pgtable_64.h WRITE_ONCE(*pmdp, pmd); pmd 80 arch/x86/include/asm/pgtable_64.h static inline void native_pmd_clear(pmd_t *pmd) pmd 82 arch/x86/include/asm/pgtable_64.h native_set_pmd(pmd, native_make_pmd(0)); pmd 101 arch/x86/include/asm/pgtable_64.h return native_make_pmd(xchg(&xp->pmd, 0)); pmd 234 arch/x86/include/asm/pgtable_64.h #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) }) pmd 236 arch/x86/include/asm/pgtable_64.h #define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val }) pmd 360 arch/x86/include/asm/pgtable_types.h typedef struct { pmdval_t pmd; } pmd_t; pmd 367 arch/x86/include/asm/pgtable_types.h static inline pmdval_t native_pmd_val(pmd_t pmd) pmd 369 arch/x86/include/asm/pgtable_types.h return pmd.pmd; pmd 379 arch/x86/include/asm/pgtable_types.h static inline pmdval_t native_pmd_val(pmd_t pmd) pmd 381 arch/x86/include/asm/pgtable_types.h return native_pgd_val(pmd.pud.p4d.pgd); pmd 419 arch/x86/include/asm/pgtable_types.h static inline pmdval_t pmd_pfn_mask(pmd_t pmd) pmd 421 arch/x86/include/asm/pgtable_types.h if (native_pmd_val(pmd) & _PAGE_PSE) pmd 427 arch/x86/include/asm/pgtable_types.h static inline pmdval_t pmd_flags_mask(pmd_t pmd) pmd 429 arch/x86/include/asm/pgtable_types.h return ~pmd_pfn_mask(pmd); pmd 432 arch/x86/include/asm/pgtable_types.h static inline pmdval_t pmd_flags(pmd_t pmd) pmd 434
arch/x86/include/asm/pgtable_types.h return native_pmd_val(pmd) & pmd_flags_mask(pmd); pmd 340 arch/x86/include/asm/xen/page.h #define pmd_val_ma(v) ((v).pmd) pmd 136 arch/x86/kernel/espfix_64.c pmd_t pmd, *pmd_p; pmd 177 arch/x86/kernel/espfix_64.c pmd = *pmd_p; pmd 178 arch/x86/kernel/espfix_64.c if (!pmd_present(pmd)) { pmd 182 arch/x86/kernel/espfix_64.c pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); pmd 185 arch/x86/kernel/espfix_64.c set_pmd(&pmd_p[n], pmd); pmd 188 arch/x86/kernel/espfix_64.c pte_p = pte_offset_kernel(&pmd, addr); pmd 86 arch/x86/kernel/head32.c #define SET_PL2(pl2, val) { (pl2).pmd = (val); } pmd 122 arch/x86/kernel/head64.c pmdval_t *pmd, pmd_entry; pmd 169 arch/x86/kernel/head64.c pmd = fixup_pointer(level2_fixmap_pgt, physaddr); pmd 171 arch/x86/kernel/head64.c pmd[i] += load_delta; pmd 182 arch/x86/kernel/head64.c pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr); pmd 204 arch/x86/kernel/head64.c pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; pmd 205 arch/x86/kernel/head64.c pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; pmd 217 arch/x86/kernel/head64.c pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; pmd 236 arch/x86/kernel/head64.c pmd = fixup_pointer(level2_kernel_pgt, physaddr); pmd 240 arch/x86/kernel/head64.c pmd[i] &= ~_PAGE_PRESENT; pmd 244 arch/x86/kernel/head64.c if (pmd[i] & _PAGE_PRESENT) pmd 245 arch/x86/kernel/head64.c pmd[i] += load_delta; pmd 249 arch/x86/kernel/head64.c pmd[i] &= ~_PAGE_PRESENT; pmd 271 arch/x86/kernel/head64.c pmd[i] -= sme_get_me_mask(); pmd 300 arch/x86/kernel/head64.c int __init __early_make_pgtable(unsigned long address, pmdval_t pmd) pmd 365 arch/x86/kernel/head64.c pmd_p[pmd_index(address)] = pmd; pmd 373 arch/x86/kernel/head64.c pmdval_t pmd; pmd 375 arch/x86/kernel/head64.c pmd = (physaddr & PMD_MASK) + early_pmd_flags; pmd 377 arch/x86/kernel/head64.c return __early_make_pgtable(address, pmd); pmd 172 arch/x86/kernel/ldt.c had_kernel = (k_pmd->pmd != 0); pmd 173 arch/x86/kernel/ldt.c had_user = (u_pmd->pmd != 0); pmd 92 arch/x86/kernel/machine_kexec_32.c pgd_t *pgd, pmd_t *pmd, pte_t *pte, pmd 101 arch/x86/kernel/machine_kexec_32.c set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT)); pmd 105 arch/x86/kernel/machine_kexec_32.c pmd = pmd_offset(pud, vaddr); pmd 106 arch/x86/kernel/machine_kexec_32.c if (!(pmd_val(*pmd) & _PAGE_PRESENT)) pmd 107 arch/x86/kernel/machine_kexec_32.c set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); pmd 108 arch/x86/kernel/machine_kexec_32.c pte = pte_offset_kernel(pmd, vaddr); pmd 115 arch/x86/kernel/machine_kexec_32.c pmd_t *pmd = NULL; pmd 119 arch/x86/kernel/machine_kexec_32.c pmd = image->arch.pmd0; pmd 122 arch/x86/kernel/machine_kexec_32.c image->arch.pgd, pmd, image->arch.pte0, pmd 125 arch/x86/kernel/machine_kexec_32.c pmd = image->arch.pmd1; pmd 128 arch/x86/kernel/machine_kexec_32.c image->arch.pgd, pmd, image->arch.pte1, pmd 118 arch/x86/kernel/machine_kexec_64.c free_page((unsigned long)image->arch.pmd); pmd 119 arch/x86/kernel/machine_kexec_64.c image->arch.pmd = NULL; pmd 131 arch/x86/kernel/machine_kexec_64.c pmd_t *pmd; pmd 154 arch/x86/kernel/machine_kexec_64.c pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); pmd 155 arch/x86/kernel/machine_kexec_64.c if (!pmd) pmd 157 arch/x86/kernel/machine_kexec_64.c image->arch.pmd = pmd; pmd 158 arch/x86/kernel/machine_kexec_64.c set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); pmd 160 arch/x86/kernel/machine_kexec_64.c pmd = pmd_offset(pud, vaddr); pmd 161 arch/x86/kernel/machine_kexec_64.c if 
(!pmd_present(*pmd)) { pmd 166 arch/x86/kernel/machine_kexec_64.c set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); pmd 168 arch/x86/kernel/machine_kexec_64.c pte = pte_offset_kernel(pmd, vaddr); pmd 110 arch/x86/kernel/tboot.c pmd_t *pmd; pmd 120 arch/x86/kernel/tboot.c pmd = pmd_alloc(&tboot_mm, pud, vaddr); pmd 121 arch/x86/kernel/tboot.c if (!pmd) pmd 123 arch/x86/kernel/tboot.c pte = pte_alloc_map(&tboot_mm, pmd, vaddr); pmd 171 arch/x86/kernel/vm86_32.c pmd_t *pmd; pmd 185 arch/x86/kernel/vm86_32.c pmd = pmd_offset(pud, 0xA0000); pmd 187 arch/x86/kernel/vm86_32.c if (pmd_trans_huge(*pmd)) { pmd 189 arch/x86/kernel/vm86_32.c split_huge_pmd(vma, pmd, 0xA0000); pmd 191 arch/x86/kernel/vm86_32.c if (pmd_none_or_clear_bad(pmd)) pmd 193 arch/x86/kernel/vm86_32.c pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); pmd 155 arch/x86/mm/fault.c pmd_t *pmd, *pmd_k; pmd 178 arch/x86/mm/fault.c pmd = pmd_offset(pud, address); pmd 181 arch/x86/mm/fault.c if (pmd_present(*pmd) != pmd_present(*pmd_k)) pmd 182 arch/x86/mm/fault.c set_pmd(pmd, *pmd_k); pmd 187 arch/x86/mm/fault.c BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k)); pmd 297 arch/x86/mm/fault.c pmd_t *pmd; pmd 310 arch/x86/mm/fault.c pmd = pmd_offset(pud, address); pmd 311 arch/x86/mm/fault.c pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd)); pmd 320 arch/x86/mm/fault.c if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd)) pmd 323 arch/x86/mm/fault.c pte = pte_offset_kernel(pmd, address); pmd 358 arch/x86/mm/fault.c pmd_t *pmd; pmd 406 arch/x86/mm/fault.c pmd = pmd_offset(pud, address); pmd 407 arch/x86/mm/fault.c if (pmd_none(*pmd)) pmd 410 arch/x86/mm/fault.c if (pmd_large(*pmd)) pmd 413 arch/x86/mm/fault.c pte = pte_offset_kernel(pmd, address); pmd 452 arch/x86/mm/fault.c pmd_t *pmd; pmd 479 arch/x86/mm/fault.c pmd = pmd_offset(pud, address); pmd 480 arch/x86/mm/fault.c if (bad_address(pmd)) pmd 483 arch/x86/mm/fault.c pr_cont("PMD %lx ", pmd_val(*pmd)); pmd 484 arch/x86/mm/fault.c if (!pmd_present(*pmd) || pmd_large(*pmd)) pmd 487 arch/x86/mm/fault.c pte = pte_offset_kernel(pmd, address); pmd 1126 arch/x86/mm/fault.c pmd_t *pmd; pmd 1161 arch/x86/mm/fault.c pmd = pmd_offset(pud, address); pmd 1162 arch/x86/mm/fault.c if (!pmd_present(*pmd)) pmd 1165 arch/x86/mm/fault.c if (pmd_large(*pmd)) pmd 1166 arch/x86/mm/fault.c return spurious_kernel_fault_check(error_code, (pte_t *) pmd); pmd 1168 arch/x86/mm/fault.c pte = pte_offset_kernel(pmd, address); pmd 1180 arch/x86/mm/fault.c ret = spurious_kernel_fault_check(error_code, (pte_t *) pmd); pmd 50 arch/x86/mm/hugetlbpage.c int pmd_huge(pmd_t pmd) pmd 67 arch/x86/mm/hugetlbpage.c int pmd_huge(pmd_t pmd) pmd 69 arch/x86/mm/hugetlbpage.c return !pmd_none(pmd) && pmd 70 arch/x86/mm/hugetlbpage.c (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; pmd 12 arch/x86/mm/ident_map.c pmd_t *pmd = pmd_page + pmd_index(addr); pmd 14 arch/x86/mm/ident_map.c if (pmd_present(*pmd)) pmd 17 arch/x86/mm/ident_map.c set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag)); pmd 28 arch/x86/mm/ident_map.c pmd_t *pmd; pmd 47 arch/x86/mm/ident_map.c pmd = pmd_offset(pud, 0); pmd 48 arch/x86/mm/ident_map.c ident_pmd_init(info, pmd, addr, next); pmd 51 arch/x86/mm/ident_map.c pmd = (pmd_t *)info->alloc_pgt_page(info->context); pmd 52 arch/x86/mm/ident_map.c if (!pmd) pmd 54 arch/x86/mm/ident_map.c ident_pmd_init(info, pmd, addr, next); pmd 55 arch/x86/mm/ident_map.c set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag)); pmd 96 arch/x86/mm/init_32.c static pte_t * __init 
one_page_table_init(pmd_t *pmd) pmd 98 arch/x86/mm/init_32.c if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { pmd 102 arch/x86/mm/init_32.c set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); pmd 103 arch/x86/mm/init_32.c BUG_ON(page_table != pte_offset_kernel(pmd, 0)); pmd 106 arch/x86/mm/init_32.c return pte_offset_kernel(pmd, 0); pmd 120 arch/x86/mm/init_32.c pmd_t *pmd; pmd 122 arch/x86/mm/init_32.c pmd = populate_extra_pmd(vaddr); pmd 123 arch/x86/mm/init_32.c return one_page_table_init(pmd) + pte_idx; pmd 157 arch/x86/mm/init_32.c static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, pmd 184 arch/x86/mm/init_32.c set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); pmd 185 arch/x86/mm/init_32.c BUG_ON(newpte != pte_offset_kernel(pmd, 0)); pmd 213 arch/x86/mm/init_32.c pmd_t *pmd; pmd 227 arch/x86/mm/init_32.c pmd = one_md_table_init(pgd); pmd 228 arch/x86/mm/init_32.c pmd = pmd + pmd_index(vaddr); pmd 230 arch/x86/mm/init_32.c pmd++, pmd_idx++) { pmd 231 arch/x86/mm/init_32.c pte = page_table_kmap_check(one_page_table_init(pmd), pmd 232 arch/x86/mm/init_32.c pmd, vaddr, pte, &adr); pmd 264 arch/x86/mm/init_32.c pmd_t *pmd; pmd 297 arch/x86/mm/init_32.c pmd = one_md_table_init(pgd); pmd 303 arch/x86/mm/init_32.c pmd += pmd_idx; pmd 308 arch/x86/mm/init_32.c pmd++, pmd_idx++) { pmd 336 arch/x86/mm/init_32.c set_pmd(pmd, pfn_pmd(pfn, init_prot)); pmd 338 arch/x86/mm/init_32.c set_pmd(pmd, pfn_pmd(pfn, prot)); pmd 343 arch/x86/mm/init_32.c pte = one_page_table_init(pmd); pmd 398 arch/x86/mm/init_32.c pmd_t *pmd = pmd_offset(pud, vaddr); pmd 399 arch/x86/mm/init_32.c return pte_offset_kernel(pmd, vaddr); pmd 420 arch/x86/mm/init_32.c pmd_t *pmd; pmd 429 arch/x86/mm/init_32.c pmd = pmd_offset(pud, vaddr); pmd 430 arch/x86/mm/init_32.c pte = pte_offset_kernel(pmd, vaddr); pmd 477 arch/x86/mm/init_32.c pmd_t *pmd; pmd 497 arch/x86/mm/init_32.c pmd = pmd_offset(pud, va); pmd 498 arch/x86/mm/init_32.c if (!pmd_present(*pmd)) pmd 502 arch/x86/mm/init_32.c if (pmd_large(*pmd)) { pmd 504 arch/x86/mm/init_32.c pfn, pmd, __pa(pmd)); pmd 508 arch/x86/mm/init_32.c pte = pte_offset_kernel(pmd, va); pmd 513 arch/x86/mm/init_32.c pfn, pmd, __pa(pmd), pte, __pa(pte)); pmd 74 arch/x86/mm/init_64.c DEFINE_POPULATE(pud_populate, pud, pmd, init) pmd 75 arch/x86/mm/init_64.c DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init) pmd 89 arch/x86/mm/init_64.c DEFINE_ENTRY(pmd, pmd, init) pmd 270 arch/x86/mm/init_64.c pmd_t *pmd = (pmd_t *) spp_getpage(); pmd 271 arch/x86/mm/init_64.c pud_populate(&init_mm, pud, pmd); pmd 272 arch/x86/mm/init_64.c if (pmd != pmd_offset(pud, 0)) pmd 274 arch/x86/mm/init_64.c pmd, pmd_offset(pud, 0)); pmd 279 arch/x86/mm/init_64.c static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) pmd 281 arch/x86/mm/init_64.c if (pmd_none(*pmd)) { pmd 283 arch/x86/mm/init_64.c pmd_populate_kernel(&init_mm, pmd, pte); pmd 284 arch/x86/mm/init_64.c if (pte != pte_offset_kernel(pmd, 0)) pmd 287 arch/x86/mm/init_64.c return pte_offset_kernel(pmd, vaddr); pmd 292 arch/x86/mm/init_64.c pmd_t *pmd = fill_pmd(pud, vaddr); pmd 293 arch/x86/mm/init_64.c pte_t *pte = fill_pte(pmd, vaddr); pmd 351 arch/x86/mm/init_64.c pmd_t *pmd; pmd 353 arch/x86/mm/init_64.c pmd = populate_extra_pmd(vaddr); pmd 354 arch/x86/mm/init_64.c return fill_pte(pmd, vaddr); pmd 366 arch/x86/mm/init_64.c pmd_t *pmd; pmd 387 arch/x86/mm/init_64.c pmd = (pmd_t *) spp_getpage(); pmd 388 arch/x86/mm/init_64.c set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | pmd 391 arch/x86/mm/init_64.c pmd = pmd_offset(pud, phys);
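
one_page_table_init() and fill_pte() in the entries above both implement allocate-on-demand: if the pmd slot is empty, grab a zeroed page of ptes, aim the pmd at it with set_pmd()/pmd_populate_kernel(), sanity-check against pte_offset_kernel(pmd, 0), and return the pte. A userspace approximation follows, with calloc() standing in for the kernel's page allocator and a pointer-or-NULL pmd standing in for the real encoded entry:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PTRS_PER_PTE 512   /* one 4 KiB page of 8-byte ptes, as on x86-64 */

    typedef unsigned long pte_t;
    typedef struct { pte_t *table; } pmd_t;   /* toy pmd: a pointer, or NULL */

    static int pmd_none(pmd_t pmd) { return pmd.table == NULL; }

    static pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long idx)
    {
            return pmd->table + idx;
    }

    /* one_page_table_init() analogue: populate the pmd only if it is empty. */
    static pte_t *one_page_table_init(pmd_t *pmd)
    {
            if (pmd_none(*pmd)) {
                    pte_t *page_table = calloc(PTRS_PER_PTE, sizeof(pte_t));

                    assert(page_table);          /* the kernel would BUG/panic here */
                    pmd->table = page_table;     /* set_pmd() analogue */
                    assert(page_table == pte_offset_kernel(pmd, 0));
            }
            return pte_offset_kernel(pmd, 0);
    }

    int main(void)
    {
            pmd_t pmd = { NULL };
            pte_t *first = one_page_table_init(&pmd);

            /* a second call must reuse the existing table, not reallocate */
            assert(first == one_page_table_init(&pmd));
            printf("pte table at %p\n", (void *)first);
            return 0;
    }

The pmd_none() guard is what makes the helper idempotent, so callers can invoke it for every address in a range without tracking which slots were already filled.
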
pmd 392 arch/x86/mm/init_64.c BUG_ON(!pmd_none(*pmd)); pmd 393 arch/x86/mm/init_64.c set_pmd(pmd, __pmd(phys | pgprot_val(prot))); pmd 425 arch/x86/mm/init_64.c pmd_t *pmd = level2_kernel_pgt; pmd 435 arch/x86/mm/init_64.c for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { pmd 436 arch/x86/mm/init_64.c if (pmd_none(*pmd)) pmd 439 arch/x86/mm/init_64.c set_pmd(pmd, __pmd(0)); pmd 511 arch/x86/mm/init_64.c pmd_t *pmd = pmd_page + pmd_index(paddr); pmd 522 arch/x86/mm/init_64.c set_pmd_init(pmd, __pmd(0), init); pmd 526 arch/x86/mm/init_64.c if (!pmd_none(*pmd)) { pmd 527 arch/x86/mm/init_64.c if (!pmd_large(*pmd)) { pmd 529 arch/x86/mm/init_64.c pte = (pte_t *)pmd_page_vaddr(*pmd); pmd 554 arch/x86/mm/init_64.c new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); pmd 560 arch/x86/mm/init_64.c set_pte_init((pte_t *)pmd, pmd 573 arch/x86/mm/init_64.c pmd_populate_kernel_init(&init_mm, pmd, pte, init); pmd 597 arch/x86/mm/init_64.c pmd_t *pmd; pmd 616 arch/x86/mm/init_64.c pmd = pmd_offset(pud, 0); pmd 617 arch/x86/mm/init_64.c paddr_last = phys_pmd_init(pmd, paddr, pmd 656 arch/x86/mm/init_64.c pmd = alloc_low_page(); pmd 657 arch/x86/mm/init_64.c paddr_last = phys_pmd_init(pmd, paddr, paddr_end, pmd 661 arch/x86/mm/init_64.c pud_populate_init(&init_mm, pud, pmd, init); pmd 903 arch/x86/mm/init_64.c static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) pmd 915 arch/x86/mm/init_64.c free_pagetable(pmd_page(*pmd), 0); pmd 917 arch/x86/mm/init_64.c pmd_clear(pmd); pmd 923 arch/x86/mm/init_64.c pmd_t *pmd; pmd 927 arch/x86/mm/init_64.c pmd = pmd_start + i; pmd 928 arch/x86/mm/init_64.c if (!pmd_none(*pmd)) pmd 1034 arch/x86/mm/init_64.c pmd_t *pmd; pmd 1037 arch/x86/mm/init_64.c pmd = pmd_start + pmd_index(addr); pmd 1038 arch/x86/mm/init_64.c for (; addr < end; addr = next, pmd++) { pmd 1041 arch/x86/mm/init_64.c if (!pmd_present(*pmd)) pmd 1044 arch/x86/mm/init_64.c if (pmd_large(*pmd)) { pmd 1048 arch/x86/mm/init_64.c free_hugepage_table(pmd_page(*pmd), pmd 1052 arch/x86/mm/init_64.c pmd_clear(pmd); pmd 1059 arch/x86/mm/init_64.c page_addr = page_address(pmd_page(*pmd)); pmd 1062 arch/x86/mm/init_64.c free_hugepage_table(pmd_page(*pmd), pmd 1066 arch/x86/mm/init_64.c pmd_clear(pmd); pmd 1074 arch/x86/mm/init_64.c pte_base = (pte_t *)pmd_page_vaddr(*pmd); pmd 1076 arch/x86/mm/init_64.c free_pte_table(pte_base, pmd); pmd 1347 arch/x86/mm/init_64.c pmd_t *pmd; pmd 1368 arch/x86/mm/init_64.c pmd = pmd_offset(pud, addr); pmd 1369 arch/x86/mm/init_64.c if (pmd_none(*pmd)) pmd 1372 arch/x86/mm/init_64.c if (pmd_large(*pmd)) pmd 1373 arch/x86/mm/init_64.c return pfn_valid(pmd_pfn(*pmd)); pmd 1375 arch/x86/mm/init_64.c pte = pte_offset_kernel(pmd, addr); pmd 1457 arch/x86/mm/init_64.c pmd_t *pmd; pmd 1474 arch/x86/mm/init_64.c pmd = pmd_offset(pud, addr); pmd 1475 arch/x86/mm/init_64.c if (pmd_none(*pmd)) { pmd 1487 arch/x86/mm/init_64.c set_pmd(pmd, __pmd(pte_val(entry))); pmd 1504 arch/x86/mm/init_64.c } else if (pmd_large(*pmd)) { pmd 1505 arch/x86/mm/init_64.c vmemmap_verify((pte_t *)pmd, node, addr, next); pmd 1544 arch/x86/mm/init_64.c pmd_t *pmd; pmd 1574 arch/x86/mm/init_64.c pmd = pmd_offset(pud, addr); pmd 1575 arch/x86/mm/init_64.c if (pmd_none(*pmd)) pmd 1577 arch/x86/mm/init_64.c get_page_bootmem(section_nr, pmd_page(*pmd), pmd 1580 arch/x86/mm/init_64.c pte = pte_offset_kernel(pmd, addr); pmd 1588 arch/x86/mm/init_64.c pmd = pmd_offset(pud, addr); pmd 1589 arch/x86/mm/init_64.c if (pmd_none(*pmd)) pmd 1593 arch/x86/mm/init_64.c page = pmd_page(*pmd); pmd 808
arch/x86/mm/ioremap.c pmd_t *pmd = pmd_offset(pud, addr); pmd 810 arch/x86/mm/ioremap.c return pmd; pmd 825 arch/x86/mm/ioremap.c pmd_t *pmd; pmd 835 arch/x86/mm/ioremap.c pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); pmd 837 arch/x86/mm/ioremap.c pmd_populate_kernel(&init_mm, pmd, bm_pte); pmd 847 arch/x86/mm/ioremap.c if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { pmd 850 arch/x86/mm/ioremap.c pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); pmd 39 arch/x86/mm/kasan_init_64.c static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, pmd 44 arch/x86/mm/kasan_init_64.c if (pmd_none(*pmd)) { pmd 51 arch/x86/mm/kasan_init_64.c if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) pmd 58 arch/x86/mm/kasan_init_64.c pmd_populate_kernel(&init_mm, pmd, p); pmd 61 arch/x86/mm/kasan_init_64.c pte = pte_offset_kernel(pmd, addr); pmd 78 arch/x86/mm/kasan_init_64.c pmd_t *pmd; pmd 98 arch/x86/mm/kasan_init_64.c pmd = pmd_offset(pud, addr); pmd 101 arch/x86/mm/kasan_init_64.c if (!pmd_large(*pmd)) pmd 102 arch/x86/mm/kasan_init_64.c kasan_populate_pmd(pmd, addr, next, nid); pmd 103 arch/x86/mm/kasan_init_64.c } while (pmd++, addr = next, addr != end); pmd 127 arch/x86/mm/kmmio.c static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) pmd 130 arch/x86/mm/kmmio.c pmdval_t v = pmd_val(*pmd); pmd 133 arch/x86/mm/kmmio.c new_pmd = pmd_mknotpresent(*pmd); pmd 138 arch/x86/mm/kmmio.c set_pmd(pmd, new_pmd); pmd 123 arch/x86/mm/mem_encrypt.c pmdval_t pmd_flags, pmd; pmd 129 arch/x86/mm/mem_encrypt.c pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0; pmd 130 arch/x86/mm/mem_encrypt.c __early_make_pgtable((unsigned long)vaddr, pmd); pmd 110 arch/x86/mm/mem_encrypt_identity.c pmd_t *pmd; pmd 130 arch/x86/mm/mem_encrypt_identity.c pmd = ppd->pgtable_area; pmd 131 arch/x86/mm/mem_encrypt_identity.c memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD); pmd 132 arch/x86/mm/mem_encrypt_identity.c ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD; pmd 133 arch/x86/mm/mem_encrypt_identity.c set_pud(pud, __pud(PUD_FLAGS | __pa(pmd))); pmd 145 arch/x86/mm/mem_encrypt_identity.c pmd_t *pmd; pmd 151 arch/x86/mm/mem_encrypt_identity.c pmd = pmd_offset(pud, ppd->vaddr); pmd 152 arch/x86/mm/mem_encrypt_identity.c if (pmd_large(*pmd)) pmd 155 arch/x86/mm/mem_encrypt_identity.c set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags)); pmd 161 arch/x86/mm/mem_encrypt_identity.c pmd_t *pmd; pmd 168 arch/x86/mm/mem_encrypt_identity.c pmd = pmd_offset(pud, ppd->vaddr); pmd 169 arch/x86/mm/mem_encrypt_identity.c if (pmd_none(*pmd)) { pmd 173 arch/x86/mm/mem_encrypt_identity.c set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); pmd 176 arch/x86/mm/mem_encrypt_identity.c if (pmd_large(*pmd)) pmd 179 arch/x86/mm/mem_encrypt_identity.c pte = pte_offset_map(pmd, ppd->vaddr); pmd 572 arch/x86/mm/pageattr.c pmd_t *pmd; pmd 595 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, address); pmd 596 arch/x86/mm/pageattr.c if (pmd_none(*pmd)) pmd 600 arch/x86/mm/pageattr.c if (pmd_large(*pmd) || !pmd_present(*pmd)) pmd 601 arch/x86/mm/pageattr.c return (pte_t *)pmd; pmd 605 arch/x86/mm/pageattr.c return pte_offset_kernel(pmd, address); pmd 717 arch/x86/mm/pageattr.c pmd_t *pmd; pmd 722 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, address); pmd 723 arch/x86/mm/pageattr.c set_pte_atomic((pte_t *)pmd, pte); pmd 1076 arch/x86/mm/pageattr.c static bool try_to_free_pmd_page(pmd_t *pmd) pmd 1081 arch/x86/mm/pageattr.c if (!pmd_none(pmd[i])) pmd 1084 arch/x86/mm/pageattr.c free_page((unsigned long)pmd); pmd 1088 arch/x86/mm/pageattr.c static 
bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) pmd 1090 arch/x86/mm/pageattr.c pte_t *pte = pte_offset_kernel(pmd, start); pmd 1099 arch/x86/mm/pageattr.c if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { pmd 1100 arch/x86/mm/pageattr.c pmd_clear(pmd); pmd 1106 arch/x86/mm/pageattr.c static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, pmd 1109 arch/x86/mm/pageattr.c if (unmap_pte_range(pmd, start, end)) pmd 1116 arch/x86/mm/pageattr.c pmd_t *pmd = pmd_offset(pud, start); pmd 1125 arch/x86/mm/pageattr.c __unmap_pmd_range(pud, pmd, start, pre_end); pmd 1128 arch/x86/mm/pageattr.c pmd++; pmd 1135 arch/x86/mm/pageattr.c if (pmd_large(*pmd)) pmd 1136 arch/x86/mm/pageattr.c pmd_clear(pmd); pmd 1138 arch/x86/mm/pageattr.c __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); pmd 1141 arch/x86/mm/pageattr.c pmd++; pmd 1148 arch/x86/mm/pageattr.c return __unmap_pmd_range(pud, pmd, start, end); pmd 1201 arch/x86/mm/pageattr.c static int alloc_pte_page(pmd_t *pmd) pmd 1207 arch/x86/mm/pageattr.c set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); pmd 1213 arch/x86/mm/pageattr.c pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL); pmd 1214 arch/x86/mm/pageattr.c if (!pmd) pmd 1217 arch/x86/mm/pageattr.c set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); pmd 1223 arch/x86/mm/pageattr.c unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) pmd 1227 arch/x86/mm/pageattr.c pte = pte_offset_kernel(pmd, start); pmd 1245 arch/x86/mm/pageattr.c pmd_t *pmd; pmd 1262 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); pmd 1263 arch/x86/mm/pageattr.c if (pmd_none(*pmd)) pmd 1264 arch/x86/mm/pageattr.c if (alloc_pte_page(pmd)) pmd 1267 arch/x86/mm/pageattr.c populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); pmd 1289 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); pmd 1291 arch/x86/mm/pageattr.c set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, pmd 1303 arch/x86/mm/pageattr.c pmd = pmd_offset(pud, start); pmd 1304 arch/x86/mm/pageattr.c if (pmd_none(*pmd)) pmd 1305 arch/x86/mm/pageattr.c if (alloc_pte_page(pmd)) pmd 1309 arch/x86/mm/pageattr.c pmd, pgprot); pmd 54 arch/x86/mm/pgtable.c void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) pmd 56 arch/x86/mm/pgtable.c struct page *page = virt_to_page(pmd); pmd 57 arch/x86/mm/pgtable.c paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); pmd 181 arch/x86/mm/pgtable.c void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) pmd 183 arch/x86/mm/pgtable.c paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); pmd 187 arch/x86/mm/pgtable.c set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT)); pmd 228 arch/x86/mm/pgtable.c pmd_t *pmd = (pmd_t *)__get_free_page(gfp); pmd 229 arch/x86/mm/pgtable.c if (!pmd) pmd 231 arch/x86/mm/pgtable.c if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { pmd 232 arch/x86/mm/pgtable.c free_page((unsigned long)pmd); pmd 233 arch/x86/mm/pgtable.c pmd = NULL; pmd 236 arch/x86/mm/pgtable.c if (pmd) pmd 238 arch/x86/mm/pgtable.c pmds[i] = pmd; pmd 260 arch/x86/mm/pgtable.c pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); pmd 265 arch/x86/mm/pgtable.c pmd_free(mm, pmd); pmd 302 arch/x86/mm/pgtable.c pmd_t *pmd = pmds[i]; pmd 305 arch/x86/mm/pgtable.c memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), pmd 308 arch/x86/mm/pgtable.c pud_populate(mm, pud, pmd); pmd 329 arch/x86/mm/pgtable.c pmd_t *pmd = pmds[i]; pmd 331 arch/x86/mm/pgtable.c memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd), pmd 334 arch/x86/mm/pgtable.c pud_populate(mm, u_pud, pmd); pmd 725 arch/x86/mm/pgtable.c int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, 
pgprot_t prot) pmd 738 arch/x86/mm/pgtable.c if (pmd_present(*pmd) && !pmd_huge(*pmd)) pmd 743 arch/x86/mm/pgtable.c set_pte((pte_t *)pmd, pfn_pte( pmd 770 arch/x86/mm/pgtable.c int pmd_clear_huge(pmd_t *pmd) pmd 772 arch/x86/mm/pgtable.c if (pmd_large(*pmd)) { pmd 773 arch/x86/mm/pgtable.c pmd_clear(pmd); pmd 801 arch/x86/mm/pgtable.c pmd_t *pmd, *pmd_sv; pmd 805 arch/x86/mm/pgtable.c pmd = (pmd_t *)pud_page_vaddr(*pud); pmd 811 arch/x86/mm/pgtable.c pmd_sv[i] = pmd[i]; pmd 812 arch/x86/mm/pgtable.c if (!pmd_none(pmd[i])) pmd 813 arch/x86/mm/pgtable.c pmd_clear(&pmd[i]); pmd 829 arch/x86/mm/pgtable.c free_page((unsigned long)pmd); pmd 842 arch/x86/mm/pgtable.c int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) pmd 846 arch/x86/mm/pgtable.c pte = (pte_t *)pmd_page_vaddr(*pmd); pmd 847 arch/x86/mm/pgtable.c pmd_clear(pmd); pmd 868 arch/x86/mm/pgtable.c int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) pmd 870 arch/x86/mm/pgtable.c return pmd_none(*pmd); pmd 33 arch/x86/mm/pgtable_32.c pmd_t *pmd; pmd 51 arch/x86/mm/pgtable_32.c pmd = pmd_offset(pud, vaddr); pmd 52 arch/x86/mm/pgtable_32.c if (pmd_none(*pmd)) { pmd 56 arch/x86/mm/pgtable_32.c pte = pte_offset_kernel(pmd, vaddr); pmd 248 arch/x86/mm/pti.c pmd_t *pmd; pmd 251 arch/x86/mm/pti.c pmd = pti_user_pagetable_walk_pmd(address); pmd 252 arch/x86/mm/pti.c if (!pmd) pmd 256 arch/x86/mm/pti.c if (pmd_large(*pmd)) { pmd 261 arch/x86/mm/pti.c if (pmd_none(*pmd)) { pmd 266 arch/x86/mm/pti.c set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); pmd 269 arch/x86/mm/pti.c pte = pte_offset_kernel(pmd, address); pmd 315 arch/x86/mm/pti.c pmd_t *pmd, *target_pmd; pmd 338 arch/x86/mm/pti.c pmd = pmd_offset(pud, addr); pmd 339 arch/x86/mm/pti.c if (pmd_none(*pmd)) { pmd 345 arch/x86/mm/pti.c if (pmd_large(*pmd) || level == PTI_CLONE_PMD) { pmd 356 arch/x86/mm/pti.c if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT))) pmd 369 arch/x86/mm/pti.c *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL); pmd 376 arch/x86/mm/pti.c *target_pmd = *pmd; pmd 383 arch/x86/mm/pti.c pte = pte_offset_kernel(pmd, addr); pmd 216 arch/x86/power/hibernate.c pmd_t *pmd; pmd 238 arch/x86/power/hibernate.c pmd = pmd_offset(pud, relocated_restore_code); pmd 239 arch/x86/power/hibernate.c if (pmd_large(*pmd)) { pmd 240 arch/x86/power/hibernate.c set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX)); pmd 243 arch/x86/power/hibernate.c pte = pte_offset_kernel(pmd, relocated_restore_code); pmd 59 arch/x86/power/hibernate_32.c static pte_t *resume_one_page_table_init(pmd_t *pmd) pmd 61 arch/x86/power/hibernate_32.c if (pmd_none(*pmd)) { pmd 66 arch/x86/power/hibernate_32.c set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); pmd 68 arch/x86/power/hibernate_32.c BUG_ON(page_table != pte_offset_kernel(pmd, 0)); pmd 73 arch/x86/power/hibernate_32.c return pte_offset_kernel(pmd, 0); pmd 85 arch/x86/power/hibernate_32.c pmd_t *pmd; pmd 94 arch/x86/power/hibernate_32.c pmd = resume_one_md_table_init(pgd); pmd 95 arch/x86/power/hibernate_32.c if (!pmd) pmd 101 arch/x86/power/hibernate_32.c for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { pmd 110 arch/x86/power/hibernate_32.c set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); pmd 115 arch/x86/power/hibernate_32.c pte = resume_one_page_table_init(pmd); pmd 148 arch/x86/power/hibernate_32.c pmd_t *pmd; pmd 153 arch/x86/power/hibernate_32.c pmd = resume_one_md_table_init(pgd); pmd 154 arch/x86/power/hibernate_32.c if (!pmd) pmd 158 arch/x86/power/hibernate_32.c set_pmd(pmd + pmd_index(restore_jump_address), pmd 161 
arch/x86/power/hibernate_32.c pte = resume_one_page_table_init(pmd); pmd 30 arch/x86/power/hibernate_64.c pmd_t *pmd; pmd 64 arch/x86/power/hibernate_64.c pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); pmd 65 arch/x86/power/hibernate_64.c if (!pmd) pmd 68 arch/x86/power/hibernate_64.c set_pmd(pmd + pmd_index(restore_jump_address), pmd 71 arch/x86/power/hibernate_64.c __pud(__pa(pmd) | pgprot_val(pgtable_prot))); pmd 404 arch/x86/xen/mmu_pv.c __visible pmdval_t xen_pmd_val(pmd_t pmd) pmd 406 arch/x86/xen/mmu_pv.c return pte_mfn_to_pfn(pmd.pmd); pmd 462 arch/x86/xen/mmu_pv.c __visible pmd_t xen_make_pmd(pmdval_t pmd) pmd 464 arch/x86/xen/mmu_pv.c pmd = pte_pfn_to_mfn(pmd); pmd 465 arch/x86/xen/mmu_pv.c return native_make_pmd(pmd); pmd 576 arch/x86/xen/mmu_pv.c static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, pmd 584 arch/x86/xen/mmu_pv.c if (!pmd_none(pmd[i])) pmd 585 arch/x86/xen/mmu_pv.c flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE); pmd 598 arch/x86/xen/mmu_pv.c pmd_t *pmd; pmd 603 arch/x86/xen/mmu_pv.c pmd = pmd_offset(&pud[i], 0); pmd 605 arch/x86/xen/mmu_pv.c flush |= (*func)(mm, virt_to_page(pmd), PT_PMD); pmd 606 arch/x86/xen/mmu_pv.c flush |= xen_pmd_walk(mm, pmd, func, pmd 1097 arch/x86/xen/mmu_pv.c pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr); pmd 1101 arch/x86/xen/mmu_pv.c for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); pmd 1102 arch/x86/xen/mmu_pv.c pmd++, vaddr += PMD_SIZE) { pmd 1103 arch/x86/xen/mmu_pv.c if (pmd_none(*pmd)) pmd 1106 arch/x86/xen/mmu_pv.c set_pmd(pmd, __pmd(0)); pmd 1137 arch/x86/xen/mmu_pv.c static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin) pmd 1143 arch/x86/xen/mmu_pv.c if (pmd_large(*pmd)) { pmd 1144 arch/x86/xen/mmu_pv.c pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK; pmd 1149 arch/x86/xen/mmu_pv.c pte_tbl = pte_offset_kernel(pmd, 0); pmd 1156 arch/x86/xen/mmu_pv.c set_pmd(pmd, __pmd(0)); pmd 1775 arch/x86/xen/mmu_pv.c static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) pmd 1790 arch/x86/xen/mmu_pv.c if (pmd_present(pmd[pmdidx])) pmd 1791 arch/x86/xen/mmu_pv.c pte_page = m2v(pmd[pmdidx].pmd); pmd 1800 arch/x86/xen/mmu_pv.c pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); pmd 1821 arch/x86/xen/mmu_pv.c set_page_prot(pmd, PAGE_KERNEL_RO); pmd 2017 arch/x86/xen/mmu_pv.c pmd_t pmd; pmd 2035 arch/x86/xen/mmu_pv.c pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) * pmd 2036 arch/x86/xen/mmu_pv.c sizeof(pmd))); pmd 2037 arch/x86/xen/mmu_pv.c if (!pmd_present(pmd)) pmd 2039 arch/x86/xen/mmu_pv.c pa = pmd_val(pmd) & PTE_PFN_MASK; pmd 2040 arch/x86/xen/mmu_pv.c if (pmd_large(pmd)) pmd 2062 arch/x86/xen/mmu_pv.c pmd_t *pmd; pmd 2100 arch/x86/xen/mmu_pv.c pmd = early_memremap(pmd_phys, PAGE_SIZE); pmd 2101 arch/x86/xen/mmu_pv.c clear_page(pmd); pmd 2118 arch/x86/xen/mmu_pv.c pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys); pmd 2122 arch/x86/xen/mmu_pv.c early_memunmap(pmd, PAGE_SIZE); pmd 2218 arch/x86/xen/mmu_pv.c static phys_addr_t __init xen_find_pt_base(pmd_t *pmd) pmd 2223 arch/x86/xen/mmu_pv.c pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd)); pmd 2226 arch/x86/xen/mmu_pv.c if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) { pmd 2227 arch/x86/xen/mmu_pv.c paddr = m2p(pmd[pmdidx].pmd); pmd 23 arch/xtensa/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd) pmd 75 arch/xtensa/include/asm/pgalloc.h #define pmd_pgtable(pmd) pmd_page(pmd) pmd 245 arch/xtensa/include/asm/pgtable.h #define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
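
The Xen teardown loops above advance a pmd cursor and a virtual address in lockstep, one PMD_SIZE stride per slot, skipping pmd_none() entries and zeroing the rest with set_pmd(pmd, __pmd(0)). The slot arithmetic behind pmd_index() is worth spelling out; this sketch uses x86-64's usual constants (PMD_SHIFT of 21, so each pmd entry covers 2 MiB), and the starting address is an arbitrary example:

    #include <stdio.h>

    #define PMD_SHIFT    21                      /* x86-64: 2 MiB per pmd entry */
    #define PMD_SIZE     (1UL << PMD_SHIFT)
    #define PTRS_PER_PMD 512

    /* pmd_index() as on x86-64: the middle slice of the virtual address. */
    static unsigned long pmd_index(unsigned long addr)
    {
            return (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
    }

    int main(void)
    {
            unsigned long vaddr = 0xffffffff81200000UL;  /* arbitrary example */
            unsigned long vaddr_end = vaddr + 4 * PMD_SIZE;

            /* the teardown idiom: one iteration per 2 MiB of virtual space */
            for (; vaddr < vaddr_end; vaddr += PMD_SIZE)
                    printf("vaddr %#lx -> pmd slot %lu\n", vaddr, pmd_index(vaddr));
            return 0;
    }

Because consecutive strides map to consecutive slots, the kernel loops can simply increment the pmd pointer instead of recomputing pmd_index() each time, which is exactly the pmd++, vaddr += PMD_SIZE shape indexed above.
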
pmd 246 arch/xtensa/include/asm/pgtable.h #define pmd_page(pmd) virt_to_page(pmd_val(pmd)) pmd 262 arch/xtensa/include/asm/pgtable.h #define pmd_none(pmd) (!pmd_val(pmd)) pmd 263 arch/xtensa/include/asm/pgtable.h #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) pmd 264 arch/xtensa/include/asm/pgtable.h #define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) pmd 422 arch/xtensa/include/asm/pgtable.h #define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \ pmd 423 arch/xtensa/include/asm/pgtable.h srli pmd, pmd, PAGE_SHIFT; \ pmd 424 arch/xtensa/include/asm/pgtable.h slli pmd, pmd, PAGE_SHIFT; \ pmd 425 arch/xtensa/include/asm/pgtable.h addx4 pmd, tmp, pmd pmd 200 arch/xtensa/mm/fault.c pmd_t *pmd, *pmd_k; pmd 214 arch/xtensa/mm/fault.c pmd = pmd_offset(pgd, address); pmd 216 arch/xtensa/mm/fault.c if (!pmd_present(*pmd) || !pmd_present(*pmd_k)) pmd 219 arch/xtensa/mm/fault.c pmd_val(*pmd) = pmd_val(*pmd_k); pmd 23 arch/xtensa/mm/kasan_init.c pmd_t *pmd = pmd_offset(pgd, vaddr); pmd 31 arch/xtensa/mm/kasan_init.c for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { pmd 32 arch/xtensa/mm/kasan_init.c BUG_ON(!pmd_none(*pmd)); pmd 33 arch/xtensa/mm/kasan_init.c set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte)); pmd 45 arch/xtensa/mm/kasan_init.c pmd_t *pmd = pmd_offset(pgd, vaddr); pmd 71 arch/xtensa/mm/kasan_init.c set_pmd(pmd + i, __pmd((unsigned long)pte)); pmd 25 arch/xtensa/mm/mmu.c pmd_t *pmd = pmd_offset(pgd, vaddr); pmd 42 arch/xtensa/mm/mmu.c for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { pmd 45 arch/xtensa/mm/mmu.c BUG_ON(!pmd_none(*pmd)); pmd 46 arch/xtensa/mm/mmu.c set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); pmd 47 arch/xtensa/mm/mmu.c BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); pmd 49 arch/xtensa/mm/mmu.c __func__, pmd, cur_pte); pmd 172 arch/xtensa/mm/tlb.c pmd_t *pmd; pmd 180 arch/xtensa/mm/tlb.c pmd = pmd_offset(pgd, vaddr); pmd 181 arch/xtensa/mm/tlb.c if (pmd_none_or_clear_bad(pmd)) pmd 183 arch/xtensa/mm/tlb.c pte = pte_offset_map(pmd, vaddr); pmd 515 drivers/edac/xgene_edac.c u32 pmd; pmd 533 drivers/edac/xgene_edac.c ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, pmd 573 drivers/edac/xgene_edac.c ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, pmd 617 drivers/edac/xgene_edac.c ctx->pmd * MAX_CPU_PER_PMD + cpu_idx, val, pmd 677 drivers/edac/xgene_edac.c ctx->pmd, val, val_hi, val_lo); pmd 729 drivers/edac/xgene_edac.c ctx->pmd, val, val_hi, val_lo); pmd 741 drivers/edac/xgene_edac.c if (!((PMD0_MERR_MASK << ctx->pmd) & pcp_hp_stat)) pmd 791 drivers/edac/xgene_edac.c PMD0_MERR_MASK << ctx->pmd); pmd 794 drivers/edac/xgene_edac.c PMD0_MERR_MASK << ctx->pmd); pmd 870 drivers/edac/xgene_edac.c snprintf(name, sizeof(name), "PMD%d", ctx->pmd); pmd 881 drivers/edac/xgene_edac.c static int xgene_edac_pmd_available(u32 efuse, int pmd) pmd 883 drivers/edac/xgene_edac.c return (efuse & (1 << pmd)) ?
0 : 1; pmd 893 drivers/edac/xgene_edac.c u32 pmd; pmd 901 drivers/edac/xgene_edac.c if (of_property_read_u32(np, "pmd-controller", &pmd)) { pmd 909 drivers/edac/xgene_edac.c if (!xgene_edac_pmd_available(val, pmd)) { pmd 914 drivers/edac/xgene_edac.c snprintf(edac_name, sizeof(edac_name), "l2c%d", pmd); pmd 925 drivers/edac/xgene_edac.c ctx->pmd = pmd; pmd 969 drivers/edac/xgene_edac.c dev_info(edac->dev, "X-Gene EDAC PMD%d registered\n", ctx->pmd); pmd 979 drivers/edac/xgene_edac.c static int xgene_edac_pmd_remove(struct xgene_edac_pmd_ctx *pmd) pmd 981 drivers/edac/xgene_edac.c struct edac_device_ctl_info *edac_dev = pmd->edac_dev; pmd 1817 drivers/edac/xgene_edac.c struct xgene_edac_pmd_ctx *pmd; pmd 1833 drivers/edac/xgene_edac.c list_for_each_entry(pmd, &ctx->pmds, next) { pmd 1834 drivers/edac/xgene_edac.c if ((PMD0_MERR_MASK << pmd->pmd) & pcp_hp_stat) pmd 1835 drivers/edac/xgene_edac.c xgene_edac_pmd_check(pmd->edac_dev); pmd 1971 drivers/edac/xgene_edac.c struct xgene_edac_pmd_ctx *pmd; pmd 1979 drivers/edac/xgene_edac.c list_for_each_entry_safe(pmd, temp_pmd, &edac->pmds, next) pmd 1980 drivers/edac/xgene_edac.c xgene_edac_pmd_remove(pmd); pmd 230 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd; pmd 390 drivers/md/dm-thin-metadata.c static inline void pmd_write_lock_in_core(struct dm_pool_metadata *pmd) pmd 391 drivers/md/dm-thin-metadata.c __acquires(pmd->root_lock) pmd 393 drivers/md/dm-thin-metadata.c down_write(&pmd->root_lock); pmd 396 drivers/md/dm-thin-metadata.c static inline void pmd_write_lock(struct dm_pool_metadata *pmd) pmd 398 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 399 drivers/md/dm-thin-metadata.c if (unlikely(!pmd->in_service)) pmd 400 drivers/md/dm-thin-metadata.c pmd->in_service = true; pmd 403 drivers/md/dm-thin-metadata.c static inline void pmd_write_unlock(struct dm_pool_metadata *pmd) pmd 404 drivers/md/dm-thin-metadata.c __releases(pmd->root_lock) pmd 406 drivers/md/dm-thin-metadata.c up_write(&pmd->root_lock); pmd 411 drivers/md/dm-thin-metadata.c static int superblock_lock_zero(struct dm_pool_metadata *pmd, pmd 414 drivers/md/dm-thin-metadata.c return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 418 drivers/md/dm-thin-metadata.c static int superblock_lock(struct dm_pool_metadata *pmd, pmd 421 drivers/md/dm-thin-metadata.c return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 454 drivers/md/dm-thin-metadata.c static void __setup_btree_details(struct dm_pool_metadata *pmd) pmd 456 drivers/md/dm-thin-metadata.c pmd->info.tm = pmd->tm; pmd 457 drivers/md/dm-thin-metadata.c pmd->info.levels = 2; pmd 458 drivers/md/dm-thin-metadata.c pmd->info.value_type.context = pmd->data_sm; pmd 459 drivers/md/dm-thin-metadata.c pmd->info.value_type.size = sizeof(__le64); pmd 460 drivers/md/dm-thin-metadata.c pmd->info.value_type.inc = data_block_inc; pmd 461 drivers/md/dm-thin-metadata.c pmd->info.value_type.dec = data_block_dec; pmd 462 drivers/md/dm-thin-metadata.c pmd->info.value_type.equal = data_block_equal; pmd 464 drivers/md/dm-thin-metadata.c memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info)); pmd 465 drivers/md/dm-thin-metadata.c pmd->nb_info.tm = pmd->nb_tm; pmd 467 drivers/md/dm-thin-metadata.c pmd->tl_info.tm = pmd->tm; pmd 468 drivers/md/dm-thin-metadata.c pmd->tl_info.levels = 1; pmd 469 drivers/md/dm-thin-metadata.c pmd->tl_info.value_type.context = &pmd->bl_info; pmd 470 drivers/md/dm-thin-metadata.c pmd->tl_info.value_type.size = sizeof(__le64);
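
Note the change of subject from here on: in xgene_edac.c a "pmd" is an X-Gene processor module (the driver addresses cores per module via MAX_CPU_PER_PMD), and in the dm-thin-metadata.c entries below it abbreviates struct dm_pool_metadata; neither is a page middle directory. The availability probe in xgene_edac_pmd_available() above is a plain efuse bit test in which a set bit marks the module fused off. A standalone restatement, with an invented example fuse word:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the test above: a set efuse bit marks the module disabled,
     * so the function reports available only when the bit is clear. */
    static int xgene_edac_pmd_available(uint32_t efuse, int pmd)
    {
            return (efuse & (1 << pmd)) ? 0 : 1;
    }

    int main(void)
    {
            uint32_t efuse = 0x0000000a;   /* example fuse word: modules 1 and 3 off */
            int pmd;

            for (pmd = 0; pmd < 4; pmd++)
                    printf("PMD%d: %s\n", pmd,
                           xgene_edac_pmd_available(efuse, pmd) ? "available" : "fused off");
            return 0;
    }
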
pmd->tl_info.value_type.inc = subtree_inc; pmd 472 drivers/md/dm-thin-metadata.c pmd->tl_info.value_type.dec = subtree_dec; pmd 473 drivers/md/dm-thin-metadata.c pmd->tl_info.value_type.equal = subtree_equal; pmd 475 drivers/md/dm-thin-metadata.c pmd->bl_info.tm = pmd->tm; pmd 476 drivers/md/dm-thin-metadata.c pmd->bl_info.levels = 1; pmd 477 drivers/md/dm-thin-metadata.c pmd->bl_info.value_type.context = pmd->data_sm; pmd 478 drivers/md/dm-thin-metadata.c pmd->bl_info.value_type.size = sizeof(__le64); pmd 479 drivers/md/dm-thin-metadata.c pmd->bl_info.value_type.inc = data_block_inc; pmd 480 drivers/md/dm-thin-metadata.c pmd->bl_info.value_type.dec = data_block_dec; pmd 481 drivers/md/dm-thin-metadata.c pmd->bl_info.value_type.equal = data_block_equal; pmd 483 drivers/md/dm-thin-metadata.c pmd->details_info.tm = pmd->tm; pmd 484 drivers/md/dm-thin-metadata.c pmd->details_info.levels = 1; pmd 485 drivers/md/dm-thin-metadata.c pmd->details_info.value_type.context = NULL; pmd 486 drivers/md/dm-thin-metadata.c pmd->details_info.value_type.size = sizeof(struct disk_device_details); pmd 487 drivers/md/dm-thin-metadata.c pmd->details_info.value_type.inc = NULL; pmd 488 drivers/md/dm-thin-metadata.c pmd->details_info.value_type.dec = NULL; pmd 489 drivers/md/dm-thin-metadata.c pmd->details_info.value_type.equal = NULL; pmd 492 drivers/md/dm-thin-metadata.c static int save_sm_roots(struct dm_pool_metadata *pmd) pmd 497 drivers/md/dm-thin-metadata.c r = dm_sm_root_size(pmd->metadata_sm, &len); pmd 501 drivers/md/dm-thin-metadata.c r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); pmd 505 drivers/md/dm-thin-metadata.c r = dm_sm_root_size(pmd->data_sm, &len); pmd 509 drivers/md/dm-thin-metadata.c return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); pmd 512 drivers/md/dm-thin-metadata.c static void copy_sm_roots(struct dm_pool_metadata *pmd, pmd 516 drivers/md/dm-thin-metadata.c &pmd->metadata_space_map_root, pmd 517 drivers/md/dm-thin-metadata.c sizeof(pmd->metadata_space_map_root)); pmd 520 drivers/md/dm-thin-metadata.c &pmd->data_space_map_root, pmd 521 drivers/md/dm-thin-metadata.c sizeof(pmd->data_space_map_root)); pmd 524 drivers/md/dm-thin-metadata.c static int __write_initial_superblock(struct dm_pool_metadata *pmd) pmd 529 drivers/md/dm-thin-metadata.c sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; pmd 534 drivers/md/dm-thin-metadata.c r = dm_sm_commit(pmd->data_sm); pmd 538 drivers/md/dm-thin-metadata.c r = dm_tm_pre_commit(pmd->tm); pmd 542 drivers/md/dm-thin-metadata.c r = save_sm_roots(pmd); pmd 546 drivers/md/dm-thin-metadata.c r = superblock_lock_zero(pmd, &sblock); pmd 559 drivers/md/dm-thin-metadata.c copy_sm_roots(pmd, disk_super); pmd 561 drivers/md/dm-thin-metadata.c disk_super->data_mapping_root = cpu_to_le64(pmd->root); pmd 562 drivers/md/dm-thin-metadata.c disk_super->device_details_root = cpu_to_le64(pmd->details_root); pmd 565 drivers/md/dm-thin-metadata.c disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); pmd 567 drivers/md/dm-thin-metadata.c return dm_tm_commit(pmd->tm, sblock); pmd 570 drivers/md/dm-thin-metadata.c static int __format_metadata(struct dm_pool_metadata *pmd) pmd 574 drivers/md/dm-thin-metadata.c r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 575 drivers/md/dm-thin-metadata.c &pmd->tm, &pmd->metadata_sm); pmd 581 drivers/md/dm-thin-metadata.c pmd->data_sm = dm_sm_disk_create(pmd->tm, 0); pmd 582 drivers/md/dm-thin-metadata.c if (IS_ERR(pmd->data_sm)) { pmd 584 
drivers/md/dm-thin-metadata.c r = PTR_ERR(pmd->data_sm); pmd 588 drivers/md/dm-thin-metadata.c pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); pmd 589 drivers/md/dm-thin-metadata.c if (!pmd->nb_tm) { pmd 595 drivers/md/dm-thin-metadata.c __setup_btree_details(pmd); pmd 597 drivers/md/dm-thin-metadata.c r = dm_btree_empty(&pmd->info, &pmd->root); pmd 601 drivers/md/dm-thin-metadata.c r = dm_btree_empty(&pmd->details_info, &pmd->details_root); pmd 607 drivers/md/dm-thin-metadata.c r = __write_initial_superblock(pmd); pmd 614 drivers/md/dm-thin-metadata.c dm_tm_destroy(pmd->nb_tm); pmd 616 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->data_sm); pmd 618 drivers/md/dm-thin-metadata.c dm_tm_destroy(pmd->tm); pmd 619 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->metadata_sm); pmd 625 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd) pmd 639 drivers/md/dm-thin-metadata.c if (get_disk_ro(pmd->bdev->bd_disk)) pmd 652 drivers/md/dm-thin-metadata.c static int __open_metadata(struct dm_pool_metadata *pmd) pmd 658 drivers/md/dm-thin-metadata.c r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 668 drivers/md/dm-thin-metadata.c if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { pmd 671 drivers/md/dm-thin-metadata.c (unsigned long long)pmd->data_block_size); pmd 676 drivers/md/dm-thin-metadata.c r = __check_incompat_features(disk_super, pmd); pmd 680 drivers/md/dm-thin-metadata.c r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 683 drivers/md/dm-thin-metadata.c &pmd->tm, &pmd->metadata_sm); pmd 689 drivers/md/dm-thin-metadata.c pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root, pmd 691 drivers/md/dm-thin-metadata.c if (IS_ERR(pmd->data_sm)) { pmd 693 drivers/md/dm-thin-metadata.c r = PTR_ERR(pmd->data_sm); pmd 697 drivers/md/dm-thin-metadata.c pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); pmd 698 drivers/md/dm-thin-metadata.c if (!pmd->nb_tm) { pmd 704 drivers/md/dm-thin-metadata.c __setup_btree_details(pmd); pmd 710 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->data_sm); pmd 712 drivers/md/dm-thin-metadata.c dm_tm_destroy(pmd->tm); pmd 713 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->metadata_sm); pmd 720 drivers/md/dm-thin-metadata.c static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device) pmd 724 drivers/md/dm-thin-metadata.c r = __superblock_all_zeroes(pmd->bm, &unformatted); pmd 729 drivers/md/dm-thin-metadata.c return format_device ? 
__format_metadata(pmd) : -EPERM; pmd 731 drivers/md/dm-thin-metadata.c return __open_metadata(pmd); pmd 734 drivers/md/dm-thin-metadata.c static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device) pmd 738 drivers/md/dm-thin-metadata.c pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, pmd 740 drivers/md/dm-thin-metadata.c if (IS_ERR(pmd->bm)) { pmd 742 drivers/md/dm-thin-metadata.c return PTR_ERR(pmd->bm); pmd 745 drivers/md/dm-thin-metadata.c r = __open_or_format_metadata(pmd, format_device); pmd 747 drivers/md/dm-thin-metadata.c dm_block_manager_destroy(pmd->bm); pmd 752 drivers/md/dm-thin-metadata.c static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) pmd 754 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->data_sm); pmd 755 drivers/md/dm-thin-metadata.c dm_sm_destroy(pmd->metadata_sm); pmd 756 drivers/md/dm-thin-metadata.c dm_tm_destroy(pmd->nb_tm); pmd 757 drivers/md/dm-thin-metadata.c dm_tm_destroy(pmd->tm); pmd 758 drivers/md/dm-thin-metadata.c dm_block_manager_destroy(pmd->bm); pmd 761 drivers/md/dm-thin-metadata.c static int __begin_transaction(struct dm_pool_metadata *pmd) pmd 771 drivers/md/dm-thin-metadata.c r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 777 drivers/md/dm-thin-metadata.c pmd->time = le32_to_cpu(disk_super->time); pmd 778 drivers/md/dm-thin-metadata.c pmd->root = le64_to_cpu(disk_super->data_mapping_root); pmd 779 drivers/md/dm-thin-metadata.c pmd->details_root = le64_to_cpu(disk_super->device_details_root); pmd 780 drivers/md/dm-thin-metadata.c pmd->trans_id = le64_to_cpu(disk_super->trans_id); pmd 781 drivers/md/dm-thin-metadata.c pmd->flags = le32_to_cpu(disk_super->flags); pmd 782 drivers/md/dm-thin-metadata.c pmd->data_block_size = le32_to_cpu(disk_super->data_block_size); pmd 788 drivers/md/dm-thin-metadata.c static int __write_changed_details(struct dm_pool_metadata *pmd) pmd 795 drivers/md/dm-thin-metadata.c list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { pmd 807 drivers/md/dm-thin-metadata.c r = dm_btree_insert(&pmd->details_info, pmd->details_root, pmd 808 drivers/md/dm-thin-metadata.c &key, &details, &pmd->details_root); pmd 823 drivers/md/dm-thin-metadata.c static int __commit_transaction(struct dm_pool_metadata *pmd) pmd 833 drivers/md/dm-thin-metadata.c BUG_ON(!rwsem_is_locked(&pmd->root_lock)); pmd 835 drivers/md/dm-thin-metadata.c if (unlikely(!pmd->in_service)) pmd 838 drivers/md/dm-thin-metadata.c if (pmd->pre_commit_fn) { pmd 839 drivers/md/dm-thin-metadata.c r = pmd->pre_commit_fn(pmd->pre_commit_context); pmd 846 drivers/md/dm-thin-metadata.c r = __write_changed_details(pmd); pmd 850 drivers/md/dm-thin-metadata.c r = dm_sm_commit(pmd->data_sm); pmd 854 drivers/md/dm-thin-metadata.c r = dm_tm_pre_commit(pmd->tm); pmd 858 drivers/md/dm-thin-metadata.c r = save_sm_roots(pmd); pmd 862 drivers/md/dm-thin-metadata.c r = superblock_lock(pmd, &sblock); pmd 867 drivers/md/dm-thin-metadata.c disk_super->time = cpu_to_le32(pmd->time); pmd 868 drivers/md/dm-thin-metadata.c disk_super->data_mapping_root = cpu_to_le64(pmd->root); pmd 869 drivers/md/dm-thin-metadata.c disk_super->device_details_root = cpu_to_le64(pmd->details_root); pmd 870 drivers/md/dm-thin-metadata.c disk_super->trans_id = cpu_to_le64(pmd->trans_id); pmd 871 drivers/md/dm-thin-metadata.c disk_super->flags = cpu_to_le32(pmd->flags); pmd 873 drivers/md/dm-thin-metadata.c copy_sm_roots(pmd, disk_super); pmd 875 drivers/md/dm-thin-metadata.c return dm_tm_commit(pmd->tm, 
sblock); pmd 878 drivers/md/dm-thin-metadata.c static void __set_metadata_reserve(struct dm_pool_metadata *pmd) pmd 884 drivers/md/dm-thin-metadata.c r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total); pmd 887 drivers/md/dm-thin-metadata.c pmd->metadata_reserve = max_blocks; pmd 889 drivers/md/dm-thin-metadata.c pmd->metadata_reserve = min(max_blocks, div_u64(total, 10)); pmd 897 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd; pmd 899 drivers/md/dm-thin-metadata.c pmd = kmalloc(sizeof(*pmd), GFP_KERNEL); pmd 900 drivers/md/dm-thin-metadata.c if (!pmd) { pmd 905 drivers/md/dm-thin-metadata.c init_rwsem(&pmd->root_lock); pmd 906 drivers/md/dm-thin-metadata.c pmd->time = 0; pmd 907 drivers/md/dm-thin-metadata.c INIT_LIST_HEAD(&pmd->thin_devices); pmd 908 drivers/md/dm-thin-metadata.c pmd->fail_io = false; pmd 909 drivers/md/dm-thin-metadata.c pmd->in_service = false; pmd 910 drivers/md/dm-thin-metadata.c pmd->bdev = bdev; pmd 911 drivers/md/dm-thin-metadata.c pmd->data_block_size = data_block_size; pmd 912 drivers/md/dm-thin-metadata.c pmd->pre_commit_fn = NULL; pmd 913 drivers/md/dm-thin-metadata.c pmd->pre_commit_context = NULL; pmd 915 drivers/md/dm-thin-metadata.c r = __create_persistent_data_objects(pmd, format_device); pmd 917 drivers/md/dm-thin-metadata.c kfree(pmd); pmd 921 drivers/md/dm-thin-metadata.c r = __begin_transaction(pmd); pmd 923 drivers/md/dm-thin-metadata.c if (dm_pool_metadata_close(pmd) < 0) pmd 928 drivers/md/dm-thin-metadata.c __set_metadata_reserve(pmd); pmd 930 drivers/md/dm-thin-metadata.c return pmd; pmd 933 drivers/md/dm-thin-metadata.c int dm_pool_metadata_close(struct dm_pool_metadata *pmd) pmd 939 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 940 drivers/md/dm-thin-metadata.c list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { pmd 948 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 956 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 957 drivers/md/dm-thin-metadata.c if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) { pmd 958 drivers/md/dm-thin-metadata.c r = __commit_transaction(pmd); pmd 963 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 964 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 965 drivers/md/dm-thin-metadata.c __destroy_persistent_data_objects(pmd); pmd 967 drivers/md/dm-thin-metadata.c kfree(pmd); pmd 976 drivers/md/dm-thin-metadata.c static int __open_device(struct dm_pool_metadata *pmd, pmd 988 drivers/md/dm-thin-metadata.c list_for_each_entry(td2, &pmd->thin_devices, list) pmd 1004 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->details_info, pmd->details_root, pmd 1015 drivers/md/dm-thin-metadata.c details_le.transaction_id = cpu_to_le64(pmd->trans_id); pmd 1016 drivers/md/dm-thin-metadata.c details_le.creation_time = cpu_to_le32(pmd->time); pmd 1017 drivers/md/dm-thin-metadata.c details_le.snapshotted_time = cpu_to_le32(pmd->time); pmd 1024 drivers/md/dm-thin-metadata.c (*td)->pmd = pmd; pmd 1034 drivers/md/dm-thin-metadata.c list_add(&(*td)->list, &pmd->thin_devices); pmd 1044 drivers/md/dm-thin-metadata.c static int __create_thin(struct dm_pool_metadata *pmd, pmd 1054 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->details_info, pmd->details_root, pmd 1062 drivers/md/dm-thin-metadata.c r = dm_btree_empty(&pmd->bl_info, &dev_root); pmd 1071 drivers/md/dm-thin-metadata.c r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root); pmd 1073 drivers/md/dm-thin-metadata.c dm_btree_del(&pmd->bl_info, dev_root); pmd 1077 
drivers/md/dm-thin-metadata.c r = __open_device(pmd, dev, 1, &td); pmd 1079 drivers/md/dm-thin-metadata.c dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); pmd 1080 drivers/md/dm-thin-metadata.c dm_btree_del(&pmd->bl_info, dev_root); pmd 1088 drivers/md/dm-thin-metadata.c int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev) pmd 1092 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1093 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1094 drivers/md/dm-thin-metadata.c r = __create_thin(pmd, dev); pmd 1095 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1100 drivers/md/dm-thin-metadata.c static int __set_snapshot_details(struct dm_pool_metadata *pmd, pmd 1107 drivers/md/dm-thin-metadata.c r = __open_device(pmd, origin, 0, &td); pmd 1121 drivers/md/dm-thin-metadata.c static int __create_snap(struct dm_pool_metadata *pmd, pmd 1132 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->details_info, pmd->details_root, pmd 1138 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value); pmd 1144 drivers/md/dm-thin-metadata.c dm_tm_inc(pmd->tm, origin_root); pmd 1150 drivers/md/dm-thin-metadata.c r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root); pmd 1152 drivers/md/dm-thin-metadata.c dm_tm_dec(pmd->tm, origin_root); pmd 1156 drivers/md/dm-thin-metadata.c pmd->time++; pmd 1158 drivers/md/dm-thin-metadata.c r = __open_device(pmd, dev, 1, &td); pmd 1162 drivers/md/dm-thin-metadata.c r = __set_snapshot_details(pmd, td, origin, pmd->time); pmd 1171 drivers/md/dm-thin-metadata.c dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); pmd 1172 drivers/md/dm-thin-metadata.c dm_btree_remove(&pmd->details_info, pmd->details_root, pmd 1173 drivers/md/dm-thin-metadata.c &key, &pmd->details_root); pmd 1177 drivers/md/dm-thin-metadata.c int dm_pool_create_snap(struct dm_pool_metadata *pmd, pmd 1183 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1184 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1185 drivers/md/dm-thin-metadata.c r = __create_snap(pmd, dev, origin); pmd 1186 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1191 drivers/md/dm-thin-metadata.c static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev) pmd 1198 drivers/md/dm-thin-metadata.c r = __open_device(pmd, dev, 0, &td); pmd 1209 drivers/md/dm-thin-metadata.c r = dm_btree_remove(&pmd->details_info, pmd->details_root, pmd 1210 drivers/md/dm-thin-metadata.c &key, &pmd->details_root); pmd 1214 drivers/md/dm-thin-metadata.c r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root); pmd 1221 drivers/md/dm-thin-metadata.c int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd, pmd 1226 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1227 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1228 drivers/md/dm-thin-metadata.c r = __delete_device(pmd, dev); pmd 1229 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1234 drivers/md/dm-thin-metadata.c int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd, pmd 1240 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1242 drivers/md/dm-thin-metadata.c if (pmd->fail_io) pmd 1245 drivers/md/dm-thin-metadata.c if (pmd->trans_id != current_id) { pmd 1250 drivers/md/dm-thin-metadata.c pmd->trans_id = new_id; pmd 1254 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1259 drivers/md/dm-thin-metadata.c int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, pmd 1264 drivers/md/dm-thin-metadata.c 
down_read(&pmd->root_lock); pmd 1265 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) { pmd 1266 drivers/md/dm-thin-metadata.c *result = pmd->trans_id; pmd 1269 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1274 drivers/md/dm-thin-metadata.c static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) pmd 1285 drivers/md/dm-thin-metadata.c r = __commit_transaction(pmd); pmd 1295 drivers/md/dm-thin-metadata.c dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); pmd 1296 drivers/md/dm-thin-metadata.c r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION, pmd 1309 drivers/md/dm-thin-metadata.c dm_tm_dec(pmd->tm, held_root); pmd 1310 drivers/md/dm-thin-metadata.c dm_tm_unlock(pmd->tm, copy); pmd 1325 drivers/md/dm-thin-metadata.c dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root)); pmd 1326 drivers/md/dm-thin-metadata.c dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root)); pmd 1327 drivers/md/dm-thin-metadata.c dm_tm_unlock(pmd->tm, copy); pmd 1332 drivers/md/dm-thin-metadata.c r = superblock_lock(pmd, &sblock); pmd 1334 drivers/md/dm-thin-metadata.c dm_tm_dec(pmd->tm, held_root); pmd 1344 drivers/md/dm-thin-metadata.c int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd) pmd 1348 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1349 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1350 drivers/md/dm-thin-metadata.c r = __reserve_metadata_snap(pmd); pmd 1351 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1356 drivers/md/dm-thin-metadata.c static int __release_metadata_snap(struct dm_pool_metadata *pmd) pmd 1363 drivers/md/dm-thin-metadata.c r = superblock_lock(pmd, &sblock); pmd 1378 drivers/md/dm-thin-metadata.c r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy); pmd 1383 drivers/md/dm-thin-metadata.c dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); pmd 1384 drivers/md/dm-thin-metadata.c dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); pmd 1385 drivers/md/dm-thin-metadata.c dm_sm_dec_block(pmd->metadata_sm, held_root); pmd 1387 drivers/md/dm-thin-metadata.c dm_tm_unlock(pmd->tm, copy); pmd 1392 drivers/md/dm-thin-metadata.c int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd) pmd 1396 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1397 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1398 drivers/md/dm-thin-metadata.c r = __release_metadata_snap(pmd); pmd 1399 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1404 drivers/md/dm-thin-metadata.c static int __get_metadata_snap(struct dm_pool_metadata *pmd, pmd 1411 drivers/md/dm-thin-metadata.c r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, pmd 1424 drivers/md/dm-thin-metadata.c int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, pmd 1429 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1430 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1431 drivers/md/dm-thin-metadata.c r = __get_metadata_snap(pmd, result); pmd 1432 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1437 drivers/md/dm-thin-metadata.c int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev, pmd 1442 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 1443 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1444 drivers/md/dm-thin-metadata.c r = __open_device(pmd, dev, 0, td); pmd 1445 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1452 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(td->pmd); pmd 1454
drivers/md/dm-thin-metadata.c pmd_write_unlock(td->pmd); pmd 1493 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1498 drivers/md/dm-thin-metadata.c info = &pmd->info; pmd 1500 drivers/md/dm-thin-metadata.c info = &pmd->nb_info; pmd 1502 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(info, pmd->root, keys, &value); pmd 1513 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1515 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1516 drivers/md/dm-thin-metadata.c if (pmd->fail_io) { pmd 1517 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1523 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1533 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1536 drivers/md/dm-thin-metadata.c r = dm_btree_lookup_next(&pmd->info, pmd->root, keys, vblock, &value); pmd 1595 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1597 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1598 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) { pmd 1602 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1612 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1615 drivers/md/dm-thin-metadata.c value = cpu_to_le64(pack_block_time(data_block, pmd->time)); pmd 1618 drivers/md/dm-thin-metadata.c r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value, pmd 1619 drivers/md/dm-thin-metadata.c &pmd->root, &inserted); pmd 1635 drivers/md/dm-thin-metadata.c pmd_write_lock(td->pmd); pmd 1636 drivers/md/dm-thin-metadata.c if (!td->pmd->fail_io) pmd 1638 drivers/md/dm-thin-metadata.c pmd_write_unlock(td->pmd); pmd 1646 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1649 drivers/md/dm-thin-metadata.c r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root); pmd 1663 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1671 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->tl_info, pmd->root, keys, &value); pmd 1680 drivers/md/dm-thin-metadata.c dm_tm_inc(pmd->tm, mapping_root); pmd 1681 drivers/md/dm-thin-metadata.c r = dm_btree_remove(&pmd->tl_info, pmd->root, keys, &pmd->root); pmd 1690 drivers/md/dm-thin-metadata.c r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value); pmd 1700 drivers/md/dm-thin-metadata.c r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count); pmd 1715 drivers/md/dm-thin-metadata.c return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root); pmd 1722 drivers/md/dm-thin-metadata.c pmd_write_lock(td->pmd); pmd 1723 drivers/md/dm-thin-metadata.c if (!td->pmd->fail_io) pmd 1725 drivers/md/dm-thin-metadata.c pmd_write_unlock(td->pmd); pmd 1735 drivers/md/dm-thin-metadata.c pmd_write_lock(td->pmd); pmd 1736 drivers/md/dm-thin-metadata.c if (!td->pmd->fail_io) pmd 1738 drivers/md/dm-thin-metadata.c pmd_write_unlock(td->pmd); pmd 1743 drivers/md/dm-thin-metadata.c int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) pmd 1748 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1749 drivers/md/dm-thin-metadata.c r = dm_sm_get_count(pmd->data_sm, b, &ref_count); pmd 1752 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1757 drivers/md/dm-thin-metadata.c int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) pmd 1761 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1763 drivers/md/dm-thin-metadata.c r = 
dm_sm_inc_block(pmd->data_sm, b); pmd 1767 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1772 drivers/md/dm-thin-metadata.c int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e) pmd 1776 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1778 drivers/md/dm-thin-metadata.c r = dm_sm_dec_block(pmd->data_sm, b); pmd 1782 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1791 drivers/md/dm-thin-metadata.c down_read(&td->pmd->root_lock); pmd 1793 drivers/md/dm-thin-metadata.c up_read(&td->pmd->root_lock); pmd 1798 drivers/md/dm-thin-metadata.c bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd) pmd 1803 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1804 drivers/md/dm-thin-metadata.c list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { pmd 1810 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1819 drivers/md/dm-thin-metadata.c down_read(&td->pmd->root_lock); pmd 1821 drivers/md/dm-thin-metadata.c up_read(&td->pmd->root_lock); pmd 1826 drivers/md/dm-thin-metadata.c int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result) pmd 1830 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1831 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1832 drivers/md/dm-thin-metadata.c r = dm_sm_new_block(pmd->data_sm, result); pmd 1833 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1838 drivers/md/dm-thin-metadata.c int dm_pool_commit_metadata(struct dm_pool_metadata *pmd) pmd 1846 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 1847 drivers/md/dm-thin-metadata.c if (pmd->fail_io) pmd 1850 drivers/md/dm-thin-metadata.c r = __commit_transaction(pmd); pmd 1857 drivers/md/dm-thin-metadata.c r = __begin_transaction(pmd); pmd 1859 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1863 drivers/md/dm-thin-metadata.c static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) pmd 1867 drivers/md/dm-thin-metadata.c list_for_each_entry(td, &pmd->thin_devices, list) pmd 1871 drivers/md/dm-thin-metadata.c int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) pmd 1875 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 1876 drivers/md/dm-thin-metadata.c if (pmd->fail_io) pmd 1879 drivers/md/dm-thin-metadata.c __set_abort_with_changes_flags(pmd); pmd 1880 drivers/md/dm-thin-metadata.c __destroy_persistent_data_objects(pmd); pmd 1881 drivers/md/dm-thin-metadata.c r = __create_persistent_data_objects(pmd, false); pmd 1883 drivers/md/dm-thin-metadata.c pmd->fail_io = true; pmd 1886 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 1891 drivers/md/dm-thin-metadata.c int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result) pmd 1895 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1896 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1897 drivers/md/dm-thin-metadata.c r = dm_sm_get_nr_free(pmd->data_sm, result); pmd 1898 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1903 drivers/md/dm-thin-metadata.c int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, pmd 1908 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1909 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1910 drivers/md/dm-thin-metadata.c r = dm_sm_get_nr_free(pmd->metadata_sm, result); pmd 1913 drivers/md/dm-thin-metadata.c if (*result < pmd->metadata_reserve) pmd 1916 drivers/md/dm-thin-metadata.c *result -= pmd->metadata_reserve; pmd 1918 drivers/md/dm-thin-metadata.c 
up_read(&pmd->root_lock); pmd 1923 drivers/md/dm-thin-metadata.c int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, pmd 1928 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1929 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1930 drivers/md/dm-thin-metadata.c r = dm_sm_get_nr_blocks(pmd->metadata_sm, result); pmd 1931 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1936 drivers/md/dm-thin-metadata.c int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) pmd 1940 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1941 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1942 drivers/md/dm-thin-metadata.c r = dm_sm_get_nr_blocks(pmd->data_sm, result); pmd 1943 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1951 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1953 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1954 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) { pmd 1958 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 1968 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1970 drivers/md/dm-thin-metadata.c r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le); pmd 1976 drivers/md/dm-thin-metadata.c return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result); pmd 1983 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *pmd = td->pmd; pmd 1985 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 1986 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 1988 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 2013 drivers/md/dm-thin-metadata.c int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) pmd 2017 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 2018 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 2019 drivers/md/dm-thin-metadata.c r = __resize_space_map(pmd->data_sm, new_count); pmd 2020 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2025 drivers/md/dm-thin-metadata.c int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) pmd 2029 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 2030 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) { pmd 2031 drivers/md/dm-thin-metadata.c r = __resize_space_map(pmd->metadata_sm, new_count); pmd 2033 drivers/md/dm-thin-metadata.c __set_metadata_reserve(pmd); pmd 2035 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2040 drivers/md/dm-thin-metadata.c void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) pmd 2042 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 2043 drivers/md/dm-thin-metadata.c dm_bm_set_read_only(pmd->bm); pmd 2044 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2047 drivers/md/dm-thin-metadata.c void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd) pmd 2049 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 2050 drivers/md/dm-thin-metadata.c dm_bm_set_read_write(pmd->bm); pmd 2051 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2054 drivers/md/dm-thin-metadata.c int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, pmd 2061 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 2062 drivers/md/dm-thin-metadata.c r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); pmd 2063 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2068 drivers/md/dm-thin-metadata.c void dm_pool_register_pre_commit_callback(struct dm_pool_metadata 
*pmd, pmd 2072 drivers/md/dm-thin-metadata.c pmd_write_lock_in_core(pmd); pmd 2073 drivers/md/dm-thin-metadata.c pmd->pre_commit_fn = fn; pmd 2074 drivers/md/dm-thin-metadata.c pmd->pre_commit_context = context; pmd 2075 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2078 drivers/md/dm-thin-metadata.c int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) pmd 2084 drivers/md/dm-thin-metadata.c pmd_write_lock(pmd); pmd 2085 drivers/md/dm-thin-metadata.c if (pmd->fail_io) pmd 2088 drivers/md/dm-thin-metadata.c pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; pmd 2090 drivers/md/dm-thin-metadata.c r = superblock_lock(pmd, &sblock); pmd 2097 drivers/md/dm-thin-metadata.c disk_super->flags = cpu_to_le32(pmd->flags); pmd 2101 drivers/md/dm-thin-metadata.c pmd_write_unlock(pmd); pmd 2105 drivers/md/dm-thin-metadata.c bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) pmd 2109 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 2110 drivers/md/dm-thin-metadata.c needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; pmd 2111 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 2116 drivers/md/dm-thin-metadata.c void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd) pmd 2118 drivers/md/dm-thin-metadata.c down_read(&pmd->root_lock); pmd 2119 drivers/md/dm-thin-metadata.c if (!pmd->fail_io) pmd 2120 drivers/md/dm-thin-metadata.c dm_tm_issue_prefetches(pmd->tm); pmd 2121 drivers/md/dm-thin-metadata.c up_read(&pmd->root_lock); pmd 48 drivers/md/dm-thin-metadata.h int dm_pool_metadata_close(struct dm_pool_metadata *pmd); pmd 61 drivers/md/dm-thin-metadata.h int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev); pmd 69 drivers/md/dm-thin-metadata.h int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev, pmd 77 drivers/md/dm-thin-metadata.h int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd, pmd 84 drivers/md/dm-thin-metadata.h int dm_pool_commit_metadata(struct dm_pool_metadata *pmd); pmd 94 drivers/md/dm-thin-metadata.h int dm_pool_abort_metadata(struct dm_pool_metadata *pmd); pmd 99 drivers/md/dm-thin-metadata.h int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd, pmd 103 drivers/md/dm-thin-metadata.h int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, pmd 115 drivers/md/dm-thin-metadata.h int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd); pmd 116 drivers/md/dm-thin-metadata.h int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd); pmd 118 drivers/md/dm-thin-metadata.h int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, pmd 128 drivers/md/dm-thin-metadata.h int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev, pmd 161 drivers/md/dm-thin-metadata.h int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result); pmd 178 drivers/md/dm-thin-metadata.h bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd); pmd 187 drivers/md/dm-thin-metadata.h int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, pmd 190 drivers/md/dm-thin-metadata.h int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, pmd 193 drivers/md/dm-thin-metadata.h int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, pmd 196 drivers/md/dm-thin-metadata.h int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); pmd 198 drivers/md/dm-thin-metadata.h int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); pmd 200 drivers/md/dm-thin-metadata.h int 
dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); pmd 201 drivers/md/dm-thin-metadata.h int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); pmd 207 drivers/md/dm-thin-metadata.h int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); pmd 208 drivers/md/dm-thin-metadata.h int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); pmd 214 drivers/md/dm-thin-metadata.h void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); pmd 215 drivers/md/dm-thin-metadata.h void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd); pmd 217 drivers/md/dm-thin-metadata.h int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, pmd 225 drivers/md/dm-thin-metadata.h int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); pmd 226 drivers/md/dm-thin-metadata.h bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); pmd 231 drivers/md/dm-thin-metadata.h void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd); pmd 236 drivers/md/dm-thin-metadata.h void dm_pool_register_pre_commit_callback(struct dm_pool_metadata *pmd, pmd 236 drivers/md/dm-thin.c struct dm_pool_metadata *pmd; pmd 1097 drivers/md/dm-thin.c r = dm_pool_block_is_shared(pool->pmd, b, &shared); pmd 1110 drivers/md/dm-thin.c r = dm_pool_block_is_shared(pool->pmd, e, &shared); pmd 1175 drivers/md/dm-thin.c r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); pmd 1216 drivers/md/dm-thin.c r = dm_pool_dec_data_range(pool->pmd, m->data_block, pmd 1461 drivers/md/dm-thin.c r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); pmd 1481 drivers/md/dm-thin.c r = dm_pool_get_free_block_count(pool->pmd, &nr_free); pmd 1502 drivers/md/dm-thin.c r = dm_pool_commit_metadata(pool->pmd); pmd 1536 drivers/md/dm-thin.c r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); pmd 1553 drivers/md/dm-thin.c r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); pmd 1565 drivers/md/dm-thin.c r = dm_pool_alloc_data_block(pool->pmd, result); pmd 1574 drivers/md/dm-thin.c r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); pmd 2225 drivers/md/dm-thin.c dm_pool_issue_prefetches(pool->pmd); pmd 2382 drivers/md/dm-thin.c !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) pmd 2414 drivers/md/dm-thin.c dm_pool_issue_prefetches(pool->pmd); pmd 2539 drivers/md/dm-thin.c bool needs_check = dm_pool_metadata_needs_check(pool->pmd); pmd 2565 drivers/md/dm-thin.c dm_pool_metadata_read_only(pool->pmd); pmd 2578 drivers/md/dm-thin.c dm_pool_metadata_read_only(pool->pmd); pmd 2614 drivers/md/dm-thin.c dm_pool_metadata_read_write(pool->pmd); pmd 2639 drivers/md/dm-thin.c if (dm_pool_abort_metadata(pool->pmd)) { pmd 2644 drivers/md/dm-thin.c if (dm_pool_metadata_set_needs_check(pool->pmd)) { pmd 2928 drivers/md/dm-thin.c if (dm_pool_metadata_close(pool->pmd) < 0) pmd 2956 drivers/md/dm-thin.c struct dm_pool_metadata *pmd; pmd 2959 drivers/md/dm-thin.c pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); pmd 2960 drivers/md/dm-thin.c if (IS_ERR(pmd)) { pmd 2962 drivers/md/dm-thin.c return (struct pool *)pmd; pmd 2972 drivers/md/dm-thin.c pool->pmd = pmd; pmd 3077 drivers/md/dm-thin.c if (dm_pool_metadata_close(pmd)) pmd 3424 drivers/md/dm-thin.c r = dm_pool_register_metadata_threshold(pt->pool->pmd, pmd 3482 drivers/md/dm-thin.c r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); pmd 3496 drivers/md/dm-thin.c if 
(dm_pool_metadata_needs_check(pool->pmd)) { pmd 3506 drivers/md/dm-thin.c r = dm_pool_resize_data_dev(pool->pmd, data_size); pmd 3529 drivers/md/dm-thin.c r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); pmd 3543 drivers/md/dm-thin.c if (dm_pool_metadata_needs_check(pool->pmd)) { pmd 3557 drivers/md/dm-thin.c r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); pmd 3594 drivers/md/dm-thin.c dm_pool_register_pre_commit_callback(pool->pmd, pmd 3729 drivers/md/dm-thin.c r = dm_pool_create_thin(pool->pmd, dev_id); pmd 3757 drivers/md/dm-thin.c r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); pmd 3780 drivers/md/dm-thin.c r = dm_pool_delete_thin_device(pool->pmd, dev_id); pmd 3806 drivers/md/dm-thin.c r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); pmd 3826 drivers/md/dm-thin.c r = dm_pool_reserve_metadata_snap(pool->pmd); pmd 3841 drivers/md/dm-thin.c r = dm_pool_release_metadata_snap(pool->pmd); pmd 3955 drivers/md/dm-thin.c r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); pmd 3962 drivers/md/dm-thin.c r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); pmd 3969 drivers/md/dm-thin.c r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); pmd 3976 drivers/md/dm-thin.c r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); pmd 3983 drivers/md/dm-thin.c r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); pmd 3990 drivers/md/dm-thin.c r = dm_pool_get_metadata_snap(pool->pmd, &held_root); pmd 4029 drivers/md/dm-thin.c if (dm_pool_metadata_needs_check(pool->pmd)) pmd 4270 drivers/md/dm-thin.c r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); pmd 316 drivers/soc/renesas/rmobile-sysc.c struct device_node *np, *pmd; pmd 328 drivers/soc/renesas/rmobile-sysc.c pmd = of_get_child_by_name(np, "pm-domains"); pmd 329 drivers/soc/renesas/rmobile-sysc.c if (!pmd) { pmd 340 drivers/soc/renesas/rmobile-sysc.c ret = rmobile_add_pm_domains(base, pmd, NULL); pmd 341 drivers/soc/renesas/rmobile-sysc.c of_node_put(pmd); pmd 814 fs/dax.c pmd_t pmd; pmd 822 fs/dax.c pmd = pmdp_invalidate(vma, address, pmdp); pmd 823 fs/dax.c pmd = pmd_wrprotect(pmd); pmd 824 fs/dax.c pmd = pmd_mkclean(pmd); pmd 825 fs/dax.c set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmd 1289 fs/dax.c if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { pmd 1442 fs/dax.c ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); pmd 1443 fs/dax.c if (!pmd_none(*(vmf->pmd))) { pmd 1449 fs/dax.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); pmd 1454 fs/dax.c set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); pmd 1541 fs/dax.c if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && pmd 1542 fs/dax.c !pmd_devmap(*vmf->pmd)) { pmd 1618 fs/dax.c split_huge_pmd(vma, vmf->pmd, vmf->address); pmd 573 fs/proc/task_mmu.c static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, pmd 582 fs/proc/task_mmu.c page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); pmd 593 fs/proc/task_mmu.c smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); pmd 596 fs/proc/task_mmu.c static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, pmd 602 fs/proc/task_mmu.c static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pmd 609 fs/proc/task_mmu.c ptl = pmd_trans_huge_lock(pmd, vma); pmd 611 fs/proc/task_mmu.c if (pmd_present(*pmd)) pmd 612 fs/proc/task_mmu.c smaps_pmd_entry(pmd, addr, walk); pmd 617 fs/proc/task_mmu.c if (pmd_trans_unstable(pmd)) pmd 624 
fs/proc/task_mmu.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); pmd 1022 fs/proc/task_mmu.c pmd_t old, pmd = *pmdp; pmd 1024 fs/proc/task_mmu.c if (pmd_present(pmd)) { pmd 1028 fs/proc/task_mmu.c pmd = pmd_mkdirty(pmd); pmd 1030 fs/proc/task_mmu.c pmd = pmd_mkyoung(pmd); pmd 1032 fs/proc/task_mmu.c pmd = pmd_wrprotect(pmd); pmd 1033 fs/proc/task_mmu.c pmd = pmd_clear_soft_dirty(pmd); pmd 1035 fs/proc/task_mmu.c set_pmd_at(vma->vm_mm, addr, pmdp, pmd); pmd 1036 fs/proc/task_mmu.c } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { pmd 1037 fs/proc/task_mmu.c pmd = pmd_swp_clear_soft_dirty(pmd); pmd 1038 fs/proc/task_mmu.c set_pmd_at(vma->vm_mm, addr, pmdp, pmd); pmd 1048 fs/proc/task_mmu.c static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, pmd 1057 fs/proc/task_mmu.c ptl = pmd_trans_huge_lock(pmd, vma); pmd 1060 fs/proc/task_mmu.c clear_soft_dirty_pmd(vma, addr, pmd); pmd 1064 fs/proc/task_mmu.c if (!pmd_present(*pmd)) pmd 1067 fs/proc/task_mmu.c page = pmd_page(*pmd); pmd 1070 fs/proc/task_mmu.c pmdp_test_and_clear_young(vma, addr, pmd); pmd 1078 fs/proc/task_mmu.c if (pmd_trans_unstable(pmd)) pmd 1081 fs/proc/task_mmu.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); pmd 1376 fs/proc/task_mmu.c pmd_t pmd = *pmdp; pmd 1382 fs/proc/task_mmu.c if (pmd_present(pmd)) { pmd 1383 fs/proc/task_mmu.c page = pmd_page(pmd); pmd 1386 fs/proc/task_mmu.c if (pmd_soft_dirty(pmd)) pmd 1389 fs/proc/task_mmu.c frame = pmd_pfn(pmd) + pmd 1393 fs/proc/task_mmu.c else if (is_swap_pmd(pmd)) { pmd 1394 fs/proc/task_mmu.c swp_entry_t entry = pmd_to_swp_entry(pmd); pmd 1404 fs/proc/task_mmu.c if (pmd_swp_soft_dirty(pmd)) pmd 1406 fs/proc/task_mmu.c VM_BUG_ON(!is_pmd_migration_entry(pmd)); pmd 1718 fs/proc/task_mmu.c static struct page *can_gather_numa_stats_pmd(pmd_t pmd, pmd 1725 fs/proc/task_mmu.c if (!pmd_present(pmd)) pmd 1728 fs/proc/task_mmu.c page = vm_normal_page_pmd(vma, addr, pmd); pmd 1743 fs/proc/task_mmu.c static int gather_pte_stats(pmd_t *pmd, unsigned long addr, pmd 1753 fs/proc/task_mmu.c ptl = pmd_trans_huge_lock(pmd, vma); pmd 1757 fs/proc/task_mmu.c page = can_gather_numa_stats_pmd(*pmd, vma, addr); pmd 1759 fs/proc/task_mmu.c gather_stats(page, md, pmd_dirty(*pmd), pmd 1765 fs/proc/task_mmu.c if (pmd_trans_unstable(pmd)) pmd 1768 fs/proc/task_mmu.c orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); pmd 285 fs/userfaultfd.c pmd_t *pmd, _pmd; pmd 300 fs/userfaultfd.c pmd = pmd_offset(pud, address); pmd 309 fs/userfaultfd.c _pmd = READ_ONCE(*pmd); pmd 324 fs/userfaultfd.c pte = pte_offset_map(pmd, address); pmd 26 include/asm-generic/4level-fixup.h #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) pmd 41 include/asm-generic/page.h unsigned long pmd[16]; pmd 52 include/asm-generic/page.h #define pmd_val(x) ((&x)->pmd[0]) pmd 34 include/asm-generic/pgtable-nopmd.h #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud)) pmd 36 include/asm-generic/pgtable-nopmd.h #define pud_populate(mm, pmd, pte) do { } while (0) pmd 60 include/asm-generic/pgtable-nopmd.h static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) pmd 82 include/asm-generic/pgtable.h pmd_t pmd = *pmdp; pmd 84 include/asm-generic/pgtable.h if (!pmd_young(pmd)) pmd 87 include/asm-generic/pgtable.h set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); pmd 141 include/asm-generic/pgtable.h pmd_t pmd = *pmdp; pmd 143 include/asm-generic/pgtable.h return pmd; pmd 319 include/asm-generic/pgtable.h unsigned long address, pmd_t *pmdp, pmd_t pmd) pmd 322 include/asm-generic/pgtable.h 
set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmd 358 include/asm-generic/pgtable.h #define pmd_access_permitted(pmd, write) \ pmd 359 include/asm-generic/pgtable.h (pmd_present(pmd) && (!(write) || pmd_write(pmd))) pmd 417 include/asm-generic/pgtable.h #define set_pmd_safe(pmdp, pmd) \ pmd 419 include/asm-generic/pgtable.h WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ pmd 420 include/asm-generic/pgtable.h set_pmd(pmdp, pmd); \ pmd 598 include/asm-generic/pgtable.h static inline int pmd_none_or_clear_bad(pmd_t *pmd) pmd 600 include/asm-generic/pgtable.h if (pmd_none(*pmd)) pmd 602 include/asm-generic/pgtable.h if (unlikely(pmd_bad(*pmd))) { pmd 603 include/asm-generic/pgtable.h pmd_clear_bad(pmd); pmd 717 include/asm-generic/pgtable.h static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) pmd 719 include/asm-generic/pgtable.h return pmd; pmd 722 include/asm-generic/pgtable.h static inline int pmd_swp_soft_dirty(pmd_t pmd) pmd 727 include/asm-generic/pgtable.h static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) pmd 729 include/asm-generic/pgtable.h return pmd; pmd 738 include/asm-generic/pgtable.h static inline int pmd_soft_dirty(pmd_t pmd) pmd 748 include/asm-generic/pgtable.h static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) pmd 750 include/asm-generic/pgtable.h return pmd; pmd 758 include/asm-generic/pgtable.h static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) pmd 760 include/asm-generic/pgtable.h return pmd; pmd 778 include/asm-generic/pgtable.h static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) pmd 780 include/asm-generic/pgtable.h return pmd; pmd 783 include/asm-generic/pgtable.h static inline int pmd_swp_soft_dirty(pmd_t pmd) pmd 788 include/asm-generic/pgtable.h static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) pmd 790 include/asm-generic/pgtable.h return pmd; pmd 885 include/asm-generic/pgtable.h static inline int pmd_trans_huge(pmd_t pmd) pmd 890 include/asm-generic/pgtable.h static inline int pmd_write(pmd_t pmd) pmd 951 include/asm-generic/pgtable.h static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) pmd 953 include/asm-generic/pgtable.h pmd_t pmdval = pmd_read_atomic(pmd); pmd 990 include/asm-generic/pgtable.h pmd_clear_bad(pmd); pmd 1008 include/asm-generic/pgtable.h static inline int pmd_trans_unstable(pmd_t *pmd) pmd 1011 include/asm-generic/pgtable.h return pmd_none_or_trans_huge_or_clear_bad(pmd); pmd 1031 include/asm-generic/pgtable.h static inline int pmd_protnone(pmd_t pmd) pmd 1056 include/asm-generic/pgtable.h int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); pmd 1058 include/asm-generic/pgtable.h int pmd_clear_huge(pmd_t *pmd); pmd 1061 include/asm-generic/pgtable.h int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); pmd 1071 include/asm-generic/pgtable.h static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) pmd 1083 include/asm-generic/pgtable.h static inline int pmd_clear_huge(pmd_t *pmd) pmd 1095 include/asm-generic/pgtable.h static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) pmd 30 include/linux/huge_mm.h pmd_t *pmd, pmd 34 include/linux/huge_mm.h pmd_t *pmd, unsigned long addr, unsigned long next); pmd 37 include/linux/huge_mm.h pmd_t *pmd, unsigned long addr); pmd 41 include/linux/huge_mm.h extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, pmd 47 include/linux/huge_mm.h extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, pmd 172 include/linux/huge_mm.h void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, pmd 205 
include/linux/huge_mm.h extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, pmd 210 include/linux/huge_mm.h static inline int is_swap_pmd(pmd_t pmd) pmd 212 include/linux/huge_mm.h return !pmd_none(pmd) && !pmd_present(pmd); pmd 216 include/linux/huge_mm.h static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, pmd 220 include/linux/huge_mm.h if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) pmd 221 include/linux/huge_mm.h return __pmd_trans_huge_lock(pmd, vma); pmd 242 include/linux/huge_mm.h pmd_t *pmd, int flags, struct dev_pagemap **pgmap); pmd 255 include/linux/huge_mm.h static inline bool is_huge_zero_pmd(pmd_t pmd) pmd 257 include/linux/huge_mm.h return is_huge_zero_page(pmd_page(pmd)); pmd 336 include/linux/huge_mm.h static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, pmd 356 include/linux/huge_mm.h static inline int is_swap_pmd(pmd_t pmd) pmd 360 include/linux/huge_mm.h static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, pmd 393 include/linux/huge_mm.h unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) pmd 131 include/linux/hugetlb.h pmd_t *pmd, int flags); pmd 137 include/linux/hugetlb.h int pmd_huge(pmd_t pmd); pmd 178 include/linux/hugetlb.h #define follow_huge_pmd(mm, addr, pmd, flags) NULL pmd 125 include/linux/migrate.h extern bool pmd_trans_migrating(pmd_t pmd); pmd 129 include/linux/migrate.h static inline bool pmd_trans_migrating(pmd_t pmd) pmd 143 include/linux/migrate.h pmd_t *pmd, pmd_t entry, pmd 149 include/linux/migrate.h pmd_t *pmd, pmd_t entry, pmd 423 include/linux/mm.h pmd_t *pmd; /* Pointer to pmd entry matching pmd 568 include/linux/mm.h static inline int pmd_devmap(pmd_t pmd) pmd 1454 include/linux/mm.h pmd_t pmd); pmd 1857 include/linux/mm.h int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); pmd 1858 include/linux/mm.h int __pte_alloc_kernel(pmd_t *pmd); pmd 1919 include/linux/mm.h static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) pmd 1921 include/linux/mm.h return ptlock_ptr(pmd_page(*pmd)); pmd 1944 include/linux/mm.h static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) pmd 1975 include/linux/mm.h #define pte_offset_map_lock(mm, pmd, address, ptlp) \ pmd 1977 include/linux/mm.h spinlock_t *__ptl = pte_lockptr(mm, pmd); \ pmd 1978 include/linux/mm.h pte_t *__pte = pte_offset_map(pmd, address); \ pmd 1989 include/linux/mm.h #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) pmd 1991 include/linux/mm.h #define pte_alloc_map(mm, pmd, address) \ pmd 1992 include/linux/mm.h (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) pmd 1994 include/linux/mm.h #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ pmd 1995 include/linux/mm.h (pte_alloc(mm, pmd) ? \ pmd 1996 include/linux/mm.h NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) pmd 1998 include/linux/mm.h #define pte_alloc_kernel(pmd, address) \ pmd 1999 include/linux/mm.h ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? 
pmd 2000 include/linux/mm.h NULL: pte_offset_kernel(pmd, address))
pmd 2004 include/linux/mm.h static struct page *pmd_to_page(pmd_t *pmd)
pmd 2007 include/linux/mm.h return virt_to_page((void *)((unsigned long) pmd & mask));
pmd 2010 include/linux/mm.h static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
pmd 2012 include/linux/mm.h return ptlock_ptr(pmd_to_page(pmd));
pmd 2031 include/linux/mm.h #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
pmd 2035 include/linux/mm.h static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
pmd 2043 include/linux/mm.h #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
pmd 2047 include/linux/mm.h static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
pmd 2049 include/linux/mm.h spinlock_t *ptl = pmd_lockptr(mm, pmd);
pmd 2779 include/linux/mm.h pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
pmd 31 include/linux/pagewalk.h int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
pmd 113 include/linux/pfn_t.h pmd_t pmd_mkdevmap(pmd_t pmd);
pmd 209 include/linux/rmap.h pmd_t *pmd;
pmd 206 include/linux/swapops.h extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
pmd 231 include/linux/swapops.h static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
pmd 251 include/linux/swapops.h extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
pmd 253 include/linux/swapops.h static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
pmd 257 include/linux/swapops.h if (pmd_swp_soft_dirty(pmd))
pmd 258 include/linux/swapops.h pmd = pmd_swp_clear_soft_dirty(pmd);
pmd 259 include/linux/swapops.h arch_entry = __pmd_to_swp_entry(pmd);
pmd 271 include/linux/swapops.h static inline int is_pmd_migration_entry(pmd_t pmd)
pmd 273 include/linux/swapops.h return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
pmd 290 include/linux/swapops.h static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
pmd 300 include/linux/swapops.h static inline int is_pmd_migration_entry(pmd_t pmd)
pmd 31 include/trace/events/thp.h TP_PROTO(unsigned long addr, unsigned long pmd),
pmd 32 include/trace/events/thp.h TP_ARGS(addr, pmd),
pmd 35 include/trace/events/thp.h __field(unsigned long, pmd)
pmd 40 include/trace/events/thp.h __entry->pmd = pmd;
pmd 43 include/trace/events/thp.h TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
pmd 186 include/trace/events/xen.h __entry->pmdval = pmdval.pmd),
pmd 63 lib/ioremap.c static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
pmd 70 lib/ioremap.c pte = pte_alloc_kernel(pmd, addr);
pmd 81 lib/ioremap.c static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
pmd 97 lib/ioremap.c if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
pmd 100 lib/ioremap.c return pmd_set_huge(pmd, phys_addr, prot);
pmd 106 lib/ioremap.c pmd_t *pmd;
pmd 109 lib/ioremap.c pmd = pmd_alloc(&init_mm, pud, addr);
pmd 110 lib/ioremap.c if (!pmd)
pmd 115 lib/ioremap.c if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
pmd 118 lib/ioremap.c if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
pmd 120 lib/ioremap.c } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
pmd 2664 mm/filemap.c if (pmd_trans_huge(*vmf->pmd))
pmd 183 mm/gup.c unsigned long address, pmd_t *pmd, unsigned int flags,
pmd 192 mm/gup.c if (unlikely(pmd_bad(*pmd)))
pmd 195 mm/gup.c ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
pmd 212 mm/gup.c migration_entry_wait(mm, pmd, address);
pmd 322 mm/gup.c pmd_t *pmd, pmdval;
pmd 327 mm/gup.c pmd = pmd_offset(pudp, address);
pmd 332 mm/gup.c pmdval = READ_ONCE(*pmd);
pmd 336 mm/gup.c page = follow_huge_pmd(mm, address, pmd, flags);
pmd 356 mm/gup.c pmd_migration_entry_wait(mm, pmd);
pmd 357 mm/gup.c pmdval = READ_ONCE(*pmd);
pmd 367 mm/gup.c ptl = pmd_lock(mm, pmd);
pmd 368 mm/gup.c page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
pmd 374 mm/gup.c return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
pmd 380 mm/gup.c ptl = pmd_lock(mm, pmd);
pmd 381 mm/gup.c if (unlikely(pmd_none(*pmd))) {
pmd 385 mm/gup.c if (unlikely(!pmd_present(*pmd))) {
pmd 389 mm/gup.c pmd_migration_entry_wait(mm, pmd);
pmd 392 mm/gup.c if (unlikely(!pmd_trans_huge(*pmd))) {
pmd 394 mm/gup.c return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
pmd 398 mm/gup.c page = pmd_page(*pmd);
pmd 402 mm/gup.c split_huge_pmd(vma, pmd, address);
pmd 403 mm/gup.c if (pmd_trans_unstable(pmd))
pmd 415 mm/gup.c if (pmd_none(*pmd))
pmd 419 mm/gup.c split_huge_pmd(vma, pmd, address);
pmd 420 mm/gup.c ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
pmd 424 mm/gup.c follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
pmd 426 mm/gup.c page = follow_trans_huge_pmd(vma, address, pmd, flags);
pmd 576 mm/gup.c pmd_t *pmd;
pmd 595 mm/gup.c pmd = pmd_offset(pud, address);
pmd 596 mm/gup.c if (!pmd_present(*pmd))
pmd 598 mm/gup.c VM_BUG_ON(pmd_trans_huge(*pmd));
pmd 599 mm/gup.c pte = pte_offset_map(pmd, address);
pmd 1826 mm/gup.c static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
pmd 1833 mm/gup.c ptem = ptep = pte_offset_map(&pmd, addr);
pmd 1899 mm/gup.c static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
pmd 2195 mm/gup.c pmd_t pmd = READ_ONCE(*pmdp);
pmd 2198 mm/gup.c if (!pmd_present(pmd))
pmd 2201 mm/gup.c if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd 2202 mm/gup.c pmd_devmap(pmd))) {
pmd 2208 mm/gup.c if (pmd_protnone(pmd))
pmd 2211 mm/gup.c if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
pmd 2215 mm/gup.c } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
pmd 2220 mm/gup.c if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
pmd 2223 mm/gup.c } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr))
pmd 395 mm/hmm.c static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
pmd 397 mm/hmm.c if (pmd_protnone(pmd))
pmd 399 mm/hmm.c return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
pmd 406 mm/hmm.c unsigned long end, uint64_t *pfns, pmd_t pmd)
pmd 415 mm/hmm.c cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
pmd 419 mm/hmm.c if (pmd_protnone(pmd) || fault || write_fault)
pmd 422 mm/hmm.c pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
pmd 424 mm/hmm.c if (pmd_devmap(pmd)) {
pmd 442 mm/hmm.c unsigned long end, uint64_t *pfns, pmd_t pmd);
pmd 562 mm/hmm.c pmd_t pmd;
pmd 565 mm/hmm.c pmd = READ_ONCE(*pmdp);
pmd 566 mm/hmm.c if (pmd_none(pmd))
pmd 569 mm/hmm.c if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
pmd 586 mm/hmm.c } else if (!pmd_present(pmd))
pmd 589 mm/hmm.c if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
pmd 599 mm/hmm.c pmd = pmd_read_atomic(pmdp);
pmd 601 mm/hmm.c if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
pmd 605 mm/hmm.c return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
pmd 614 mm/hmm.c if (pmd_bad(pmd))
pmd 484 mm/huge_memory.c pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
pmd 487 mm/huge_memory.c pmd = pmd_mkwrite(pmd);
pmd 488 mm/huge_memory.c return pmd;
pmd 606 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 607 mm/huge_memory.c if (unlikely(!pmd_none(*vmf->pmd))) {
pmd 634 mm/huge_memory.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
pmd 635 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
pmd 692 mm/huge_memory.c struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
pmd 696 mm/huge_memory.c if (!pmd_none(*pmd))
pmd 701 mm/huge_memory.c pgtable_trans_huge_deposit(mm, pmd, pgtable);
pmd 702 mm/huge_memory.c set_pmd_at(mm, haddr, pmd, entry);
pmd 736 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 739 mm/huge_memory.c if (pmd_none(*vmf->pmd)) {
pmd 749 mm/huge_memory.c haddr, vmf->pmd, zero_page);
pmd 770 mm/huge_memory.c pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
pmd 777 mm/huge_memory.c ptl = pmd_lock(mm, pmd);
pmd 778 mm/huge_memory.c if (!pmd_none(*pmd)) {
pmd 780 mm/huge_memory.c if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
pmd 781 mm/huge_memory.c WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
pmd 784 mm/huge_memory.c entry = pmd_mkyoung(*pmd);
pmd 786 mm/huge_memory.c if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
pmd 787 mm/huge_memory.c update_mmu_cache_pmd(vma, addr, pmd);
pmd 802 mm/huge_memory.c pgtable_trans_huge_deposit(mm, pmd, pgtable);
pmd 807 mm/huge_memory.c set_pmd_at(mm, addr, pmd, entry);
pmd 808 mm/huge_memory.c update_mmu_cache_pmd(vma, addr, pmd);
pmd 845 mm/huge_memory.c insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
pmd 923 mm/huge_memory.c pmd_t *pmd, int flags)
pmd 927 mm/huge_memory.c _pmd = pmd_mkyoung(*pmd);
pmd 931 mm/huge_memory.c pmd, _pmd, flags & FOLL_WRITE))
pmd 932 mm/huge_memory.c update_mmu_cache_pmd(vma, addr, pmd);
pmd 936 mm/huge_memory.c pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
pmd 938 mm/huge_memory.c unsigned long pfn = pmd_pfn(*pmd);
pmd 942 mm/huge_memory.c assert_spin_locked(pmd_lockptr(mm, pmd));
pmd 950 mm/huge_memory.c if (flags & FOLL_WRITE && !pmd_write(*pmd))
pmd 953 mm/huge_memory.c if (pmd_present(*pmd) && pmd_devmap(*pmd))
pmd 959 mm/huge_memory.c touch_pmd(vma, addr, pmd, flags);
pmd 984 mm/huge_memory.c pmd_t pmd;
pmd 1001 mm/huge_memory.c pmd = *src_pmd;
pmd 1004 mm/huge_memory.c if (unlikely(is_swap_pmd(pmd))) {
pmd 1005 mm/huge_memory.c swp_entry_t entry = pmd_to_swp_entry(pmd);
pmd 1007 mm/huge_memory.c VM_BUG_ON(!is_pmd_migration_entry(pmd));
pmd 1010 mm/huge_memory.c pmd = swp_entry_to_pmd(entry);
pmd 1012 mm/huge_memory.c pmd = pmd_swp_mksoft_dirty(pmd);
pmd 1013 mm/huge_memory.c set_pmd_at(src_mm, addr, src_pmd, pmd);
pmd 1018 mm/huge_memory.c set_pmd_at(dst_mm, addr, dst_pmd, pmd);
pmd 1024 mm/huge_memory.c if (unlikely(!pmd_trans_huge(pmd))) {
pmd 1033 mm/huge_memory.c if (is_huge_zero_pmd(pmd)) {
pmd 1047 mm/huge_memory.c src_page = pmd_page(pmd);
pmd 1056 mm/huge_memory.c pmd = pmd_mkold(pmd_wrprotect(pmd));
pmd 1057 mm/huge_memory.c set_pmd_at(dst_mm, addr, dst_pmd, pmd);
pmd 1183 mm/huge_memory.c vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
pmd 1184 mm/huge_memory.c if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
pmd 1191 mm/huge_memory.c if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
pmd 1192 mm/huge_memory.c update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
pmd 1251 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 1252 mm/huge_memory.c if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
pmd 1264 mm/huge_memory.c pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
pmd 1266 mm/huge_memory.c pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
pmd 1286 mm/huge_memory.c pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
pmd 1325 mm/huge_memory.c vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
pmd 1330 mm/huge_memory.c if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
pmd 1344 mm/huge_memory.c if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
pmd 1355 mm/huge_memory.c if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
pmd 1356 mm/huge_memory.c update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
pmd 1376 mm/huge_memory.c split_huge_pmd(vma, vmf->pmd, vmf->address);
pmd 1381 mm/huge_memory.c split_huge_pmd(vma, vmf->pmd, vmf->address);
pmd 1393 mm/huge_memory.c split_huge_pmd(vma, vmf->pmd, vmf->address);
pmd 1418 mm/huge_memory.c if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
pmd 1427 mm/huge_memory.c pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
pmd 1431 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
pmd 1432 mm/huge_memory.c update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
pmd 1460 mm/huge_memory.c static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
pmd 1462 mm/huge_memory.c return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
pmd 1467 mm/huge_memory.c pmd_t *pmd,
pmd 1473 mm/huge_memory.c assert_spin_locked(pmd_lockptr(mm, pmd));
pmd 1475 mm/huge_memory.c if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
pmd 1479 mm/huge_memory.c if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
pmd 1483 mm/huge_memory.c if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
pmd 1486 mm/huge_memory.c page = pmd_page(*pmd);
pmd 1489 mm/huge_memory.c touch_pmd(vma, addr, pmd, flags);
pmd 1534 mm/huge_memory.c vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
pmd 1547 mm/huge_memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 1548 mm/huge_memory.c if (unlikely(!pmd_same(pmd, *vmf->pmd)))
pmd 1556 mm/huge_memory.c if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
pmd 1557 mm/huge_memory.c page = pmd_page(*vmf->pmd);
pmd 1565 mm/huge_memory.c page = pmd_page(pmd);
pmd 1576 mm/huge_memory.c if (!pmd_savedwrite(pmd))
pmd 1611 mm/huge_memory.c if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
pmd 1658 mm/huge_memory.c vmf->pmd, pmd, vmf->address, page, target_nid);
pmd 1668 mm/huge_memory.c was_writable = pmd_savedwrite(pmd);
pmd 1669 mm/huge_memory.c pmd = pmd_modify(pmd, vma->vm_page_prot);
pmd 1670 mm/huge_memory.c pmd = pmd_mkyoung(pmd);
pmd 1672 mm/huge_memory.c pmd = pmd_mkwrite(pmd);
pmd 1673 mm/huge_memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
pmd 1674 mm/huge_memory.c update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
pmd 1695 mm/huge_memory.c pmd_t *pmd, unsigned long addr, unsigned long next)
pmd 1705 mm/huge_memory.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 1709 mm/huge_memory.c orig_pmd = *pmd;
pmd 1748 mm/huge_memory.c pmdp_invalidate(vma, addr, pmd);
pmd 1752 mm/huge_memory.c set_pmd_at(mm, addr, pmd, orig_pmd);
pmd 1753 mm/huge_memory.c tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
pmd 1764 mm/huge_memory.c static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
pmd 1768 mm/huge_memory.c pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd 1774 mm/huge_memory.c pmd_t *pmd, unsigned long addr)
pmd 1781 mm/huge_memory.c ptl = __pmd_trans_huge_lock(pmd, vma);
pmd 1790 mm/huge_memory.c orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
pmd 1792 mm/huge_memory.c tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
pmd 1795 mm/huge_memory.c zap_deposited_table(tlb->mm, pmd);
pmd 1800 mm/huge_memory.c zap_deposited_table(tlb->mm, pmd);
pmd 1823 mm/huge_memory.c zap_deposited_table(tlb->mm, pmd);
pmd 1827 mm/huge_memory.c zap_deposited_table(tlb->mm, pmd);
pmd 1853 mm/huge_memory.c static pmd_t move_soft_dirty_pmd(pmd_t pmd)
pmd 1856 mm/huge_memory.c if (unlikely(is_pmd_migration_entry(pmd)))
pmd 1857 mm/huge_memory.c pmd = pmd_swp_mksoft_dirty(pmd);
pmd 1858 mm/huge_memory.c else if (pmd_present(pmd))
pmd 1859 mm/huge_memory.c pmd = pmd_mksoft_dirty(pmd);
pmd 1861 mm/huge_memory.c return pmd;
pmd 1869 mm/huge_memory.c pmd_t pmd;
pmd 1896 mm/huge_memory.c pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
pmd 1897 mm/huge_memory.c if (pmd_present(pmd))
pmd 1906 mm/huge_memory.c pmd = move_soft_dirty_pmd(pmd);
pmd 1907 mm/huge_memory.c set_pmd_at(mm, new_addr, new_pmd, pmd);
pmd 1924 mm/huge_memory.c int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
pmd 1933 mm/huge_memory.c ptl = __pmd_trans_huge_lock(pmd, vma);
pmd 1937 mm/huge_memory.c preserve_write = prot_numa && pmd_write(*pmd);
pmd 1941 mm/huge_memory.c if (is_swap_pmd(*pmd)) {
pmd 1942 mm/huge_memory.c swp_entry_t entry = pmd_to_swp_entry(*pmd);
pmd 1944 mm/huge_memory.c VM_BUG_ON(!is_pmd_migration_entry(*pmd));
pmd 1953 mm/huge_memory.c if (pmd_swp_soft_dirty(*pmd))
pmd 1955 mm/huge_memory.c set_pmd_at(mm, addr, pmd, newpmd);
pmd 1966 mm/huge_memory.c if (prot_numa && is_huge_zero_pmd(*pmd))
pmd 1969 mm/huge_memory.c if (prot_numa && pmd_protnone(*pmd))
pmd 1993 mm/huge_memory.c entry = pmdp_invalidate(vma, addr, pmd);
pmd 1999 mm/huge_memory.c set_pmd_at(mm, addr, pmd, entry);
pmd 2012 mm/huge_memory.c spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
pmd 2015 mm/huge_memory.c ptl = pmd_lock(vma->vm_mm, pmd);
pmd 2016 mm/huge_memory.c if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
pmd 2017 mm/huge_memory.c pmd_devmap(*pmd)))
pmd 2106 mm/huge_memory.c unsigned long haddr, pmd_t *pmd)
pmd 2121 mm/huge_memory.c pmdp_huge_clear_flush(vma, haddr, pmd);
pmd 2123 mm/huge_memory.c pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd 2136 mm/huge_memory.c pmd_populate(mm, pmd, pgtable);
pmd 2139 mm/huge_memory.c static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pmd 2153 mm/huge_memory.c VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
pmd 2154 mm/huge_memory.c && !pmd_devmap(*pmd));
pmd 2159 mm/huge_memory.c _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
pmd 2165 mm/huge_memory.c zap_deposited_table(mm, pmd);
pmd 2177 mm/huge_memory.c } else if (is_huge_zero_pmd(*pmd)) {
pmd 2187 mm/huge_memory.c return __split_huge_zero_page_pmd(vma, haddr, pmd);
pmd 2210 mm/huge_memory.c old_pmd = pmdp_invalidate(vma, haddr, pmd);
pmd 2236 mm/huge_memory.c pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd 2289 mm/huge_memory.c pmd_populate(mm, pmd, pgtable);
pmd 2299 mm/huge_memory.c void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
pmd 2309 mm/huge_memory.c ptl = pmd_lock(vma->vm_mm, pmd);
pmd 2316 mm/huge_memory.c if (page && page != pmd_page(*pmd))
pmd 2319 mm/huge_memory.c if (pmd_trans_huge(*pmd)) {
pmd 2320 mm/huge_memory.c page = pmd_page(*pmd);
pmd 2323 mm/huge_memory.c } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
pmd 2325 mm/huge_memory.c __split_huge_pmd_locked(vma, pmd, range.start, freeze);
pmd 2350 mm/huge_memory.c pmd_t *pmd;
pmd 2364 mm/huge_memory.c pmd = pmd_offset(pud, address);
pmd 2366 mm/huge_memory.c __split_huge_pmd(vma, pmd, address, freeze, page);
pmd 3030 mm/huge_memory.c if (!(pvmw->pmd && !pvmw->pte))
pmd 3034 mm/huge_memory.c pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
pmd 3041 mm/huge_memory.c set_pmd_at(mm, address, pvmw->pmd, pmdswp);
pmd 3055 mm/huge_memory.c if (!(pvmw->pmd && !pvmw->pte))
pmd 3058 mm/huge_memory.c entry = pmd_to_swp_entry(*pvmw->pmd);
pmd 3061 mm/huge_memory.c if (pmd_swp_soft_dirty(*pvmw->pmd))
pmd 3071 mm/huge_memory.c set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
pmd 3074 mm/huge_memory.c update_mmu_cache_pmd(vma, address, pvmw->pmd);
pmd 5020 mm/hugetlb.c pmd_t *pmd, pmd_entry;
pmd 5037 mm/hugetlb.c pmd = pmd_offset(pud, addr);
pmd 5038 mm/hugetlb.c pmd_entry = READ_ONCE(*pmd);
pmd 5043 mm/hugetlb.c return (pte_t *)pmd;
pmd 5071 mm/hugetlb.c pmd_t *pmd, int flags)
pmd 5077 mm/hugetlb.c ptl = pmd_lockptr(mm, pmd);
pmd 5083 mm/hugetlb.c if (!pmd_huge(*pmd))
pmd 5085 mm/hugetlb.c pte = huge_ptep_get((pte_t *)pmd);
pmd 5087 mm/hugetlb.c page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
pmd 5093 mm/hugetlb.c __migration_entry_wait(mm, (pte_t *)pmd, ptl);
pmd 339 mm/internal.h extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
pmd 644 mm/kasan/common.c pmd_t *pmd;
pmd 663 mm/kasan/common.c pmd = pmd_offset(pud, addr);
pmd 664 mm/kasan/common.c if (pmd_none(*pmd))
pmd 667 mm/kasan/common.c if (pmd_bad(*pmd))
pmd 669 mm/kasan/common.c pte = pte_offset_kernel(pmd, addr);
pmd 74 mm/kasan/init.c static inline bool kasan_pte_table(pmd_t pmd)
pmd 76 mm/kasan/init.c return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
pmd 96 mm/kasan/init.c static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
pmd 99 mm/kasan/init.c pte_t *pte = pte_offset_kernel(pmd, addr);
pmd 109 mm/kasan/init.c pte = pte_offset_kernel(pmd, addr);
pmd 116 mm/kasan/init.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 123 mm/kasan/init.c pmd_populate_kernel(&init_mm, pmd,
pmd 128 mm/kasan/init.c if (pmd_none(*pmd)) {
pmd 138 mm/kasan/init.c pmd_populate_kernel(&init_mm, pmd, p);
pmd 140 mm/kasan/init.c zero_pte_populate(pmd, addr, next);
pmd 141 mm/kasan/init.c } while (pmd++, addr = next, addr != end);
pmd 155 mm/kasan/init.c pmd_t *pmd;
pmd 159 mm/kasan/init.c pmd = pmd_offset(pud, addr);
pmd 160 mm/kasan/init.c pmd_populate_kernel(&init_mm, pmd,
pmd 193 mm/kasan/init.c pmd_t *pmd;
pmd 200 mm/kasan/init.c pmd = pmd_offset(pud, addr);
pmd 201 mm/kasan/init.c pmd_populate_kernel(&init_mm, pmd,
pmd 244 mm/kasan/init.c pmd_t *pmd;
pmd 273 mm/kasan/init.c pmd = pmd_offset(pud, addr);
pmd 274 mm/kasan/init.c pmd_populate_kernel(&init_mm, pmd,
pmd 297 mm/kasan/init.c static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
pmd 308 mm/kasan/init.c pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
pmd 309 mm/kasan/init.c pmd_clear(pmd);
pmd 314 mm/kasan/init.c pmd_t *pmd;
pmd 318 mm/kasan/init.c pmd = pmd_start + i;
pmd 319 mm/kasan/init.c if (!pmd_none(*pmd))
pmd 376 mm/kasan/init.c static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
pmd 381 mm/kasan/init.c for (; addr < end; addr = next, pmd++) {
pmd 386 mm/kasan/init.c if (!pmd_present(*pmd))
pmd 389 mm/kasan/init.c if (kasan_pte_table(*pmd)) {
pmd 392 mm/kasan/init.c pmd_clear(pmd);
pmd 395 mm/kasan/init.c pte = pte_offset_kernel(pmd, addr);
pmd 397 mm/kasan/init.c kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
pmd 407 mm/kasan/init.c pmd_t *pmd, *pmd_base;
pmd 420 mm/kasan/init.c pmd = pmd_offset(pud, addr);
pmd 422 mm/kasan/init.c kasan_remove_pmd_table(pmd, addr, next);
pmd 892 mm/khugepaged.c unsigned long address, pmd_t *pmd,
pmd 901 mm/khugepaged.c .pmd = pmd,
pmd 910 mm/khugepaged.c vmf.pte = pte_offset_map(pmd, address);
pmd 928 mm/khugepaged.c if (mm_find_pmd(mm, address) != pmd) {
pmd 938 mm/khugepaged.c vmf.pte = pte_offset_map(pmd, vmf.address);
pmd 951 mm/khugepaged.c pmd_t *pmd, _pmd;
pmd 993 mm/khugepaged.c pmd = mm_find_pmd(mm, address);
pmd 994 mm/khugepaged.c if (!pmd) {
pmd 1006 mm/khugepaged.c if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
pmd 1026 mm/khugepaged.c if (mm_find_pmd(mm, address) != pmd)
pmd 1035 mm/khugepaged.c pte = pte_offset_map(pmd, address);
pmd 1036 mm/khugepaged.c pte_ptl = pte_lockptr(mm, pmd);
pmd 1038 mm/khugepaged.c pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
pmd 1045 mm/khugepaged.c _pmd = pmdp_collapse_flush(vma, address, pmd);
pmd 1056 mm/khugepaged.c BUG_ON(!pmd_none(*pmd));
pmd 1062 mm/khugepaged.c pmd_populate(mm, pmd, pmd_pgtable(_pmd));
pmd 1091 mm/khugepaged.c BUG_ON(!pmd_none(*pmd));
pmd 1096 mm/khugepaged.c pgtable_trans_huge_deposit(mm, pmd, pgtable);
pmd 1097 mm/khugepaged.c set_pmd_at(mm, address, pmd, _pmd);
pmd 1098 mm/khugepaged.c update_mmu_cache_pmd(vma, address, pmd);
pmd 1120 mm/khugepaged.c pmd_t *pmd;
pmd 1131 mm/khugepaged.c pmd = mm_find_pmd(mm, address);
pmd 1132 mm/khugepaged.c if (!pmd) {
pmd 1138 mm/khugepaged.c pte = pte_offset_map_lock(mm, pmd, address, &ptl);
pmd 1296 mm/khugepaged.c pmd_t *pmd, _pmd;
pmd 1314 mm/khugepaged.c pmd = mm_find_pmd(mm, haddr);
pmd 1315 mm/khugepaged.c if (!pmd)
pmd 1318 mm/khugepaged.c start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
pmd 1383 mm/khugepaged.c ptl = pmd_lock(vma->vm_mm, pmd);
pmd 1384 mm/khugepaged.c _pmd = pmdp_collapse_flush(vma, addr, pmd);
pmd 1421 mm/khugepaged.c pmd_t *pmd, _pmd;
pmd 1448 mm/khugepaged.c pmd = mm_find_pmd(vma->vm_mm, addr);
pmd 1449 mm/khugepaged.c if (!pmd)
pmd 1459 mm/khugepaged.c spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
pmd 1461 mm/khugepaged.c _pmd = pmdp_collapse_flush(vma, addr, pmd);
pmd 1123 mm/ksm.c pmd_t *pmd;
pmd 1135 mm/ksm.c pmd = mm_find_pmd(mm, addr);
pmd 1136 mm/ksm.c if (!pmd)
pmd 1143 mm/ksm.c ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 183 mm/madvise.c static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
pmd 190 mm/madvise.c if (pmd_none_or_trans_huge_or_clear_bad(pmd))
pmd 199 mm/madvise.c orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
pmd 300 mm/madvise.c static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
pmd 318 mm/madvise.c if (pmd_trans_huge(*pmd)) {
pmd 323 mm/madvise.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 327 mm/madvise.c orig_pmd = *pmd;
pmd 358 mm/madvise.c pmdp_invalidate(vma, addr, pmd);
pmd 361 mm/madvise.c set_pmd_at(mm, addr, pmd, orig_pmd);
pmd 362 mm/madvise.c tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
pmd 383 mm/madvise.c if (pmd_trans_unstable(pmd))
pmd 388 mm/madvise.c orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
pmd 420 mm/madvise.c pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 425 mm/madvise.c pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 562 mm/madvise.c static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
pmd 576 mm/madvise.c if (pmd_trans_huge(*pmd))
pmd 577 mm/madvise.c if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
pmd 580 mm/madvise.c if (pmd_trans_unstable(pmd))
pmd 584 mm/madvise.c orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 630 mm/madvise.c pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 635 mm/madvise.c pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 5646 mm/memcontrol.c unsigned long addr, pmd_t pmd, union mc_target *target)
pmd 5651 mm/memcontrol.c if (unlikely(is_swap_pmd(pmd))) {
pmd 5653 mm/memcontrol.c !is_pmd_migration_entry(pmd));
pmd 5656 mm/memcontrol.c page = pmd_page(pmd);
pmd 5671 mm/memcontrol.c unsigned long addr, pmd_t pmd, union mc_target *target)
pmd 5677 mm/memcontrol.c static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
pmd 5685 mm/memcontrol.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 5692 mm/memcontrol.c if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
pmd 5698 mm/memcontrol.c if (pmd_trans_unstable(pmd))
pmd 5700 mm/memcontrol.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
pmd 5876 mm/memcontrol.c static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
pmd 5888 mm/memcontrol.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 5894 mm/memcontrol.c target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
pmd 5919 mm/memcontrol.c if (pmd_trans_unstable(pmd))
pmd 5922 mm/memcontrol.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
pmd 271 mm/memory-failure.c pmd_t *pmd;
pmd 285 mm/memory-failure.c pmd = pmd_offset(pud, address);
pmd 286 mm/memory-failure.c if (!pmd_present(*pmd))
pmd 288 mm/memory-failure.c if (pmd_devmap(*pmd))
pmd 290 mm/memory-failure.c pte = pte_offset_map(pmd, address);
pmd 195 mm/memory.c static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pmd 198 mm/memory.c pgtable_t token = pmd_pgtable(*pmd);
pmd 199 mm/memory.c pmd_clear(pmd);
pmd 208 mm/memory.c pmd_t *pmd;
pmd 213 mm/memory.c pmd = pmd_offset(pud, addr);
pmd 216 mm/memory.c if (pmd_none_or_clear_bad(pmd))
pmd 218 mm/memory.c free_pte_range(tlb, pmd, addr);
pmd 219 mm/memory.c } while (pmd++, addr = next, addr != end);
pmd 232 mm/memory.c pmd = pmd_offset(pud, start);
pmd 234 mm/memory.c pmd_free_tlb(tlb, pmd, start);
pmd 405 mm/memory.c int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
pmd 427 mm/memory.c ptl = pmd_lock(mm, pmd);
pmd 428 mm/memory.c if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd 430 mm/memory.c pmd_populate(mm, pmd, new);
pmd 439 mm/memory.c int __pte_alloc_kernel(pmd_t *pmd)
pmd 448 mm/memory.c if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd 449 mm/memory.c pmd_populate_kernel(&init_mm, pmd, new);
pmd 487 mm/memory.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 518 mm/memory.c (long long)pte_val(pte), (long long)pmd_val(*pmd));
pmd 631 mm/memory.c pmd_t pmd)
pmd 633 mm/memory.c unsigned long pfn = pmd_pfn(pmd);
pmd 655 mm/memory.c if (pmd_devmap(pmd))
pmd 1005 mm/memory.c struct vm_area_struct *vma, pmd_t *pmd,
pmd 1020 mm/memory.c start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pmd 1144 mm/memory.c pmd_t *pmd;
pmd 1147 mm/memory.c pmd = pmd_offset(pud, addr);
pmd 1150 mm/memory.c if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
pmd 1152 mm/memory.c __split_huge_pmd(vma, pmd, addr, false, NULL);
pmd 1153 mm/memory.c else if (zap_huge_pmd(tlb, vma, pmd, addr))
pmd 1164 mm/memory.c if (pmd_none_or_trans_huge_or_clear_bad(pmd))
pmd 1166 mm/memory.c next = zap_pte_range(tlb, vma, pmd, addr, next, details);
pmd 1169 mm/memory.c } while (pmd++, addr = next, addr != end);
pmd 1398 mm/memory.c pmd_t *pmd;
pmd 1407 mm/memory.c pmd = pmd_alloc(mm, pud, addr);
pmd 1408 mm/memory.c if (!pmd)
pmd 1411 mm/memory.c VM_BUG_ON(pmd_trans_huge(*pmd));
pmd 1412 mm/memory.c return pte_alloc_map_lock(mm, pmd, addr, ptl);
pmd 1791 mm/memory.c static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
pmd 1799 mm/memory.c pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
pmd 1821 mm/memory.c pmd_t *pmd;
pmd 1826 mm/memory.c pmd = pmd_alloc(mm, pud, addr);
pmd 1827 mm/memory.c if (!pmd)
pmd 1829 mm/memory.c VM_BUG_ON(pmd_trans_huge(*pmd));
pmd 1832 mm/memory.c err = remap_pte_range(mm, pmd, addr, next,
pmd 1836 mm/memory.c } while (pmd++, addr = next, addr != end);
pmd 2004 mm/memory.c static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
pmd 2013 mm/memory.c pte_alloc_kernel(pmd, addr) :
pmd 2014 mm/memory.c pte_alloc_map_lock(mm, pmd, addr, &ptl);
pmd 2018 mm/memory.c BUG_ON(pmd_huge(*pmd));
pmd 2039 mm/memory.c pmd_t *pmd;
pmd 2045 mm/memory.c pmd = pmd_alloc(mm, pud, addr);
pmd 2046 mm/memory.c if (!pmd)
pmd 2050 mm/memory.c err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
pmd 2053 mm/memory.c } while (pmd++, addr = next, addr != end);
pmd 2132 mm/memory.c static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
pmd 2138 mm/memory.c spinlock_t *ptl = pte_lockptr(mm, pmd);
pmd 2361 mm/memory.c vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
pmd 2475 mm/memory.c vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
pmd 2598 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
pmd 2762 mm/memory.c if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
pmd 2768 mm/memory.c migration_entry_wait(vma->vm_mm, vmf->pmd,
pmd 2813 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
pmd 2869 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
pmd 2988 mm/memory.c if (pte_alloc(vma->vm_mm, vmf->pmd))
pmd 2992 mm/memory.c if (unlikely(pmd_trans_unstable(vmf->pmd)))
pmd 3000 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
pmd 3037 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
pmd 3101 mm/memory.c if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
pmd 3135 mm/memory.c static int pmd_devmap_trans_unstable(pmd_t *pmd)
pmd 3137 mm/memory.c return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
pmd 3144 mm/memory.c if (!pmd_none(*vmf->pmd))
pmd 3147 mm/memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 3148 mm/memory.c if (unlikely(!pmd_none(*vmf->pmd))) {
pmd 3154 mm/memory.c pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
pmd 3157 mm/memory.c } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
pmd 3172 mm/memory.c if (pmd_devmap_trans_unstable(vmf->pmd))
pmd 3184 mm/memory.c vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
pmd 3194 mm/memory.c pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
pmd 3229 mm/memory.c vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
pmd 3230 mm/memory.c if (unlikely(!pmd_none(*vmf->pmd)))
pmd 3248 mm/memory.c set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
pmd 3250 mm/memory.c update_mmu_cache_pmd(vma, haddr, vmf->pmd);
pmd 3291 mm/memory.c if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
pmd 3459 mm/memory.c if (pmd_none(*vmf->pmd)) {
pmd 3469 mm/memory.c if (pmd_trans_huge(*vmf->pmd)) {
pmd 3612 mm/memory.c if (unlikely(!pmd_present(*vmf->pmd)))
pmd 3616 mm/memory.c vmf->pmd,
pmd 3680 mm/memory.c vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
pmd 3772 mm/memory.c __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
pmd 3825 mm/memory.c if (unlikely(pmd_none(*vmf->pmd))) {
pmd 3835 mm/memory.c if (pmd_devmap_trans_unstable(vmf->pmd))
pmd 3843 mm/memory.c vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
pmd 3874 mm/memory.c vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
pmd 3956 mm/memory.c vmf.pmd = pmd_alloc(mm, vmf.pud, address);
pmd 3957 mm/memory.c if (!vmf.pmd)
pmd 3959 mm/memory.c if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
pmd 3964 mm/memory.c pmd_t orig_pmd = *vmf.pmd;
pmd 3971 mm/memory.c pmd_migration_entry_wait(mm, vmf.pmd);
pmd 4139 mm/memory.c pmd_t *pmd;
pmd 4154 mm/memory.c pmd = pmd_offset(pud, address);
pmd 4155 mm/memory.c VM_BUG_ON(pmd_trans_huge(*pmd));
pmd 4157 mm/memory.c if (pmd_huge(*pmd)) {
pmd 4167 mm/memory.c *ptlp = pmd_lock(mm, pmd);
pmd 4168 mm/memory.c if (pmd_huge(*pmd)) {
pmd 4169 mm/memory.c *pmdpp = pmd;
pmd 4177 mm/memory.c if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
pmd 4186 mm/memory.c ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
pmd 441 mm/mempolicy.c static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
pmd 449 mm/mempolicy.c if (unlikely(is_pmd_migration_entry(*pmd))) {
pmd 453 mm/mempolicy.c page = pmd_page(*pmd);
pmd 456 mm/mempolicy.c __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
pmd 490 mm/mempolicy.c static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
pmd 502 mm/mempolicy.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 504 mm/mempolicy.c ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
pmd 510 mm/mempolicy.c if (pmd_trans_unstable(pmd))
pmd 513 mm/mempolicy.c pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
pmd 339 mm/migrate.c void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
pmd 342 mm/migrate.c spinlock_t *ptl = pte_lockptr(mm, pmd);
pmd 343 mm/migrate.c pte_t *ptep = pte_offset_map(pmd, address);
pmd 355 mm/migrate.c void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
pmd 360 mm/migrate.c ptl = pmd_lock(mm, pmd);
pmd 361 mm/migrate.c if (!is_pmd_migration_entry(*pmd))
pmd 363 mm/migrate.c page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
pmd 1958 mm/migrate.c bool pmd_trans_migrating(pmd_t pmd)
pmd 1960 mm/migrate.c struct page *page = pmd_page(pmd);
pmd 2026 mm/migrate.c pmd_t *pmd, pmd_t entry,
pmd 2064 mm/migrate.c ptl = pmd_lock(mm, pmd);
pmd 2065 mm/migrate.c if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
pmd 2109 mm/migrate.c set_pmd_at(mm, start, pmd, entry);
pmd 2138 mm/migrate.c ptl = pmd_lock(mm, pmd);
pmd 2139 mm/migrate.c if (pmd_same(*pmd, entry)) {
pmd 2141 mm/migrate.c set_pmd_at(mm, start, pmd, entry);
pmd 122 mm/mincore.c static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pmd 131 mm/mincore.c ptl = pmd_trans_huge_lock(pmd, vma);
pmd 138 mm/mincore.c if (pmd_trans_unstable(pmd)) {
pmd 143 mm/mincore.c ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
pmd 38 mm/mprotect.c static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pmd 53 mm/mprotect.c if (pmd_trans_unstable(pmd))
pmd 61 mm/mprotect.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
pmd 168 mm/mprotect.c static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
pmd 170 mm/mprotect.c pmd_t pmdval = pmd_read_atomic(pmd);
pmd 182 mm/mprotect.c pmd_clear_bad(pmd);
pmd 193 mm/mprotect.c pmd_t *pmd;
pmd 201 mm/mprotect.c pmd = pmd_offset(pud, addr);
pmd 215 mm/mprotect.c if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
pmd 216 mm/mprotect.c pmd_none_or_clear_bad_unless_trans_huge(pmd))
pmd 227 mm/mprotect.c if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
pmd 229 mm/mprotect.c __split_huge_pmd(vma, pmd, addr, false, NULL);
pmd 231 mm/mprotect.c int nr_ptes = change_huge_pmd(vma, pmd, addr,
pmd 246 mm/mprotect.c this_pages = change_pte_range(vma, pmd, addr, next, newprot,
pmd 251 mm/mprotect.c } while (pmd++, addr = next, addr != end);
pmd 38 mm/mremap.c pmd_t *pmd;
pmd 52 mm/mremap.c pmd = pmd_offset(pud, addr);
pmd 53 mm/mremap.c if (pmd_none(*pmd))
pmd 56 mm/mremap.c return pmd;
pmd 65 mm/mremap.c pmd_t *pmd;
pmd 75 mm/mremap.c pmd = pmd_alloc(mm, pud, addr);
pmd 76 mm/mremap.c if (!pmd)
pmd 79 mm/mremap.c VM_BUG_ON(pmd_trans_huge(*pmd));
pmd 81 mm/mremap.c return pmd;
pmd 201 mm/mremap.c pmd_t pmd;
pmd 224 mm/mremap.c pmd = *old_pmd;
pmd 230 mm/mremap.c set_pmd_at(mm, new_addr, new_pmd, pmd);
pmd 75 mm/page_idle.c if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
pmd 18 mm/page_vma_mapped.c pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
pmd 50 mm/page_vma_mapped.c pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
pmd 148 mm/page_vma_mapped.c if (pvmw->pmd && !pvmw->pte)
pmd 176 mm/page_vma_mapped.c pvmw->pmd = pmd_offset(pud, pvmw->address);
pmd 182 mm/page_vma_mapped.c pmde = READ_ONCE(*pvmw->pmd);
pmd 184 mm/page_vma_mapped.c pvmw->ptl = pmd_lock(mm, pvmw->pmd);
pmd 185 mm/page_vma_mapped.c if (likely(pmd_trans_huge(*pvmw->pmd))) {
pmd 188 mm/page_vma_mapped.c if (pmd_page(*pvmw->pmd) != page)
pmd 191 mm/page_vma_mapped.c } else if (!pmd_present(*pvmw->pmd)) {
pmd 195 mm/page_vma_mapped.c if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
pmd 196 mm/page_vma_mapped.c swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
pmd 242 mm/page_vma_mapped.c pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
pmd 7 mm/pagewalk.c static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pmd 14 mm/pagewalk.c pte = pte_offset_map(pmd, addr);
pmd 32 mm/pagewalk.c pmd_t *pmd;
pmd 37 mm/pagewalk.c pmd = pmd_offset(pud, addr);
pmd 41 mm/pagewalk.c if (pmd_none(*pmd) || !walk->vma) {
pmd 53 mm/pagewalk.c err = ops->pmd_entry(pmd, addr, next, walk);
pmd 64 mm/pagewalk.c split_huge_pmd(walk->vma, pmd, addr);
pmd 65 mm/pagewalk.c if (pmd_trans_unstable(pmd))
pmd 67 mm/pagewalk.c err = walk_pte_range(pmd, addr, next, walk);
pmd 70 mm/pagewalk.c } while (pmd++, addr = next, addr != end);
pmd 39 mm/pgtable-generic.c void pmd_clear_bad(pmd_t *pmd)
pmd 41 mm/pgtable-generic.c pmd_ERROR(*pmd);
pmd 42 mm/pgtable-generic.c pmd_clear(pmd);
pmd 127 mm/pgtable-generic.c pmd_t pmd;
pmd 131 mm/pgtable-generic.c pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
pmd 133 mm/pgtable-generic.c return pmd;
pmd 202 mm/pgtable-generic.c pmd_t pmd;
pmd 206 mm/pgtable-generic.c pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
pmd 210 mm/pgtable-generic.c return pmd;
pmd 716 mm/rmap.c pmd_t *pmd = NULL;
pmd 731 mm/rmap.c pmd = pmd_offset(pud, address);
pmd 737 mm/rmap.c pmde = *pmd;
pmd 740 mm/rmap.c pmd = NULL;
pmd 742 mm/rmap.c return pmd;
pmd 790 mm/rmap.c pvmw.pmd))
pmd 924 mm/rmap.c pmd_t *pmd = pvmw.pmd;
pmd 927 mm/rmap.c if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
pmd 931 mm/rmap.c entry = pmdp_invalidate(vma, address, pmd);
pmd 934 mm/rmap.c set_pmd_at(vma->vm_mm, address, pmd, entry);
pmd 143 mm/sparse-vmemmap.c pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
pmd 145 mm/sparse-vmemmap.c pte_t *pte = pte_offset_kernel(pmd, addr);
pmd 170 mm/sparse-vmemmap.c pmd_t *pmd = pmd_offset(pud, addr);
pmd 171 mm/sparse-vmemmap.c if (pmd_none(*pmd)) {
pmd 175 mm/sparse-vmemmap.c pmd_populate_kernel(&init_mm, pmd, p);
pmd 177 mm/sparse-vmemmap.c return pmd;
pmd 223 mm/sparse-vmemmap.c pmd_t *pmd;
pmd 236 mm/sparse-vmemmap.c pmd = vmemmap_pmd_populate(pud, addr, node);
pmd 237 mm/sparse-vmemmap.c if (!pmd)
pmd 239 mm/sparse-vmemmap.c pte = vmemmap_pte_populate(pmd, addr, node);
pmd 662 mm/swap_state.c orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
pmd 1853 mm/swapfile.c static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
pmd 1873 mm/swapfile.c pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
pmd 1909 mm/swapfile.c static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pmd 1923 mm/swapfile.c pte = pte_offset_map(pmd, addr);
pmd 1942 mm/swapfile.c vmf.pmd = pmd;
pmd 1952 mm/swapfile.c ret = unuse_pte(vma, pmd, addr, entry, page);
pmd 1968 mm/swapfile.c pte = pte_offset_map(pmd, addr);
pmd 1982 mm/swapfile.c pmd_t *pmd;
pmd 1986 mm/swapfile.c pmd = pmd_offset(pud, addr);
pmd 1990 mm/swapfile.c if (pmd_none_or_trans_huge_or_clear_bad(pmd))
pmd 1992 mm/swapfile.c ret = unuse_pte_range(vma, pmd, addr, next, type,
pmd 1996 mm/swapfile.c } while (pmd++, addr = next, addr != end);
pmd 64 mm/vmalloc.c static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
pmd 68 mm/vmalloc.c pte = pte_offset_kernel(pmd, addr);
pmd 77 mm/vmalloc.c pmd_t *pmd;
pmd 80 mm/vmalloc.c pmd = pmd_offset(pud, addr);
pmd 83 mm/vmalloc.c if (pmd_clear_huge(pmd))
pmd 85 mm/vmalloc.c if (pmd_none_or_clear_bad(pmd))
pmd 87 mm/vmalloc.c vunmap_pte_range(pmd, addr, next);
pmd 88 mm/vmalloc.c } while (pmd++, addr = next, addr != end);
pmd 138 mm/vmalloc.c static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
pmd 148 mm/vmalloc.c pte = pte_alloc_kernel(pmd, addr);
pmd 167 mm/vmalloc.c pmd_t *pmd;
pmd 170 mm/vmalloc.c pmd = pmd_alloc(&init_mm, pud, addr);
pmd 171 mm/vmalloc.c if (!pmd)
pmd 175 mm/vmalloc.c if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
pmd 177 mm/vmalloc.c } while (pmd++, addr = next, addr != end);
pmd 277 mm/vmalloc.c pmd_t *pmd;
pmd 304 mm/vmalloc.c pmd = pmd_offset(pud, addr);
pmd 305 mm/vmalloc.c WARN_ON_ONCE(pmd_bad(*pmd));
pmd 306 mm/vmalloc.c if (pmd_none(*pmd) || pmd_bad(*pmd))
pmd 309 mm/vmalloc.c ptep = pte_offset_map(pmd, addr);
pmd 77 virt/kvm/arm/mmu.c static void kvm_flush_dcache_pmd(pmd_t pmd)
pmd 79 virt/kvm/arm/mmu.c __kvm_flush_dcache_pmd(pmd);
pmd 100 virt/kvm/arm/mmu.c static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
pmd 102 virt/kvm/arm/mmu.c if (!pmd_thp_or_huge(*pmd))
pmd 105 virt/kvm/arm/mmu.c pmd_clear(pmd);
pmd 107 virt/kvm/arm/mmu.c put_page(virt_to_page(pmd));
pmd 179 virt/kvm/arm/mmu.c static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
pmd 181 virt/kvm/arm/mmu.c pte_t *pte_table = pte_offset_kernel(pmd, 0);
pmd 182 virt/kvm/arm/mmu.c VM_BUG_ON(pmd_thp_or_huge(*pmd));
pmd 183 virt/kvm/arm/mmu.c pmd_clear(pmd);
pmd 186 virt/kvm/arm/mmu.c put_page(virt_to_page(pmd));
pmd 242 virt/kvm/arm/mmu.c static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
pmd 248 virt/kvm/arm/mmu.c start_pte = pte = pte_offset_kernel(pmd, addr);
pmd 265 virt/kvm/arm/mmu.c clear_stage2_pmd_entry(kvm, pmd, start_addr);
pmd 272 virt/kvm/arm/mmu.c pmd_t *pmd, *start_pmd;
pmd 274 virt/kvm/arm/mmu.c start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
pmd 277 virt/kvm/arm/mmu.c if (!pmd_none(*pmd)) {
pmd 278 virt/kvm/arm/mmu.c if (pmd_thp_or_huge(*pmd)) {
pmd 279 virt/kvm/arm/mmu.c pmd_t old_pmd = *pmd;
pmd 281 virt/kvm/arm/mmu.c pmd_clear(pmd);
pmd 286 virt/kvm/arm/mmu.c put_page(virt_to_page(pmd));
pmd 288 virt/kvm/arm/mmu.c unmap_stage2_ptes(kvm, pmd, addr, next);
pmd 291 virt/kvm/arm/mmu.c } while (pmd++, addr = next, addr != end);
pmd 365 virt/kvm/arm/mmu.c static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pmd 370 virt/kvm/arm/mmu.c pte = pte_offset_kernel(pmd, addr);
pmd 380 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 383 virt/kvm/arm/mmu.c pmd = stage2_pmd_offset(kvm, pud, addr);
pmd 386 virt/kvm/arm/mmu.c if (!pmd_none(*pmd)) {
pmd 387 virt/kvm/arm/mmu.c if (pmd_thp_or_huge(*pmd))
pmd 388 virt/kvm/arm/mmu.c kvm_flush_dcache_pmd(*pmd);
pmd 390 virt/kvm/arm/mmu.c stage2_flush_ptes(kvm, pmd, addr, next);
pmd 392 virt/kvm/arm/mmu.c } while (pmd++, addr = next, addr != end);
pmd 470 virt/kvm/arm/mmu.c static void clear_hyp_pmd_entry(pmd_t *pmd)
pmd 472 virt/kvm/arm/mmu.c pte_t *pte_table = pte_offset_kernel(pmd, 0);
pmd 473 virt/kvm/arm/mmu.c VM_BUG_ON(pmd_thp_or_huge(*pmd));
pmd 474 virt/kvm/arm/mmu.c pmd_clear(pmd);
pmd 476 virt/kvm/arm/mmu.c put_page(virt_to_page(pmd));
pmd 479 virt/kvm/arm/mmu.c static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
pmd 483 virt/kvm/arm/mmu.c start_pte = pte = pte_offset_kernel(pmd, addr);
pmd 492 virt/kvm/arm/mmu.c clear_hyp_pmd_entry(pmd);
pmd 498 virt/kvm/arm/mmu.c pmd_t *pmd, *start_pmd;
pmd 500 virt/kvm/arm/mmu.c start_pmd = pmd = pmd_offset(pud, addr);
pmd 504 virt/kvm/arm/mmu.c if (!pmd_none(*pmd))
pmd 505 virt/kvm/arm/mmu.c unmap_hyp_ptes(pmd, addr, next);
pmd 506 virt/kvm/arm/mmu.c } while (pmd++, addr = next, addr != end);
pmd 610 virt/kvm/arm/mmu.c static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
pmd 619 virt/kvm/arm/mmu.c pte = pte_offset_kernel(pmd, addr);
pmd 630 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 636 virt/kvm/arm/mmu.c pmd = pmd_offset(pud, addr);
pmd 638 virt/kvm/arm/mmu.c BUG_ON(pmd_sect(*pmd));
pmd 640 virt/kvm/arm/mmu.c if (pmd_none(*pmd)) {
pmd 646 virt/kvm/arm/mmu.c kvm_pmd_populate(pmd, pte);
pmd 647 virt/kvm/arm/mmu.c get_page(virt_to_page(pmd));
pmd 652 virt/kvm/arm/mmu.c create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
pmd 664 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 673 virt/kvm/arm/mmu.c pmd = pmd_alloc_one(NULL, addr);
pmd 674 virt/kvm/arm/mmu.c if (!pmd) {
pmd 678 virt/kvm/arm/mmu.c kvm_pud_populate(pud, pmd);
pmd 1038 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 1047 virt/kvm/arm/mmu.c pmd = mmu_memory_cache_alloc(cache);
pmd 1048 virt/kvm/arm/mmu.c stage2_pud_populate(kvm, pud, pmd);
pmd 1058 virt/kvm/arm/mmu.c pmd_t *pmd, old_pmd;
pmd 1061 virt/kvm/arm/mmu.c pmd = stage2_get_pmd(kvm, cache, addr);
pmd 1062 virt/kvm/arm/mmu.c VM_BUG_ON(!pmd);
pmd 1064 virt/kvm/arm/mmu.c old_pmd = *pmd;
pmd 1109 virt/kvm/arm/mmu.c pmd_clear(pmd);
pmd 1112 virt/kvm/arm/mmu.c get_page(virt_to_page(pmd));
pmd 1115 virt/kvm/arm/mmu.c kvm_set_pmd(pmd, *new_pmd);
pmd 1226 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 1253 virt/kvm/arm/mmu.c pmd = mmu_memory_cache_alloc(cache);
pmd 1254 virt/kvm/arm/mmu.c stage2_pud_populate(kvm, pud, pmd);
pmd 1258 virt/kvm/arm/mmu.c pmd = stage2_pmd_offset(kvm, pud, addr);
pmd 1259 virt/kvm/arm/mmu.c if (!pmd) {
pmd 1272 virt/kvm/arm/mmu.c stage2_dissolve_pmd(kvm, addr, pmd);
pmd 1275 virt/kvm/arm/mmu.c if (pmd_none(*pmd)) {
pmd 1279 virt/kvm/arm/mmu.c kvm_pmd_populate(pmd, pte);
pmd 1280 virt/kvm/arm/mmu.c get_page(virt_to_page(pmd));
pmd 1283 virt/kvm/arm/mmu.c pte = pte_offset_kernel(pmd, addr);
pmd 1321 virt/kvm/arm/mmu.c static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
pmd 1323 virt/kvm/arm/mmu.c return stage2_ptep_test_and_clear_young((pte_t *)pmd);
pmd 1429 virt/kvm/arm/mmu.c static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
pmd 1433 virt/kvm/arm/mmu.c pte = pte_offset_kernel(pmd, addr);
pmd 1452 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 1455 virt/kvm/arm/mmu.c pmd = stage2_pmd_offset(kvm, pud, addr);
pmd 1459 virt/kvm/arm/mmu.c if (!pmd_none(*pmd)) {
pmd 1460 virt/kvm/arm/mmu.c if (pmd_thp_or_huge(*pmd)) {
pmd 1461 virt/kvm/arm/mmu.c if (!kvm_s2pmd_readonly(pmd))
pmd 1462 virt/kvm/arm/mmu.c kvm_set_s2pmd_readonly(pmd);
pmd 1464 virt/kvm/arm/mmu.c stage2_wp_ptes(pmd, addr, next);
pmd 1467 virt/kvm/arm/mmu.c } while (pmd++, addr = next, addr != end);
pmd 1864 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 1873 virt/kvm/arm/mmu.c if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
pmd 1880 virt/kvm/arm/mmu.c } else if (pmd) { /* THP, HugeTLB */
pmd 1881 virt/kvm/arm/mmu.c *pmd = pmd_mkyoung(*pmd);
pmd 1882 virt/kvm/arm/mmu.c pfn = pmd_pfn(*pmd);
pmd 2104 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 2108 virt/kvm/arm/mmu.c if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
pmd 2113 virt/kvm/arm/mmu.c else if (pmd)
pmd 2114 virt/kvm/arm/mmu.c return stage2_pmdp_test_and_clear_young(pmd);
pmd 2122 virt/kvm/arm/mmu.c pmd_t *pmd;
pmd 2126 virt/kvm/arm/mmu.c if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
pmd 2131 virt/kvm/arm/mmu.c else if (pmd)
pmd 2132 virt/kvm/arm/mmu.c return pmd_young(*pmd);
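The references above repeat one descent pattern throughout mm/: obtain a pmd with pmd_offset() (or allocate one with pmd_alloc()), check it with pmd_none()/pmd_bad()/pmd_trans_huge() before trusting it, and only then step down to the pte level with pte_offset_map() and friends. The sketch below condenses that pattern as a minimal illustration for a v5.x-era tree; the helper name find_pte() is hypothetical and is not one of the indexed functions.

#include <linux/mm.h>

/*
 * Hypothetical helper: walk pgd -> p4d -> pud -> pmd -> pte for @addr,
 * mirroring the descent seen in the mm/gup.c and mm/memory.c entries
 * above. Returns the mapped pte pointer (caller must pte_unmap() it)
 * or NULL if no pte level exists. Caller serializes against the mm.
 */
static pte_t *find_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	/* A huge (or none/bad) pmd has no pte table underneath it. */
	if (pmd_none(*pmd) || pmd_trans_huge(*pmd) || unlikely(pmd_bad(*pmd)))
		return NULL;

	return pte_offset_map(pmd, addr);
}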