Identifier cross-reference for PMD_SIZE in linux-4.1.27, grouped by directory. Each hit gives the source line number, the matching line, and the enclosing function; lines that define the identifier are tagged "[macro]", and "[all …]" marks files with more hits than the index shows.

/linux-4.1.27/arch/x86/mm/

init.c
    214  unsigned long start = round_down(mr[i].start, PMD_SIZE);  in adjust_range_page_size_mask()
    215  unsigned long end = round_up(mr[i].end, PMD_SIZE);  in adjust_range_page_size_mask()
    281  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
    283  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    285  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    295  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    297  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    300  if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))  in split_mem_range()
    301  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    322  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    [all …]

init_64.c
     63  for (; addr < end; addr += PMD_SIZE) {  in ident_pmd_init()
    336  for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {  in __init_extra_mapping()
    382  unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;  in cleanup_highmap()
    393  for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {  in cleanup_highmap()
    460  next = (address & PMD_MASK) + PMD_SIZE;  in phys_pmd_init()
    879  if (IS_ALIGNED(addr, PMD_SIZE) &&  in remove_pmd_table()
    880  IS_ALIGNED(next, PMD_SIZE)) {  in remove_pmd_table()
    883  get_order(PMD_SIZE));  in remove_pmd_table()
    895  PMD_SIZE)) {  in remove_pmd_table()
    897  get_order(PMD_SIZE));  in remove_pmd_table()
    [all …]

kasan_init_64.c
     85  while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {  in zero_pmd_populate()
     89  addr += PMD_SIZE;  in zero_pmd_populate()

pageattr.c
    101  return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;  in highmap_end_pfn()
    768  if (start & (PMD_SIZE - 1)) {  in unmap_pmd_range()
    769  unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;  in unmap_pmd_range()
    781  while (end - start >= PMD_SIZE) {  in unmap_pmd_range()
    785  __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);  in unmap_pmd_range()
    787  start += PMD_SIZE;  in unmap_pmd_range()
    911  if (start & (PMD_SIZE - 1)) {  in populate_pmd()
    913  unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;  in populate_pmd()
    940  while (end - start >= PMD_SIZE) {  in populate_pmd()
    954  start += PMD_SIZE;  in populate_pmd()
    [all …]

init_32.c
    148  vaddr += PMD_SIZE;  in page_table_range_init_count()
    233  vaddr += PMD_SIZE;  in page_table_range_init()
    529  end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;  in early_ioremap_page_table_range_init()

hugetlbpage.c
    163  if (ps == PMD_SIZE) {  in setup_hugepagesz()

pgtable.c
    595  mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);  in pmd_set_huge()

fault.c
    237  address += PMD_SIZE) {  in vmalloc_sync_all()

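Nearly every arch/x86/mm hit above is the same idiom: round a boundary down or up to a whole PMD multiple, then step through the range PMD_SIZE at a time. A minimal user-space sketch of that arithmetic, with the x86_64 values (4 KiB pages, PMD_SHIFT = 21, so one PMD maps 2 MiB) assumed for illustration:

    #include <stdio.h>

    /* Assumed x86_64 values: 4 KiB pages, PMD_SHIFT 21, one PMD maps 2 MiB. */
    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PMD_SIZE   (1UL << PMD_SHIFT)
    #define PMD_MASK   (~(PMD_SIZE - 1))

    /* Kernel-style rounding helpers; valid only for power-of-two 'y'. */
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)
    #define PFN_DOWN(x)      ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long start = 0x1ff000UL, end = 0x401000UL;

            /* Same shape as adjust_range_page_size_mask(): widen a range
             * to whole PMD units before deciding on large pages. */
            printf("round_down(start) = %#lx\n", round_down(start, PMD_SIZE));
            printf("round_up(end)     = %#lx\n", round_up(end, PMD_SIZE));

            /* split_mem_range() does the same arithmetic in page-frame units. */
            printf("PMD_SIZE = %lu pages\n", PFN_DOWN(PMD_SIZE));
            return 0;
    }

The rounding macros only work because PMD_SIZE is a power of two; the masks are what keep everything branch-free.
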
/linux-4.1.27/arch/sh/include/asm/

pgtable-3level.h
     22  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     23  #define PMD_MASK (~(PMD_SIZE-1))
     25  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)

/linux-4.1.27/arch/tile/mm/

hugetlbpage.c
     72  if (sz >= PMD_SIZE) {  in huge_pte_alloc()
     73  BUG_ON(sz != PMD_SIZE &&  in huge_pte_alloc()
     74  sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));  in huge_pte_alloc()
     84  BUG_ON(sz != PMD_SIZE);  in huge_pte_alloc()
    279  } else if (ps >= PMD_SIZE) {  in __setup_hugepagesz()
    334  BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||  in add_default_hugepagesz()

init.c
    181  start = round_down(start, PMD_SIZE);  in page_table_range_init()
    182  end = round_up(end, PMD_SIZE);  in page_table_range_init()
    183  for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {  in page_table_range_init()

/linux-4.1.27/arch/s390/mm/

vmem.c
    116  !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {  in vmem_add_mem()
    121  address += PMD_SIZE;  in vmem_add_mem()
    175  address += PMD_SIZE;  in vmem_remove_range()
    180  address += PMD_SIZE;  in vmem_remove_range()
    230  new_page = vmemmap_alloc_block(PMD_SIZE, node);  in vmemmap_populate()
    235  address = (address + PMD_SIZE) & PMD_MASK;  in vmemmap_populate()
    243  address = (address + PMD_SIZE) & PMD_MASK;  in vmemmap_populate()

pgtable.c
    321  offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;  in __gmap_segment_gaddr()
    378  if ((to | len) & (PMD_SIZE - 1))  in gmap_unmap_segment()
    385  for (off = 0; off < len; off += PMD_SIZE)  in gmap_unmap_segment()
    409  if ((from | to | len) & (PMD_SIZE - 1))  in gmap_map_segment()
    417  for (off = 0; off < len; off += PMD_SIZE) {  in gmap_map_segment()
    666  gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {  in gmap_discard()
    676  size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));  in gmap_discard()

dump_pagetables.c
    144  addr += PMD_SIZE;  in walk_pmd_level()

/linux-4.1.27/arch/x86/include/asm/

pgtable_32_types.h
     11  # define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     12  # define PMD_MASK (~(PMD_SIZE - 1))

pgtable_64_types.h
     48  #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)  [macro]
     49  #define PMD_MASK (~(PMD_SIZE - 1))

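The 32-bit and 64-bit x86 headers differ only in how they spell the constant 1: pgtable_64_types.h uses _AC(1, UL) because the header is also pulled into assembly sources, and the assembler does not understand the C "UL" suffix. A runnable sketch of that mechanism, simplified from include/uapi/linux/const.h, with PMD_SHIFT = 21 assumed (the x86_64 4-level-paging value):

    #include <stdio.h>

    /* Simplified from include/uapi/linux/const.h: under __ASSEMBLY__ the
     * suffix is dropped; otherwise it is token-pasted onto the literal. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define PMD_SHIFT 21                          /* assumed: x86_64, 4 KiB pages */
    #define PMD_SIZE  (_AC(1, UL) << PMD_SHIFT)   /* expands to 1UL << 21 = 2 MiB */
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
            printf("PMD_SIZE = %#lx (%lu MiB)\n", PMD_SIZE, PMD_SIZE >> 20);
            printf("PMD_MASK = %#lx\n", PMD_MASK);
            return 0;
    }
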
/linux-4.1.27/arch/powerpc/include/asm/

pgtable-ppc64-64k.h
     27  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     28  #define PMD_MASK (~(PMD_SIZE-1))

pgtable-ppc64-4k.h
     27  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     28  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/parisc/kernel/

pci-dma.c
     91  if (end > PMD_SIZE)  in map_pte_uncached()
     92  end = PMD_SIZE;  in map_pte_uncached()
    126  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in map_pmd_uncached()
    127  orig_vaddr += PMD_SIZE;  in map_pmd_uncached()
    171  if (end > PMD_SIZE)  in unmap_uncached_pte()
    172  end = PMD_SIZE;  in unmap_uncached_pte()
    211  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in unmap_uncached_pmd()
    212  orig_vaddr += PMD_SIZE;  in unmap_uncached_pmd()

/linux-4.1.27/include/asm-generic/

pgtable-nopmd.h
     21  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     22  #define PMD_MASK (~(PMD_SIZE-1))

pgtable.h
    303  ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \  (first line of the pmd_addr_end() macro)

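Hit 303 shows only the first line of the generic pmd_addr_end() macro. A runnable sketch with the rest of the macro filled in from the same header, plus the walker-loop shape it exists for (the demo values and PMD_SHIFT = 21 are assumptions; the statement-expression syntax is a GNU C extension, so compile with gcc):

    #include <stdio.h>

    #define PMD_SHIFT 21
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* Body completed from asm-generic/pgtable.h. The "- 1" on both sides
     * keeps the comparison correct when 'end' is 0, i.e. when the range
     * wraps past the top of the address space. */
    #define pmd_addr_end(addr, end)                                         \
    ({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;      \
            (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
    })

    int main(void)
    {
            unsigned long addr = 0x1ff000UL, end = 0x600000UL, next;

            /* Walker shape used throughout mm/: at most PMD_SIZE per step,
             * never crossing a PMD boundary, never overshooting 'end'. */
            do {
                    next = pmd_addr_end(addr, end);
                    printf("chunk [%#lx, %#lx)\n", addr, next);
                    addr = next;
            } while (addr != end);
            return 0;
    }
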
/linux-4.1.27/arch/nios2/mm/

ioremap.c
     33  if (end > PMD_SIZE)  in remap_area_pte()
     34  end = PMD_SIZE;  in remap_area_pte()
     70  address = (address + PMD_SIZE) & PMD_MASK;  in remap_area_pmd()

/linux-4.1.27/arch/mips/mm/

ioremap.c
     29  if (end > PMD_SIZE)  in remap_area_pte()
     30  end = PMD_SIZE;  in remap_area_pte()
     61  address = (address + PMD_SIZE) & PMD_MASK;  in remap_area_pmd()

init.c
    243  vaddr += PMD_SIZE;  in fixrange_init()

/linux-4.1.27/arch/m68k/include/asm/

pgtable_mm.h
     37  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     38  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/arm/include/asm/

pgtable-2level.h
     88  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     89  #define PMD_MASK (~(PMD_SIZE-1))

kvm_mmu.h
    148  ({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \  (macro continuation line; same boundary pattern as the generic pmd_addr_end())
    248  unsigned long size = PMD_SIZE;  in __kvm_flush_dcache_pmd()

highmem.h
      6  #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)

memory.h
     69  #define MODULES_END (PAGE_OFFSET - PMD_SIZE)

pgtable-3level.h
     50  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]

/linux-4.1.27/arch/tile/include/asm/

pgtable_64.h
     32  #define PMD_SIZE HPAGE_SIZE  [macro]
     33  #define PMD_MASK (~(PMD_SIZE-1))

hugetlb.h
    119  if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)  in arch_make_huge_pte()

/linux-4.1.27/arch/um/include/asm/

pgtable-3level.h
     27  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     28  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/lib/

ioremap.c
     85  ((next - addr) == PMD_SIZE) &&  in ioremap_pmd_range()
     86  IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {  in ioremap_pmd_range()

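These two lib/ioremap.c hits are the gate for installing one huge PMD entry in ioremap_pmd_range() instead of a full page of PTEs. In that file the incoming phys_addr has already had the starting virtual address subtracted, so phys_addr + addr is the physical address backing addr; both the chunk handed down by pmd_addr_end() and that physical address must be PMD-granular. A sketch of just the predicate, with PMD_SHIFT = 21 assumed (the real code additionally requires arch support for huge ioremap mappings and a successful pmd_set_huge()):

    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SHIFT 21                 /* assumed x86_64 value: 2 MiB PMDs */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* May this [addr, next) chunk be mapped by a single huge PMD?
     * It must span exactly one PMD, and the physical address must be
     * just as aligned as the virtual one, or the huge entry would map
     * the wrong bytes. */
    static bool can_map_huge_pmd(unsigned long addr, unsigned long next,
                                 unsigned long phys_addr)
    {
            return (next - addr) == PMD_SIZE &&
                   IS_ALIGNED(phys_addr + addr, PMD_SIZE);
    }

    int main(void)
    {
            /* Aligned full chunk: eligible. Misaligned physical: not. */
            printf("%d\n", can_map_huge_pmd(0x200000UL, 0x400000UL,
                                            0xe0000000UL - 0x200000UL));
            printf("%d\n", can_map_huge_pmd(0x200000UL, 0x400000UL,
                                            0xe0001000UL - 0x200000UL));
            return 0;
    }
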
/linux-4.1.27/arch/arm/mm/

mmu.c
    999  next = (addr + PMD_SIZE - 1) & PMD_MASK;  in fill_pmd_gaps()
   1128  if (!IS_ALIGNED(block_start, PMD_SIZE))  in sanity_check_meminfo()
   1130  else if (!IS_ALIGNED(block_end, PMD_SIZE))  in sanity_check_meminfo()
   1145  memblock_limit = round_down(memblock_limit, PMD_SIZE);  in sanity_check_meminfo()
   1160  for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)  in prepare_page_table()
   1165  addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;  in prepare_page_table()
   1167  for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)  in prepare_page_table()
   1182  addr < VMALLOC_START; addr += PMD_SIZE)  in prepare_page_table()
   1234  for (addr = VMALLOC_START; addr; addr += PMD_SIZE)  in devicemaps_init()
   1411  map_end = ALIGN(init_mm.brk, PMD_SIZE);  in early_paging_init()
    [all …]

ioremap.c
    171  addr += PMD_SIZE;  in unmap_area_sections()
    210  addr += PMD_SIZE;  in remap_area_sections()
    247  addr += PMD_SIZE;  in remap_area_supersections()

dump.c
    276  addr = start + i * PMD_SIZE;  in walk_pmd()
    282  if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))  in walk_pmd()

dma-mapping.c
    439  addr += PMD_SIZE)  in dma_contiguous_remap()

/linux-4.1.27/arch/arm64/include/asm/

pgtable-hwdef.h
     26  #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)  [macro]
     27  #define PMD_MASK (~(PMD_SIZE-1))

kvm_mmu.h
    260  kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);

/linux-4.1.27/arch/x86/xen/

p2m.c
    385  (unsigned long)(p2m + pfn) + i * PMD_SIZE);  in xen_rebuild_p2m_list()
    399  PMD_SIZE * PMDS_PER_MID_PAGE);  in xen_vmalloc_p2m_tree()
    400  vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);  in xen_vmalloc_p2m_tree()
    467  vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);  in alloc_p2m_pmd()
    492  vaddr += PMD_SIZE;  in alloc_p2m_pmd()

mmu.c
   1100  unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;  in xen_cleanhighmap()
   1106  pmd++, vaddr += PMD_SIZE) {  in xen_cleanhighmap()
   1138  size = roundup(size, PMD_SIZE);  in xen_pagetable_p2m_free()
   1153  size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);  in xen_pagetable_p2m_free()

/linux-4.1.27/arch/arm64/mm/

hugetlbpage.c
     58  if (ps == PMD_SIZE) {  in setup_hugepagesz()

mmu.c
    120  addr += PMD_SIZE;  in split_pud()
    360  limit = PHYS_OFFSET + PMD_SIZE;  in map_mem()
    382  start = ALIGN(start, PMD_SIZE);  in map_mem()
    543  p = vmemmap_alloc_block_buf(PMD_SIZE, node);  in vmemmap_populate()

dump.c
    252  addr = start + i * PMD_SIZE;  in walk_pmd()

/linux-4.1.27/arch/metag/include/asm/

highmem.h
     26  #define PKMAP_BASE (FIXADDR_START - PMD_SIZE)

/linux-4.1.27/mm/

sparse-vmemmap.c
    204  size = ALIGN(size, PMD_SIZE);  in sparse_mem_maps_populate_node()
    206  PMD_SIZE, __pa(MAX_DMA_ADDRESS));  in sparse_mem_maps_populate_node()

mremap.c
    182  next = (old_addr + PMD_SIZE) & PMD_MASK;  in move_page_tables()
    218  next = (new_addr + PMD_SIZE) & PMD_MASK;  in move_page_tables()

memory.c
    505  addr += PMD_SIZE;  in free_pgd_range()
    515  end -= PMD_SIZE;  in free_pgd_range()
    549  while (next && next->vm_start <= vma->vm_end + PMD_SIZE  in free_pgtables()

hugetlb.c
   3821  BUG_ON(sz != PMD_SIZE);  in huge_pte_alloc()

/linux-4.1.27/arch/mips/include/asm/

pgtable-64.h
     53  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     54  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/m32r/include/asm/

pgtable.h
     50  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     51  #define PMD_MASK (~(PMD_SIZE - 1))

/linux-4.1.27/arch/x86/platform/efi/

efi_64.c
    245  if (!(pa & (PMD_SIZE - 1))) {  in efi_map_region()
    248  u64 pa_offset = pa & (PMD_SIZE - 1);  in efi_map_region()
    255  efi_va -= PMD_SIZE;  in efi_map_region()

/linux-4.1.27/arch/metag/mm/

init.c
    280  vaddr += PMD_SIZE;  in allocate_pgtables()
    304  end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;  in fixedrange_init()

/linux-4.1.27/arch/avr32/include/asm/

pgtable.h
     27  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     28  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/sparc/include/asm/

pgtable_32.h
     37  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     38  #define PMD_MASK (~(PMD_SIZE-1))

pgtable_64.h
     49  #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)  [macro]
     50  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/alpha/include/asm/

pgtable.h
     32  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     33  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/mn10300/mm/

pgtable.c
     41  if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */  in set_pmd_pfn()

/linux-4.1.27/arch/sh/mm/

init.c
    184  vaddr += PMD_SIZE;  in page_table_range_init()
    374  end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;  in paging_init()

/linux-4.1.27/arch/m68k/sun3x/

dvma.c
    124  end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;  in dvma_map_cpu()

/linux-4.1.27/arch/x86/kernel/

head_64.S
    139  addq $PMD_SIZE, %rax
    502  KERNEL_IMAGE_SIZE/PMD_SIZE)

setup_percpu.c
    200  atom_size = PMD_SIZE;  in setup_per_cpu_areas()

/linux-4.1.27/arch/microblaze/include/asm/

pgtable.h
    143  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
    144  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/frv/include/asm/

pgtable.h
    135  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
    136  #define PMD_MASK (~(PMD_SIZE - 1))

/linux-4.1.27/arch/sparc/mm/

init_64.c
   1470  return vstart + PMD_SIZE;  in kernel_map_hugepmd()
   1487  pte_val += PMD_SIZE;  in kernel_map_hugepmd()
   1488  vstart += PMD_SIZE;  in kernel_map_hugepmd()
   1498  if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)  in kernel_can_map_hugepmd()
   1559  this_end = (vstart + PMD_SIZE) & PMD_MASK;  in kernel_map_range()
   2353  vend = ALIGN(vend, PMD_SIZE);  in vmemmap_populate()
   2354  for (; vstart < vend; vstart += PMD_SIZE) {  in vmemmap_populate()
   2381  void *block = vmemmap_alloc_block(PMD_SIZE, node);  in vmemmap_populate()

srmmu.c
    684  if (start > (0xffffffffUL - PMD_SIZE))  in srmmu_early_allocate_ptable_skeleton()
    686  start = (start + PMD_SIZE) & PMD_MASK;  in srmmu_early_allocate_ptable_skeleton()
    715  if (start > (0xffffffffUL - PMD_SIZE))  in srmmu_allocate_ptable_skeleton()
    717  start = (start + PMD_SIZE) & PMD_MASK;  in srmmu_allocate_ptable_skeleton()

/linux-4.1.27/arch/powerpc/mm/

tlb_hash64.c
    237  addr = _ALIGN_DOWN(addr, PMD_SIZE);  in flush_tlb_pmd_range()

tlb_nohash.c
    403  unsigned long end = address + PMD_SIZE;  in tlb_flush_pgtable()

/linux-4.1.27/arch/parisc/include/asm/

pgtable.h
    110  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
    111  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/um/kernel/

mem.c
    108  vaddr += PMD_SIZE;  in fixrange_init()

tlb.c
    346  last = ADD_ROUND(addr, PMD_SIZE);  in flush_tlb_kernel_range_common()

/linux-4.1.27/arch/ia64/include/asm/

pgtable.h
     98  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     99  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/x86/vdso/

vma.c
     70  end = (start + len + PMD_SIZE - 1) & PMD_MASK;  in vdso_addr()

/linux-4.1.27/include/linux/

mmu_notifier.h
    310  PMD_SIZE); \  (macro continuation line)

hugetlb.h
    457  if (huge_page_size(h) == PMD_SIZE)  in huge_pte_lockptr()

/linux-4.1.27/Documentation/vm/

split_page_table_lock
     44  takes pmd split lock for PMD_SIZE page, mm->page_table_lock

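The hugetlb.h hit above and the split_page_table_lock note are two views of the same rule: only a PMD_SIZE huge page can use the fine-grained per-page-table split lock; anything larger serializes on the single mm->page_table_lock. The following is a reconstruction from memory of huge_pte_lockptr() in this kernel's include/linux/hugetlb.h (split PMD ptlocks configured); it is shown for context as kernel code, not as a standalone program or a verified quote:

    /* Reconstruction of the logic behind hit 457 above (assumed, not
     * verified against the tree): pick the split pmd lock for PMD_SIZE
     * huge pages, fall back to mm->page_table_lock otherwise. */
    static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                               struct mm_struct *mm, pte_t *pte)
    {
            if (huge_page_size(h) == PMD_SIZE)
                    return pmd_lockptr(mm, (pmd_t *) pte);
            VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
            return &mm->page_table_lock;
    }
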
/linux-4.1.27/arch/mn10300/include/asm/

pgtable.h
     65  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]

/linux-4.1.27/arch/s390/include/asm/

pgtable.h
     70  #define PMD_SIZE (1UL << PMD_SHIFT)  [macro]
     71  #define PMD_MASK (~(PMD_SIZE-1))

/linux-4.1.27/arch/ia64/mm/

init.c
    413  end_address += PMD_SIZE;  in vmemmap_find_next_valid_pfn()

/linux-4.1.27/arch/arm/kvm/

mmu.c
   1323  coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);  in user_mem_abort()

/linux-4.1.27/fs/proc/

task_mmu.c
    967  #define PAGEMAP_WALK_SIZE (PMD_SIZE)