/linux-4.4.14/arch/m32r/mm/ |
D | discontig.c | 28 unsigned long start_pfn; member 40 unsigned long start_pfn, holes, free_pfn; in mem_prof_init() local 47 mp->start_pfn = PFN_UP(CONFIG_MEMORY_START); in mem_prof_init() 54 start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START); in mem_prof_init() 56 if (start_pfn & (zone_alignment - 1)) { in mem_prof_init() 58 while (start_pfn >= ul) in mem_prof_init() 61 start_pfn = ul - zone_alignment; in mem_prof_init() 62 holes = free_pfn - start_pfn; in mem_prof_init() 65 mp->start_pfn = start_pfn; in mem_prof_init() 87 min_pfn = mp->start_pfn; in setup_memory() [all …]
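The mem_prof_init() hits above revolve around PFN_UP()/PFN_DOWN() rounding and aligning start_pfn down to a zone boundary. Below is a standalone user-space sketch of that arithmetic; PAGE_SHIFT, the memory start address and the alignment value are illustrative assumptions, and the power-of-two masking stands in for the loop the m32r code uses.

    /* Standalone sketch of PFN_UP()/PFN_DOWN() and the zone-alignment step;
     * all constants are assumed values for illustration. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* first frame at or above x */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                    /* frame containing x */

    int main(void)
    {
        unsigned long mem_start      = 0x08001234;      /* hypothetical memory start address */
        unsigned long zone_alignment = 1UL << 10;       /* hypothetical alignment, in frames */
        unsigned long free_pfn       = PFN_UP(mem_start);
        unsigned long start_pfn      = free_pfn;
        unsigned long holes          = 0;

        /* If the first usable frame is not zone-aligned, pull start_pfn back to
         * the previous boundary and record the gap as holes, mirroring the
         * mem_prof_init() pattern (equivalent to its loop for power-of-two
         * alignments). */
        if (start_pfn & (zone_alignment - 1)) {
            start_pfn &= ~(zone_alignment - 1);
            holes = free_pfn - start_pfn;
        }
        printf("start_pfn=%#lx free_pfn=%#lx holes=%lu\n", start_pfn, free_pfn, holes);
        return 0;
    }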
|
D | init.c | 64 unsigned long start_pfn; in zone_sizes_init() local 67 start_pfn = START_PFN(0); in zone_sizes_init() 72 zones_size[ZONE_DMA] = low - start_pfn; in zone_sizes_init() 75 zones_size[ZONE_DMA] = low - start_pfn; in zone_sizes_init() 81 start_pfn = __MEMORY_START >> PAGE_SHIFT; in zone_sizes_init() 84 free_area_init_node(0, zones_size, start_pfn, 0); in zone_sizes_init()
|
/linux-4.4.14/mm/ |
D | memory_hotplug.c | 185 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument 191 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section() 219 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument 225 if (!pfn_valid(start_pfn)) in register_page_bootmem_info_section() 228 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section() 288 static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn, in grow_zone_span() argument 296 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in grow_zone_span() 297 zone->zone_start_pfn = start_pfn; in grow_zone_span() 305 static void resize_zone(struct zone *zone, unsigned long start_pfn, in resize_zone() argument 310 if (end_pfn - start_pfn) { in resize_zone() [all …]
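grow_zone_span() above lowers zone_start_pfn and extends the spanned page count when a hot-added range falls outside the current zone boundaries. The following is a minimal user-space model of that bookkeeping, using a toy structure rather than the kernel's struct zone.

    /* Minimal model of the zone-span growth shown above; the struct and the
     * sample pfn values are illustrative, not the kernel's definitions. */
    #include <stdio.h>

    struct toy_zone {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;        /* zone end is start + spanned */
    };

    static unsigned long zone_end_pfn(const struct toy_zone *z)
    {
        return z->zone_start_pfn + z->spanned_pages;
    }

    /* Grow the span so it covers [start_pfn, end_pfn), as grow_zone_span()
     * does when a hot-added section lies outside the current boundaries. */
    static void grow_span(struct toy_zone *z, unsigned long start_pfn,
                          unsigned long end_pfn)
    {
        unsigned long old_end = zone_end_pfn(z);

        if (z->spanned_pages == 0 || start_pfn < z->zone_start_pfn)
            z->zone_start_pfn = start_pfn;
        if (z->spanned_pages == 0 || end_pfn > old_end)
            old_end = end_pfn;
        z->spanned_pages = old_end - z->zone_start_pfn;
    }

    int main(void)
    {
        struct toy_zone z = { .zone_start_pfn = 0x100000, .spanned_pages = 0x40000 };

        grow_span(&z, 0x140000, 0x148000);  /* hot-add just past the current end */
        printf("start=%#lx spanned=%#lx\n", z.zone_start_pfn, z.spanned_pages);
        return 0;
    }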
|
D | page_isolation.c | 26 arg.start_pfn = pfn; in set_migratetype_isolate() 158 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in start_isolate_page_range() argument 165 BUG_ON((start_pfn) & (pageblock_nr_pages - 1)); in start_isolate_page_range() 168 for (pfn = start_pfn; in start_isolate_page_range() 180 for (pfn = start_pfn; in start_isolate_page_range() 191 int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in undo_isolate_page_range() argument 196 BUG_ON((start_pfn) & (pageblock_nr_pages - 1)); in undo_isolate_page_range() 198 for (pfn = start_pfn; in undo_isolate_page_range() 245 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, in test_pages_isolated() argument 258 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in test_pages_isolated() [all …]
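The isolation helpers above require pageblock-aligned endpoints and then walk the range one pageblock at a time. A small sketch of that walk follows, with an assumed pageblock_nr_pages of 512 (2 MiB blocks of 4 KiB pages).

    /* Sketch of the pageblock-granular walk used by the isolation routines;
     * the pageblock size is an assumed constant for illustration. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL

    static void walk_pageblocks(unsigned long start_pfn, unsigned long end_pfn)
    {
        unsigned long pfn;

        /* Both ends must sit on pageblock boundaries, matching the BUG_ON()s above. */
        assert((start_pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0);
        assert((end_pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0);

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
            printf("pageblock starting at pfn %#lx\n", pfn);
    }

    int main(void)
    {
        walk_pageblocks(0x1000, 0x1000 + 4 * PAGEBLOCK_NR_PAGES);
        return 0;
    }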
|
D | page_ext.c | 279 static int __meminit online_page_ext(unsigned long start_pfn, in online_page_ext() argument 286 start = SECTION_ALIGN_DOWN(start_pfn); in online_page_ext() 287 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext() 295 nid = pfn_to_nid(start_pfn); in online_page_ext() 314 static int __meminit offline_page_ext(unsigned long start_pfn, in offline_page_ext() argument 319 start = SECTION_ALIGN_DOWN(start_pfn); in offline_page_ext() 320 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext() 336 ret = online_page_ext(mn->start_pfn, in page_ext_callback() 340 offline_page_ext(mn->start_pfn, in page_ext_callback() 344 offline_page_ext(mn->start_pfn, in page_ext_callback() [all …]
|
D | compaction.c | 91 static struct page *pageblock_pfn_to_page(unsigned long start_pfn, in pageblock_pfn_to_page() argument 100 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) in pageblock_pfn_to_page() 103 start_page = pfn_to_page(start_pfn); in pageblock_pfn_to_page() 213 unsigned long start_pfn = zone->zone_start_pfn; in __reset_isolation_suitable() local 220 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in __reset_isolation_suitable() 395 unsigned long *start_pfn, in isolate_freepages_block() argument 404 unsigned long blockpfn = *start_pfn; in isolate_freepages_block() 513 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, in isolate_freepages_block() 517 *start_pfn = blockpfn; in isolate_freepages_block() 555 unsigned long start_pfn, unsigned long end_pfn) in isolate_freepages_range() argument [all …]
|
D | page_alloc.c | 354 unsigned long sp, start_pfn; in page_outside_zone_boundaries() local 358 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries() 367 start_pfn, start_pfn + sp); in page_outside_zone_boundaries() 956 unsigned long start_pfn = PFN_DOWN(start); in reserve_bootmem_region() local 959 for (; start_pfn < end_pfn; start_pfn++) { in reserve_bootmem_region() 960 if (pfn_valid(start_pfn)) { in reserve_bootmem_region() 961 struct page *page = pfn_to_page(start_pfn); in reserve_bootmem_region() 963 init_reserved_page(start_pfn); in reserve_bootmem_region() 1535 unsigned long start_pfn, end_pfn; in move_freepages_block() local 1538 start_pfn = page_to_pfn(page); in move_freepages_block() [all …]
|
D | sparse.c | 144 void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument 153 if (*start_pfn > max_sparsemem_pfn) { in mminit_validate_memmodel_limits() 156 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits() 158 *start_pfn = max_sparsemem_pfn; in mminit_validate_memmodel_limits() 163 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits() 194 unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn, in node_memmap_size_bytes() argument 200 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); in node_memmap_size_bytes() 201 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in node_memmap_size_bytes() 693 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) in sparse_add_one_section() argument 695 unsigned long section_nr = pfn_to_section_nr(start_pfn); in sparse_add_one_section() [all …]
|
D | nobootmem.c | 113 unsigned long start_pfn = PFN_UP(start); in __free_memory_core() local 117 if (start_pfn > end_pfn) in __free_memory_core() 120 __free_pages_memory(start_pfn, end_pfn); in __free_memory_core() 122 return end_pfn - start_pfn; in __free_memory_core()
|
D | internal.h | 221 unsigned long start_pfn, unsigned long end_pfn); 404 extern void mminit_validate_memmodel_limits(unsigned long *start_pfn, 407 static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument
|
D | memblock.c | 1444 unsigned long start_pfn, end_pfn; in memblock_mem_size() local 1447 start_pfn = memblock_region_memory_base_pfn(r); in memblock_mem_size() 1449 start_pfn = min_t(unsigned long, start_pfn, limit_pfn); in memblock_mem_size() 1451 pages += end_pfn - start_pfn; in memblock_mem_size() 1524 unsigned long *start_pfn, unsigned long *end_pfn) in memblock_search_pfn_nid() argument 1532 *start_pfn = PFN_DOWN(type->regions[mid].base); in memblock_search_pfn_nid()
|
D | hugetlb.c | 1027 static int __alloc_gigantic_page(unsigned long start_pfn, in __alloc_gigantic_page() argument 1030 unsigned long end_pfn = start_pfn + nr_pages; in __alloc_gigantic_page() 1031 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE); in __alloc_gigantic_page() 1034 static bool pfn_range_valid_gigantic(unsigned long start_pfn, in pfn_range_valid_gigantic() argument 1037 unsigned long i, end_pfn = start_pfn + nr_pages; in pfn_range_valid_gigantic() 1040 for (i = start_pfn; i < end_pfn; i++) { in pfn_range_valid_gigantic() 1060 unsigned long start_pfn, unsigned long nr_pages) in zone_spans_last_pfn() argument 1062 unsigned long last_pfn = start_pfn + nr_pages - 1; in zone_spans_last_pfn() 1434 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) in dissolve_free_huge_pages() argument 1441 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order)); in dissolve_free_huge_pages() [all …]
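Before trying alloc_contig_range() for a gigantic page, the code above checks that the whole pfn range lies inside one zone span. A hedged sketch of that containment test, with made-up zone bounds:

    /* Sketch of a "does this pfn range fit inside the zone span" check in the
     * spirit of zone_spans_last_pfn(); the bounds are illustrative numbers. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool range_spanned_by_zone(unsigned long zone_start_pfn,
                                      unsigned long zone_end_pfn,
                                      unsigned long start_pfn,
                                      unsigned long nr_pages)
    {
        unsigned long last_pfn = start_pfn + nr_pages - 1;  /* inclusive last frame */

        return start_pfn >= zone_start_pfn && last_pfn < zone_end_pfn;
    }

    int main(void)
    {
        unsigned long nr_pages = 262144;    /* a 1 GiB page is 262144 4 KiB frames */

        printf("%d\n", range_spanned_by_zone(0x100000, 0x200000, 0x140000, nr_pages));
        return 0;
    }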
|
D | ksm.c | 2012 static void ksm_check_stable_tree(unsigned long start_pfn, in ksm_check_stable_tree() argument 2024 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree() 2039 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree() 2073 ksm_check_stable_tree(mn->start_pfn, in ksm_memory_callback() 2074 mn->start_pfn + mn->nr_pages); in ksm_memory_callback()
|
D | vmstat.c | 1003 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print() local 1007 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in pagetypeinfo_showblockcount_print()
|
D | kmemleak.c | 1383 unsigned long start_pfn = node_start_pfn(i); in kmemleak_scan() local 1387 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in kmemleak_scan()
|
/linux-4.4.14/arch/mips/loongson64/loongson-3/ |
D | numa.c | 128 u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; in szmem() local 142 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; in szmem() 144 end_pfn = start_pfn + node_psize; in szmem() 149 start_pfn, end_pfn, num_physpages); in szmem() 152 memblock_add_node(PFN_PHYS(start_pfn), in szmem() 153 PFN_PHYS(end_pfn - start_pfn), node); in szmem() 156 start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; in szmem() 158 end_pfn = start_pfn + node_psize; in szmem() 163 start_pfn, end_pfn, num_physpages); in szmem() 166 memblock_add_node(PFN_PHYS(start_pfn), in szmem() [all …]
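On Loongson-3 each node's memory sits at a fixed physical offset of node_id << 44, so szmem() derives start_pfn from that composed address. A standalone sketch of the arithmetic, with illustrative values (mem_size taken to be in MiB, as the shift by 20 suggests):

    /* Sketch of the node-offset address composition used above; the node id,
     * local offset and size are made-up values for illustration. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint64_t node_id   = 1;
        uint64_t mem_start = 0x00200000;                        /* node-local start, bytes */
        uint64_t mem_size  = 256;                               /* node memory size, MiB */

        uint64_t start_pfn  = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
        uint64_t node_psize = (mem_size << 20) >> PAGE_SHIFT;   /* MiB -> frames */
        uint64_t end_pfn    = start_pfn + node_psize;

        printf("node %llu: pfn range [%#llx, %#llx)\n",
               (unsigned long long)node_id,
               (unsigned long long)start_pfn,
               (unsigned long long)end_pfn);
        return 0;
    }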
|
/linux-4.4.14/arch/sparc/mm/ |
D | init_32.c | 77 unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; in calc_highpages() local 83 if (start_pfn < max_low_pfn) in calc_highpages() 84 start_pfn = max_low_pfn; in calc_highpages() 86 nr += end_pfn - start_pfn; in calc_highpages() 116 unsigned long bootmap_size, start_pfn; in bootmem_init() local 148 start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end)); in bootmem_init() 151 start_pfn >>= PAGE_SHIFT; in bootmem_init() 153 bootmap_pfn = start_pfn; in bootmem_init() 181 if (initrd_start >= (start_pfn << PAGE_SHIFT) && in bootmem_init() 182 initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) in bootmem_init() [all …]
|
D | init_64.c | 862 unsigned long start_pfn, end_pfn; in allocate_node_data() local 879 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); in allocate_node_data() 880 p->node_start_pfn = start_pfn; in allocate_node_data() 881 p->node_spanned_pages = end_pfn - start_pfn; in allocate_node_data()
|
/linux-4.4.14/arch/sh/mm/ |
D | numa.c | 30 unsigned long start_pfn, end_pfn; in setup_bootmem_node() local 36 start_pfn = PFN_DOWN(start); in setup_bootmem_node() 44 __add_active_range(nid, start_pfn, end_pfn); in setup_bootmem_node() 52 NODE_DATA(nid)->node_start_pfn = start_pfn; in setup_bootmem_node() 53 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; in setup_bootmem_node() 56 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); in setup_bootmem_node() 60 start_pfn, end_pfn); in setup_bootmem_node() 65 reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT, in setup_bootmem_node()
|
D | init.c | 195 unsigned long start_pfn, end_pfn; in allocate_pgdat() local 200 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); in allocate_pgdat() 218 NODE_DATA(nid)->node_start_pfn = start_pfn; in allocate_pgdat() 219 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; in allocate_pgdat() 270 unsigned long start_pfn, end_pfn; in do_init_bootmem() local 271 start_pfn = memblock_region_memory_base_pfn(reg); in do_init_bootmem() 273 __add_active_range(0, start_pfn, end_pfn); in do_init_bootmem() 290 unsigned long start_pfn; in early_reserve_mem() local 298 start_pfn = PFN_UP(__pa(_end)); in early_reserve_mem() 306 memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start); in early_reserve_mem() [all …]
|
/linux-4.4.14/arch/metag/mm/ |
D | numa.c | 33 unsigned long start_pfn, end_pfn; in setup_bootmem_node() local 39 start_pfn = start >> PAGE_SHIFT; in setup_bootmem_node() 44 memblock_set_node(PFN_PHYS(start_pfn), in setup_bootmem_node() 45 PFN_PHYS(end_pfn - start_pfn), in setup_bootmem_node() 55 NODE_DATA(nid)->node_start_pfn = start_pfn; in setup_bootmem_node() 56 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; in setup_bootmem_node() 59 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); in setup_bootmem_node() 63 start_pfn, end_pfn); in setup_bootmem_node()
|
D | init.c | 111 unsigned long start_pfn, end_pfn; in allocate_pgdat() local 116 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); in allocate_pgdat() 135 NODE_DATA(nid)->node_start_pfn = start_pfn; in allocate_pgdat() 136 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; in allocate_pgdat() 203 unsigned long start_pfn, end_pfn; in do_init_bootmem() local 204 start_pfn = memblock_region_memory_base_pfn(reg); in do_init_bootmem() 206 memblock_set_node(PFN_PHYS(start_pfn), in do_init_bootmem() 207 PFN_PHYS(end_pfn - start_pfn), in do_init_bootmem() 227 unsigned long start_pfn, heap_start; in init_and_reserve_mem() local 239 start_pfn = PFN_UP(__pa(heap_start)); in init_and_reserve_mem() [all …]
|
/linux-4.4.14/arch/x86/xen/ |
D | setup.c | 93 static void __init xen_add_extra_mem(unsigned long start_pfn, in xen_add_extra_mem() argument 105 xen_extra_mem[i].start_pfn = start_pfn; in xen_add_extra_mem() 110 if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns == in xen_add_extra_mem() 111 start_pfn) { in xen_add_extra_mem() 119 memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); in xen_add_extra_mem() 122 static void __init xen_del_extra_mem(unsigned long start_pfn, in xen_del_extra_mem() argument 129 start_r = xen_extra_mem[i].start_pfn; in xen_del_extra_mem() 133 if (start_r == start_pfn) { in xen_del_extra_mem() 135 xen_extra_mem[i].start_pfn += n_pfns; in xen_del_extra_mem() 140 if (start_r + size_r == start_pfn + n_pfns) { in xen_del_extra_mem() [all …]
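xen_del_extra_mem() above shrinks a recorded extra-memory region when the deleted pfn range touches its front or back. A minimal sketch of that trimming, using a made-up region; the split case for a deletion strictly inside a region is intentionally left out.

    /* Sketch of front/back trimming of a [start_pfn, start_pfn + n_pfns) region;
     * the region values are invented for illustration. */
    #include <stdio.h>

    struct extra_region {
        unsigned long start_pfn;
        unsigned long n_pfns;
    };

    static void del_range(struct extra_region *r, unsigned long start_pfn,
                          unsigned long n_pfns)
    {
        if (start_pfn == r->start_pfn) {
            /* Trim from the front. */
            r->start_pfn += n_pfns;
            r->n_pfns -= n_pfns;
        } else if (start_pfn + n_pfns == r->start_pfn + r->n_pfns) {
            /* Trim from the back. */
            r->n_pfns -= n_pfns;
        }
        /* A deletion strictly inside the region would need a split; omitted. */
    }

    int main(void)
    {
        struct extra_region r = { .start_pfn = 0x10000, .n_pfns = 0x4000 };

        del_range(&r, 0x10000, 0x1000);     /* front trim */
        del_range(&r, 0x13000, 0x1000);     /* back trim */
        printf("region: start=%#lx n=%#lx\n", r.start_pfn, r.n_pfns);
        return 0;
    }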
|
/linux-4.4.14/arch/x86/mm/ |
D | init.c | 190 unsigned long start_pfn, unsigned long end_pfn, in save_mr() argument 193 if (start_pfn < end_pfn) { in save_mr() 196 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr() 268 unsigned long start_pfn, end_pfn, limit_pfn; in split_mem_range() local 275 pfn = start_pfn = PFN_DOWN(start); in split_mem_range() 292 if (start_pfn < end_pfn) { in split_mem_range() 293 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); in split_mem_range() 298 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); in split_mem_range() 307 if (start_pfn < end_pfn) { in split_mem_range() 308 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, in split_mem_range() [all …]
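split_mem_range() rounds start_pfn up and the limit down to 2 MiB (PMD) boundaries so the aligned body can be mapped with large pages while the unaligned head and tail fall back to 4 KiB mappings. Below is a simplified, user-space sketch of that head/body/tail split; the constants are x86-64-style assumptions and the real routine also handles 1 GiB ranges.

    /* Simplified head/body/tail split over PMD-sized boundaries; the pfn
     * values and the printout are illustrative only. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PMD_PAGES  (1UL << (PMD_SHIFT - PAGE_SHIFT))   /* frames per 2 MiB */

    static unsigned long round_up_pfn(unsigned long pfn, unsigned long align)
    {
        return (pfn + align - 1) & ~(align - 1);
    }

    static unsigned long round_down_pfn(unsigned long pfn, unsigned long align)
    {
        return pfn & ~(align - 1);
    }

    int main(void)
    {
        unsigned long start_pfn = 0x1003;   /* not 2 MiB aligned */
        unsigned long limit_pfn = 0x2405;

        unsigned long body_start = round_up_pfn(start_pfn, PMD_PAGES);
        unsigned long body_end   = round_down_pfn(limit_pfn, PMD_PAGES);

        if (body_start > limit_pfn || body_start > body_end) {
            /* Range too small for any large page: map it all with 4 KiB pages. */
            printf("head: [%#lx, %#lx) small pages\n", start_pfn, limit_pfn);
            return 0;
        }
        if (start_pfn < body_start)
            printf("head: [%#lx, %#lx) small pages\n", start_pfn, body_start);
        if (body_start < body_end)
            printf("body: [%#lx, %#lx) large pages\n", body_start, body_end);
        if (body_end < limit_pfn)
            printf("tail: [%#lx, %#lx) small pages\n", body_end, limit_pfn);
        return 0;
    }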
|
D | init_32.c | 258 unsigned long start_pfn, end_pfn; in kernel_physical_mapping_init() local 268 start_pfn = start >> PAGE_SHIFT; in kernel_physical_mapping_init() 292 pfn = start_pfn; in kernel_physical_mapping_init() 432 unsigned long start_pfn, unsigned long end_pfn) in add_highpages_with_active_regions() argument 439 start_pfn, end_pfn); in add_highpages_with_active_regions() 441 start_pfn, end_pfn); in add_highpages_with_active_regions() 831 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory() local 834 return __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 840 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory() local 844 zone = page_zone(pfn_to_page(start_pfn)); in arch_remove_memory() [all …]
|
D | numa_32.c | 64 unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, in node_memmap_size_bytes() argument 67 unsigned long nr_pages = end_pfn - start_pfn; in node_memmap_size_bytes()
|
D | pat.c | 385 unsigned long start_pfn = start >> PAGE_SHIFT; in pat_pagerange_is_ram() local 387 struct pagerange_state state = {start_pfn, 0, 0}; in pat_pagerange_is_ram() 396 if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) in pat_pagerange_is_ram() 397 start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; in pat_pagerange_is_ram() 399 if (start_pfn < end_pfn) { in pat_pagerange_is_ram() 400 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, in pat_pagerange_is_ram()
|
D | ioremap.c | 56 static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, in __ioremap_check_ram() argument 62 if (pfn_valid(start_pfn + i) && in __ioremap_check_ram() 63 !PageReserved(pfn_to_page(start_pfn + i))) in __ioremap_check_ram()
|
D | numa_emulation.c | 33 unsigned long start_pfn = PFN_UP(start); in mem_hole_size() local 36 if (start_pfn < end_pfn) in mem_hole_size() 37 return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn)); in mem_hole_size()
|
D | init_64.c | 695 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory() local 701 ret = __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 1019 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory() local 1024 zone = page_zone(pfn_to_page(start_pfn)); in arch_remove_memory() 1026 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
/linux-4.4.14/arch/parisc/mm/ |
D | init.c | 146 if (pmem_ranges[j-1].start_pfn < in setup_bootmem() 147 pmem_ranges[j].start_pfn) { in setup_bootmem() 151 tmp = pmem_ranges[j-1].start_pfn; in setup_bootmem() 152 pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn; in setup_bootmem() 153 pmem_ranges[j].start_pfn = tmp; in setup_bootmem() 167 if (pmem_ranges[i].start_pfn - in setup_bootmem() 168 (pmem_ranges[i-1].start_pfn + in setup_bootmem() 173 pmem_ranges[i].start_pfn - in setup_bootmem() 174 (pmem_ranges[i-1].start_pfn + in setup_bootmem() 192 start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); in setup_bootmem() [all …]
|
/linux-4.4.14/arch/s390/mm/ |
D | init.c | 153 unsigned long start_pfn = PFN_DOWN(start); in arch_add_memory() local 163 if (start_pfn < dma_end_pfn) { in arch_add_memory() 164 nr_pages = (start_pfn + size_pages > dma_end_pfn) ? in arch_add_memory() 165 dma_end_pfn - start_pfn : size_pages; in arch_add_memory() 167 } else if (start_pfn < normal_end_pfn) { in arch_add_memory() 168 nr_pages = (start_pfn + size_pages > normal_end_pfn) ? in arch_add_memory() 169 normal_end_pfn - start_pfn : size_pages; in arch_add_memory() 176 start_pfn, size_pages); in arch_add_memory() 179 start_pfn += nr_pages; in arch_add_memory()
|
/linux-4.4.14/arch/alpha/mm/ |
D | numa.c | 49 i, cluster->usage, cluster->start_pfn, in show_mem_layout() 50 cluster->start_pfn + cluster->numpages); in show_mem_layout() 87 start = cluster->start_pfn; in setup_memory_node() 98 i, cluster->usage, cluster->start_pfn, in setup_memory_node() 99 cluster->start_pfn + cluster->numpages); in setup_memory_node() 172 start = cluster->start_pfn; in setup_memory_node() 212 start = cluster->start_pfn; in setup_memory_node() 213 end = cluster->start_pfn + cluster->numpages; in setup_memory_node() 306 unsigned long start_pfn = bdata->node_min_pfn; in paging_init() local 309 if (dma_local_pfn >= end_pfn - start_pfn) in paging_init() [all …]
|
/linux-4.4.14/arch/score/kernel/ |
D | setup.c | 46 unsigned long start_pfn, bootmap_size; in bootmem_init() local 49 start_pfn = PFN_UP(__pa(&_end)); in bootmem_init() 56 bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, in bootmem_init() 61 free_bootmem(PFN_PHYS(start_pfn), in bootmem_init() 62 (max_low_pfn - start_pfn) << PAGE_SHIFT); in bootmem_init() 63 memory_present(0, start_pfn, max_low_pfn); in bootmem_init() 66 reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT); in bootmem_init()
|
/linux-4.4.14/arch/cris/kernel/ |
D | setup.c | 67 unsigned long start_pfn, max_pfn; in setup_arch() local 110 start_pfn = PFN_UP(memory_start); /* usually c0000000 + kernel + romfs */ in setup_arch() 132 bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, in setup_arch() 138 free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn - start_pfn)); in setup_arch() 149 reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT); in setup_arch()
|
/linux-4.4.14/arch/avr32/mm/ |
D | init.c | 78 unsigned long low, start_pfn; in paging_init() local 80 start_pfn = pgdat->bdata->node_min_pfn; in paging_init() 84 zones_size[ZONE_NORMAL] = low - start_pfn; in paging_init() 87 nid, start_pfn, low); in paging_init() 89 free_area_init_node(nid, zones_size, start_pfn, NULL); in paging_init()
|
/linux-4.4.14/drivers/base/ |
D | memory.c | 188 static bool pages_correctly_reserved(unsigned long start_pfn) in pages_correctly_reserved() argument 192 unsigned long pfn = start_pfn; in pages_correctly_reserved() 227 unsigned long start_pfn; in memory_block_action() local 232 start_pfn = section_nr_to_pfn(phys_index); in memory_block_action() 233 first_page = pfn_to_page(start_pfn); in memory_block_action() 237 if (!pages_correctly_reserved(start_pfn)) in memory_block_action() 240 ret = online_pages(start_pfn, nr_pages, online_type); in memory_block_action() 243 ret = offline_pages(start_pfn, nr_pages); in memory_block_action() 390 unsigned long start_pfn, end_pfn; in show_valid_zones() local 395 start_pfn = section_nr_to_pfn(mem->start_section_nr); in show_valid_zones() [all …]
|
D | node.c | 462 unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; in link_mem_sections() local 463 unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; in link_mem_sections() 468 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in link_mem_sections()
|
/linux-4.4.14/include/trace/events/ |
D | compaction.h | 71 unsigned long start_pfn, 76 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken), 79 __field(unsigned long, start_pfn) 86 __entry->start_pfn = start_pfn; 93 __entry->start_pfn, 102 unsigned long start_pfn, 107 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) 113 unsigned long start_pfn, 118 TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
|
/linux-4.4.14/include/linux/ |
D | memory_hotplug.h | 105 extern int __remove_pages(struct zone *zone, unsigned long start_pfn, 110 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, 247 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 259 static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) in offline_pages() argument 267 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 274 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 277 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
|
D | page-isolation.h | 51 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 59 undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 65 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
|
D | memory.h | 37 int arch_get_memory_phys_device(unsigned long start_pfn); 49 unsigned long start_pfn; member 64 unsigned long start_pfn; /* Start of range to check */ member
|
D | iova.h | 32 unsigned long start_pfn; /* Lower limit for this domain */ member 85 unsigned long start_pfn, unsigned long pfn_32bit);
|
D | ioport.h | 236 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
|
D | memblock.h | 188 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
|
D | hugetlb.h | 457 extern void dissolve_free_huge_pages(unsigned long start_pfn,
|
D | mm.h | 1775 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 1777 extern unsigned long absent_pages_in_range(unsigned long start_pfn, 1780 unsigned long *start_pfn, unsigned long *end_pfn);
|
D | mmzone.h | 748 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
|
/linux-4.4.14/arch/s390/numa/ |
D | numa.c | 106 unsigned long start_pfn, end_pfn; in numa_setup_memory() local 110 start_pfn = ULONG_MAX; in numa_setup_memory() 113 if (t_start < start_pfn) in numa_setup_memory() 114 start_pfn = t_start; in numa_setup_memory() 118 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; in numa_setup_memory()
|
/linux-4.4.14/drivers/hv/ |
D | hv_balloon.c | 439 unsigned long start_pfn; member 599 static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) in hv_bring_pgs_online() argument 605 pg = pfn_to_page(start_pfn + i); in hv_bring_pgs_online() 618 unsigned long start_pfn; in hv_mem_hot_add() local 623 start_pfn = start + (i * HA_CHUNK); in hv_mem_hot_add() 640 nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); in hv_mem_hot_add() 641 ret = add_memory(nid, PFN_PHYS((start_pfn)), in hv_mem_hot_add() 685 cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn); in hv_online_page() 701 static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt) in pfn_covered() argument 717 if ((start_pfn >= has->end_pfn)) in pfn_covered() [all …]
|
/linux-4.4.14/arch/mn10300/kernel/ |
D | setup.c | 95 unsigned long kstart_pfn, start_pfn, free_pfn, end_pfn; in setup_arch() local 125 start_pfn = (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT); in setup_arch() 132 start_pfn, in setup_arch() 135 if (kstart_pfn > start_pfn) in setup_arch() 136 free_bootmem(PFN_PHYS(start_pfn), in setup_arch() 137 PFN_PHYS(kstart_pfn - start_pfn)); in setup_arch()
|
/linux-4.4.14/arch/s390/include/asm/ |
D | diag.h | 42 static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) in diag10_range() argument 46 start_addr = start_pfn << PAGE_SHIFT; in diag10_range() 47 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; in diag10_range()
|
/linux-4.4.14/arch/powerpc/mm/ |
D | mem.c | 120 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory() local 133 return __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 139 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory() local 144 zone = page_zone(pfn_to_page(start_pfn)); in arch_remove_memory() 145 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory() 170 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, in walk_system_ram_range() argument 174 unsigned long end_pfn = start_pfn + nr_pages; in walk_system_ram_range() 179 tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); in walk_system_ram_range()
|
D | numa.c | 826 unsigned long start_pfn, end_pfn; in setup_nonnuma() local 836 start_pfn = memblock_region_memory_base_pfn(reg); in setup_nonnuma() 840 memblock_set_node(PFN_PHYS(start_pfn), in setup_nonnuma() 841 PFN_PHYS(end_pfn - start_pfn), in setup_nonnuma() 922 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) in setup_node_data() argument 924 u64 spanned_pages = end_pfn - start_pfn; in setup_node_data() 932 nid, start_pfn << PAGE_SHIFT, in setup_node_data() 950 NODE_DATA(nid)->node_start_pfn = start_pfn; in setup_node_data() 976 unsigned long start_pfn, end_pfn; in initmem_init() local 978 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); in initmem_init() [all …]
|
/linux-4.4.14/arch/sh/kernel/ |
D | setup.c | 191 void __init __add_active_range(unsigned int nid, unsigned long start_pfn, in __add_active_range() argument 199 start = start_pfn << PAGE_SHIFT; in __add_active_range() 209 start_pfn, end_pfn); in __add_active_range() 233 memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), in __add_active_range()
|
/linux-4.4.14/arch/m68k/coldfire/ |
D | m54xx.c | 87 unsigned long start_pfn; in mcf54xx_bootmem_alloc() local 100 start_pfn = PFN_DOWN(memstart); in mcf54xx_bootmem_alloc() 109 memstart += init_bootmem_node(NODE_DATA(0), start_pfn, in mcf54xx_bootmem_alloc()
|
/linux-4.4.14/arch/mips/sgi-ip27/ |
D | ip27-memory.c | 393 unsigned long start_pfn, end_pfn; in node_mem_init() local 395 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); in node_mem_init() 404 NODE_DATA(node)->node_start_pfn = start_pfn; in node_mem_init() 405 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; in node_mem_init() 413 start_pfn, end_pfn); in node_mem_init() 467 unsigned long start_pfn, end_pfn; in paging_init() local 469 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); in paging_init()
|
/linux-4.4.14/arch/unicore32/mm/ |
D | init.c | 131 static void __init uc32_bootmem_init(unsigned long start_pfn, in uc32_bootmem_init() argument 143 boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn); in uc32_bootmem_init() 153 init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); in uc32_bootmem_init() 317 free_memmap(unsigned long start_pfn, unsigned long end_pfn) in free_memmap() argument 325 start_pg = pfn_to_page(start_pfn - 1) + 1; in free_memmap()
|
/linux-4.4.14/arch/ia64/mm/ |
D | init.c | 508 unsigned long start_pfn) in memmap_init() argument 511 memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY); in memmap_init() 516 start = pfn_to_page(start_pfn); in memmap_init() 652 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory() local 660 ret = __add_pages(nid, zone, start_pfn, nr_pages); in arch_add_memory() 672 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_remove_memory() local 677 zone = page_zone(pfn_to_page(start_pfn)); in arch_remove_memory() 678 ret = __remove_pages(zone, start_pfn, nr_pages); in arch_remove_memory()
|
/linux-4.4.14/arch/x86/kernel/ |
D | e820.c | 764 unsigned long start_pfn; in e820_end_pfn() local 774 start_pfn = ei->addr >> PAGE_SHIFT; in e820_end_pfn() 777 if (start_pfn >= limit_pfn) in e820_end_pfn() 1130 unsigned long start_pfn, end_pfn; in memblock_find_dma_reserve() local 1140 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { in memblock_find_dma_reserve() 1141 start_pfn = min(start_pfn, MAX_DMA_PFN); in memblock_find_dma_reserve() 1143 nr_pages += end_pfn - start_pfn; in memblock_find_dma_reserve() 1148 start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN); in memblock_find_dma_reserve() 1150 if (start_pfn < end_pfn) in memblock_find_dma_reserve() 1151 nr_free_pages += end_pfn - start_pfn; in memblock_find_dma_reserve()
|
D | tboot.c | 146 static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn, in map_tboot_pages() argument 154 for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) { in map_tboot_pages() 155 if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC)) in map_tboot_pages()
|
D | amd_gart_64.c | 738 unsigned long start_pfn, end_pfn; in gart_iommu_init() local 771 start_pfn = PFN_DOWN(aper_base); in gart_iommu_init() 772 if (!pfn_range_is_mapped(start_pfn, end_pfn)) in gart_iommu_init() 773 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); in gart_iommu_init()
|
/linux-4.4.14/drivers/iommu/ |
D | intel-iommu.c | 1083 unsigned long start_pfn, in dma_pte_clear_range() argument 1089 BUG_ON(!domain_pfn_supported(domain, start_pfn)); in dma_pte_clear_range() 1091 BUG_ON(start_pfn > last_pfn); in dma_pte_clear_range() 1096 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); in dma_pte_clear_range() 1098 start_pfn = align_to_level(start_pfn + 1, large_page + 1); in dma_pte_clear_range() 1103 start_pfn += lvl_to_nr_pages(large_page); in dma_pte_clear_range() 1105 } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); in dma_pte_clear_range() 1110 } while (start_pfn && start_pfn <= last_pfn); in dma_pte_clear_range() 1115 unsigned long start_pfn, unsigned long last_pfn) in dma_pte_free_level() argument 1117 pfn = max(start_pfn, pfn); in dma_pte_free_level() [all …]
|
D | iova.c | 26 unsigned long start_pfn, unsigned long pfn_32bit) in init_iova_domain() argument 39 iovad->start_pfn = start_pfn; in init_iova_domain() 138 if ((iovad->start_pfn + size + pad_size) > limit_pfn) { in __alloc_and_insert_iova_range()
|
D | dma-iommu.c | 116 if (iovad->start_pfn) { in iommu_dma_init_domain() 118 base_pfn != iovad->start_pfn || in iommu_dma_init_domain()
|
/linux-4.4.14/arch/microblaze/mm/ |
D | init.c | 190 unsigned long start_pfn, end_pfn; in setup_memory() local 192 start_pfn = memblock_region_memory_base_pfn(reg); in setup_memory() 194 memblock_set_node(start_pfn << PAGE_SHIFT, in setup_memory() 195 (end_pfn - start_pfn) << PAGE_SHIFT, in setup_memory()
|
/linux-4.4.14/arch/m32r/kernel/ |
D | setup.c | 138 unsigned long start_pfn, max_low_pfn, bootmap_size; in setup_memory() local 140 start_pfn = PFN_UP( __pa(_end) ); in setup_memory() 146 bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, in setup_memory() 182 (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1) in setup_memory()
|
/linux-4.4.14/kernel/power/ |
D | snapshot.c | 289 unsigned long start_pfn; /* Zone start page frame */ member 443 zone->start_pfn = start; in create_zone_bm_rtree() 643 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) in memory_bm_find_bit() 650 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) { in memory_bm_find_bit() 666 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) in memory_bm_find_bit() 670 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT; in memory_bm_find_bit() 685 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; in memory_bm_find_bit() 689 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK; in memory_bm_find_bit() 808 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn() 813 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; in memory_bm_next_pfn() [all …]
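memory_bm_find_bit() above converts a pfn into a block number plus a bit offset relative to the zone's start_pfn. A tiny sketch of that two-level indexing follows; BM_BLOCK_SHIFT here is an assumed value, not necessarily the one snapshot.c uses.

    /* Two-level bitmap indexing: pfn -> (block number, bit within block),
     * both relative to the zone's start_pfn.  BM_BLOCK_SHIFT is assumed. */
    #include <stdio.h>

    #define BM_BLOCK_SHIFT 9
    #define BM_BLOCK_MASK  ((1UL << BM_BLOCK_SHIFT) - 1)

    int main(void)
    {
        unsigned long zone_start_pfn = 0x100000;
        unsigned long pfn = 0x1234ab;

        unsigned long block_nr = (pfn - zone_start_pfn) >> BM_BLOCK_SHIFT;
        unsigned long bit_nr   = (pfn - zone_start_pfn) & BM_BLOCK_MASK;

        printf("pfn %#lx -> block %lu, bit %lu\n", pfn, block_nr, bit_nr);
        return 0;
    }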
|
/linux-4.4.14/drivers/xen/ |
D | balloon.c | 707 static void __init balloon_add_region(unsigned long start_pfn, in balloon_add_region() argument 718 extra_pfn_end = min(max_pfn, start_pfn + pages); in balloon_add_region() 720 for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { in balloon_add_region() 728 balloon_stats.total_pages += extra_pfn_end - start_pfn; in balloon_add_region() 765 balloon_add_region(xen_extra_mem[i].start_pfn, in balloon_init()
|
/linux-4.4.14/arch/s390/kernel/ |
D | setup.c | 493 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res))) in kdump_mem_notifier() 495 if (arg->start_pfn > PFN_DOWN(crashk_res.end)) in kdump_mem_notifier() 497 if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start)) in kdump_mem_notifier() 649 unsigned long start_pfn = PFN_UP(__pa(&_end)); in reserve_kernel() local 657 memblock_reserve(0, PFN_PHYS(start_pfn)); in reserve_kernel() 660 memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) in reserve_kernel()
|
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c | 709 unsigned long pfn, start_pfn, end_pfn, nr_pages; in ehea_create_busmap_callback() local 716 start_pfn = initial_pfn; in ehea_create_busmap_callback() 718 pfn = start_pfn; in ehea_create_busmap_callback() 723 nr_pages = pfn - start_pfn; in ehea_create_busmap_callback() 724 ret = ehea_update_busmap(start_pfn, nr_pages, in ehea_create_busmap_callback() 731 start_pfn = pfn; in ehea_create_busmap_callback() 737 nr_pages = pfn - start_pfn; in ehea_create_busmap_callback() 738 return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); in ehea_create_busmap_callback()
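ehea_create_busmap_callback() above coalesces contiguous pfns into runs and flushes each run with a single busmap update when it hits a hole. A self-contained sketch of that run-length pattern; the is_present() predicate and the pfn values are invented for illustration.

    /* Walk a pfn range and flush each contiguous run [run_start, pfn) as one
     * update whenever a hole is hit; the hole layout here is made up. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool is_present(unsigned long pfn)
    {
        return pfn < 0x1100 || pfn >= 0x1180;   /* pretend [0x1100,0x1180) is a hole */
    }

    static void flush_run(unsigned long start_pfn, unsigned long nr_pages)
    {
        if (nr_pages)
            printf("update run: start_pfn=%#lx nr_pages=%lu\n", start_pfn, nr_pages);
    }

    int main(void)
    {
        unsigned long pfn, start_pfn = 0x1000, end = 0x1200;
        unsigned long run_start = start_pfn;

        for (pfn = start_pfn; pfn < end; pfn++) {
            if (!is_present(pfn)) {
                flush_run(run_start, pfn - run_start);  /* close the current run */
                run_start = pfn + 1;                    /* next run starts after the hole */
            }
        }
        flush_run(run_start, end - run_start);          /* flush the trailing run */
        return 0;
    }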
|
D | ehea_main.c | 3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier() 3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
|
/linux-4.4.14/arch/alpha/kernel/ |
D | setup.c | 324 i, cluster->usage, cluster->start_pfn, in setup_memory() 325 cluster->start_pfn + cluster->numpages); in setup_memory() 333 end = cluster->start_pfn + cluster->numpages; in setup_memory() 382 start = cluster->start_pfn; in setup_memory() 416 start = cluster->start_pfn; in setup_memory() 417 end = cluster->start_pfn + cluster->numpages; in setup_memory() 481 if (pfn >= cluster->start_pfn && in page_is_ram() 482 pfn < cluster->start_pfn + cluster->numpages) { in page_is_ram()
|
/linux-4.4.14/arch/sh/include/asm/ |
D | mmzone.h | 41 void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
|
/linux-4.4.14/arch/x86/include/asm/ |
D | highmem.h | 74 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
|
D | page_types.h | 66 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
|
/linux-4.4.14/include/xen/ |
D | page.h | 37 unsigned long start_pfn; member
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
D | iommu.c | 423 static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, in tce_clearrange_multi_pSeriesLP() argument 434 next = start_pfn << PAGE_SHIFT; in tce_clearrange_multi_pSeriesLP() 463 static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, in tce_setrange_multi_pSeriesLP() argument 490 next = start_pfn << PAGE_SHIFT; in tce_setrange_multi_pSeriesLP() 529 static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, in tce_setrange_multi_pSeriesLP_walk() argument 532 return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); in tce_setrange_multi_pSeriesLP_walk() 1305 ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, in iommu_mem_notifier() 1315 ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, in iommu_mem_notifier()
|
D | hotplug-memory.c | 135 unsigned long block_sz, start_pfn; in pseries_remove_memblock() local 139 start_pfn = base >> PAGE_SHIFT; in pseries_remove_memblock() 143 if (!pfn_valid(start_pfn)) in pseries_remove_memblock()
|
D | cmm.c | 475 unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn); in cmm_count_pages() 528 unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn); in cmm_mem_going_offline()
|
/linux-4.4.14/arch/arm64/mm/ |
D | init.c | 210 static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) in free_memmap() argument 218 start_pg = pfn_to_page(start_pfn - 1) + 1; in free_memmap()
|
/linux-4.4.14/drivers/gpu/drm/gma500/ |
D | mmu.h | 78 uint32_t start_pfn,
|
D | mmu.c | 660 int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, in psb_mmu_insert_pfn_sequence() argument 685 pte = psb_mmu_mask_pte(start_pfn++, type); in psb_mmu_insert_pfn_sequence()
|
/linux-4.4.14/arch/parisc/kernel/ |
D | inventory.c | 129 pmem_ptr->start_pfn = (start >> PAGE_SHIFT); in set_pmem_entry() 590 if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) { in do_memory_inventory()
|
/linux-4.4.14/arch/metag/kernel/ |
D | setup.c | 224 unsigned long start_pfn; in setup_arch() local 371 start_pfn = PFN_UP(__pa(heap_start)); in setup_arch()
|
/linux-4.4.14/arch/alpha/include/asm/ |
D | hwrpb.h | 141 unsigned long start_pfn; member
|
/linux-4.4.14/drivers/s390/char/ |
D | sclp_cmd.c | 203 int arch_get_memory_phys_device(unsigned long start_pfn) in arch_get_memory_phys_device() argument 207 return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm); in arch_get_memory_phys_device() 350 start = arg->start_pfn << PAGE_SHIFT; in sclp_mem_notifier()
|
/linux-4.4.14/arch/parisc/include/asm/ |
D | page.h | 89 unsigned long start_pfn; member
|
/linux-4.4.14/tools/perf/scripts/python/ |
D | compaction-times.py | 220 common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): argument 227 common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): argument
|
/linux-4.4.14/arch/tile/kernel/ |
D | setup.c | 1670 insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved) in insert_ram_resource() argument 1677 res->start = start_pfn << PAGE_SHIFT; in insert_ram_resource() 1703 u64 start_pfn = node_start_pfn[i]; in request_standard_resources() local 1707 if (start_pfn <= pci_reserve_start_pfn && in request_standard_resources() 1715 insert_ram_resource(start_pfn, end_pfn, 0); in request_standard_resources()
|
D | pci_gx.c | 983 unsigned long start_pfn = node_start_pfn[j]; in pcibios_init() local 985 unsigned long nr_pages = end_pfn - start_pfn; in pcibios_init() 1013 start_pfn << PAGE_SHIFT, in pcibios_init() 1017 (start_pfn << PAGE_SHIFT) + in pcibios_init()
|
/linux-4.4.14/arch/x86/platform/efi/ |
D | efi.c | 580 u64 start_pfn, end_pfn, end; in old_map_region() local 584 start_pfn = PFN_DOWN(md->phys_addr); in old_map_region() 589 if (pfn_range_is_mapped(start_pfn, end_pfn)) { in old_map_region()
|
/linux-4.4.14/arch/arm/mm/ |
D | init.c | 334 free_memmap(unsigned long start_pfn, unsigned long end_pfn) in free_memmap() argument 342 start_pg = pfn_to_page(start_pfn - 1) + 1; in free_memmap()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_mrmw.c | 2369 unsigned long pfn, start_pfn, end_pfn, nr_pages; in ehca_create_busmap_callback() local 2375 start_pfn = initial_pfn; in ehca_create_busmap_callback() 2377 pfn = start_pfn; in ehca_create_busmap_callback() 2382 nr_pages = pfn - start_pfn; in ehca_create_busmap_callback() 2383 ret = ehca_update_busmap(start_pfn, nr_pages); in ehca_create_busmap_callback() 2388 start_pfn = pfn; in ehca_create_busmap_callback() 2394 nr_pages = pfn - start_pfn; in ehca_create_busmap_callback() 2395 return ehca_update_busmap(start_pfn, nr_pages); in ehca_create_busmap_callback()
|
/linux-4.4.14/arch/blackfin/kernel/ |
D | setup.c | 738 unsigned long start_pfn, end_pfn; in setup_bootmem_allocator() local 758 start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT; in setup_bootmem_allocator() 767 start_pfn, end_pfn); in setup_bootmem_allocator()
|
/linux-4.4.14/arch/tile/mm/ |
D | init.c | 870 unsigned long start_pfn = start >> PAGE_SHIFT; in arch_add_memory() local 873 return __add_pages(zone, start_pfn, nr_pages); in arch_add_memory()
|
/linux-4.4.14/arch/x86/kernel/cpu/mtrr/ |
D | cleanup.c | 847 real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn) in real_trim_memory() argument 851 trim_start = start_pfn; in real_trim_memory()
|
/linux-4.4.14/kernel/ |
D | resource.c | 453 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, in walk_system_ram_range() argument 461 res.start = (u64) start_pfn << PAGE_SHIFT; in walk_system_ram_range() 462 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; in walk_system_ram_range()
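walk_system_ram_range() first converts the pfn range into a byte range with an inclusive end address (the last byte of the last frame) before walking the resource tree. A short sketch of just that conversion, assuming a PAGE_SHIFT of 12:

    /* pfn range -> byte range with an inclusive end; constants are assumed. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long start_pfn = 0x100;
        unsigned long nr_pages  = 16;

        uint64_t start = (uint64_t)start_pfn << PAGE_SHIFT;
        uint64_t end   = ((uint64_t)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;

        printf("resource [%#llx-%#llx]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }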
|
/linux-4.4.14/Documentation/ |
D | memory-hotplug.txt | 409 unsigned long start_pfn; 416 start_pfn is start_pfn of online/offline memory.
|
/linux-4.4.14/arch/ia64/include/asm/ |
D | pgtable.h | 548 unsigned long start_pfn);
|
/linux-4.4.14/Documentation/virtual/uml/ |
D | UserModeLinux-HOWTO.txt | 3746 bootmap_size = init_bootmem(start_pfn, end_pfn - start_pfn);
|