start_pfn 142 arch/alpha/include/asm/hwrpb.h unsigned long start_pfn;
start_pfn 326 arch/alpha/kernel/setup.c i, cluster->usage, cluster->start_pfn,
start_pfn 327 arch/alpha/kernel/setup.c cluster->start_pfn + cluster->numpages);
start_pfn 335 arch/alpha/kernel/setup.c end = cluster->start_pfn + cluster->numpages;
start_pfn 339 arch/alpha/kernel/setup.c memblock_add(PFN_PHYS(cluster->start_pfn),
start_pfn 408 arch/alpha/kernel/setup.c if (pfn >= cluster->start_pfn &&
start_pfn 409 arch/alpha/kernel/setup.c pfn < cluster->start_pfn + cluster->numpages) {
start_pfn 50 arch/alpha/mm/numa.c i, cluster->usage, cluster->start_pfn,
start_pfn 51 arch/alpha/mm/numa.c cluster->start_pfn + cluster->numpages);
start_pfn 86 arch/alpha/mm/numa.c start = cluster->start_pfn;
start_pfn 97 arch/alpha/mm/numa.c i, cluster->usage, cluster->start_pfn,
start_pfn 98 arch/alpha/mm/numa.c cluster->start_pfn + cluster->numpages);
start_pfn 219 arch/alpha/mm/numa.c unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
start_pfn 220 arch/alpha/mm/numa.c unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_present_pages;
start_pfn 222 arch/alpha/mm/numa.c if (dma_local_pfn >= end_pfn - start_pfn)
start_pfn 223 arch/alpha/mm/numa.c zones_size[ZONE_DMA] = end_pfn - start_pfn;
start_pfn 226 arch/alpha/mm/numa.c zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
start_pfn 229 arch/alpha/mm/numa.c free_area_init_node(nid, zones_size, start_pfn, NULL);
start_pfn 327 arch/arm/mm/init.c free_memmap(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 335 arch/arm/mm/init.c start_pg = pfn_to_page(start_pfn - 1) + 1;
start_pfn 462 arch/arm64/mm/init.c static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 470 arch/arm64/mm/init.c start_pg = pfn_to_page(start_pfn - 1) + 1;
start_pfn 1070 arch/arm64/mm/mmu.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 1080 arch/arm64/mm/mmu.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 218 arch/arm64/mm/numa.c static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
start_pfn 225 arch/arm64/mm/numa.c if (start_pfn >= end_pfn)
start_pfn 245 arch/arm64/mm/numa.c NODE_DATA(nid)->node_start_pfn = start_pfn;
start_pfn 246 arch/arm64/mm/numa.c NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 363 arch/arm64/mm/numa.c unsigned long start_pfn, end_pfn;
start_pfn 365 arch/arm64/mm/numa.c get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
start_pfn 366 arch/arm64/mm/numa.c setup_node_data(nid, start_pfn, end_pfn);
start_pfn 548 arch/ia64/include/asm/pgtable.h unsigned long start_pfn);
start_pfn 527 arch/ia64/mm/init.c unsigned long start_pfn)
start_pfn 530 arch/ia64/mm/init.c memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
start_pfn 536 arch/ia64/mm/init.c start = pfn_to_page(start_pfn);
start_pfn 675 arch/ia64/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 679 arch/ia64/mm/init.c ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 690 arch/ia64/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 693 arch/ia64/mm/init.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 174 arch/microblaze/mm/init.c unsigned long start_pfn, end_pfn;
start_pfn 176 arch/microblaze/mm/init.c start_pfn = memblock_region_memory_base_pfn(reg);
start_pfn 178 arch/microblaze/mm/init.c memblock_set_node(start_pfn << PAGE_SHIFT,
start_pfn 179 arch/microblaze/mm/init.c (end_pfn - start_pfn) << PAGE_SHIFT,
start_pfn 123 arch/mips/loongson64/loongson-3/numa.c u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
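The entries above keep converting between byte addresses and page frame numbers: "start >> PAGE_SHIFT" one way, PFN_PHYS() the other. A minimal standalone sketch of that arithmetic; the macros mirror include/linux/pfn.h, and PAGE_SHIFT = 12 (4 KiB pages) is an assumption of the demo.

/* Sketch of the PFN conversions used throughout this index. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* round up */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                   /* round down */
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x10000234UL;	/* arbitrary byte range */
	unsigned long size  = 0x8000UL;

	/* PFN_UP(start) is the first fully contained page; PFN_DOWN on
	 * the end address is the first page past the range. */
	unsigned long first = PFN_UP(start);
	unsigned long last  = PFN_DOWN(start + size);

	printf("pfns [%#lx, %#lx), back to phys %#lx\n",
	       first, last, PFN_PHYS(first));
	return 0;
}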
start_pfn 137 arch/mips/loongson64/loongson-3/numa.c start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
start_pfn 139 arch/mips/loongson64/loongson-3/numa.c end_pfn = start_pfn + node_psize;
start_pfn 144 arch/mips/loongson64/loongson-3/numa.c start_pfn, end_pfn, num_physpages);
start_pfn 145 arch/mips/loongson64/loongson-3/numa.c memblock_add_node(PFN_PHYS(start_pfn),
start_pfn 146 arch/mips/loongson64/loongson-3/numa.c PFN_PHYS(end_pfn - start_pfn), node);
start_pfn 149 arch/mips/loongson64/loongson-3/numa.c start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
start_pfn 151 arch/mips/loongson64/loongson-3/numa.c end_pfn = start_pfn + node_psize;
start_pfn 156 arch/mips/loongson64/loongson-3/numa.c start_pfn, end_pfn, num_physpages);
start_pfn 157 arch/mips/loongson64/loongson-3/numa.c memblock_add_node(PFN_PHYS(start_pfn),
start_pfn 158 arch/mips/loongson64/loongson-3/numa.c PFN_PHYS(end_pfn - start_pfn), node);
start_pfn 173 arch/mips/loongson64/loongson-3/numa.c unsigned long start_pfn, end_pfn;
start_pfn 179 arch/mips/loongson64/loongson-3/numa.c get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
start_pfn 181 arch/mips/loongson64/loongson-3/numa.c node, start_pfn, end_pfn);
start_pfn 185 arch/mips/loongson64/loongson-3/numa.c NODE_DATA(node)->node_start_pfn = start_pfn;
start_pfn 186 arch/mips/loongson64/loongson-3/numa.c NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 196 arch/mips/loongson64/loongson-3/numa.c memblock_reserve(start_pfn << PAGE_SHIFT,
start_pfn 197 arch/mips/loongson64/loongson-3/numa.c ((kernel_end_pfn - start_pfn) << PAGE_SHIFT));
start_pfn 277 arch/mips/mm/init.c static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 288 arch/mips/mm/init.c cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
start_pfn 289 arch/mips/mm/init.c cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
start_pfn 102 arch/mips/mm/ioremap.c static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 108 arch/mips/mm/ioremap.c if (pfn_valid(start_pfn + i) &&
start_pfn 109 arch/mips/mm/ioremap.c !PageReserved(pfn_to_page(start_pfn + i)))
start_pfn 387 arch/mips/sgi-ip27/ip27-memory.c unsigned long start_pfn, end_pfn;
start_pfn 389 arch/mips/sgi-ip27/ip27-memory.c get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
start_pfn 397 arch/mips/sgi-ip27/ip27-memory.c NODE_DATA(node)->node_start_pfn = start_pfn;
start_pfn 398 arch/mips/sgi-ip27/ip27-memory.c NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 90 arch/parisc/include/asm/page.h unsigned long start_pfn;
start_pfn 145 arch/parisc/kernel/inventory.c pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
start_pfn 606 arch/parisc/kernel/inventory.c if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
start_pfn 138 arch/parisc/mm/init.c if (pmem_ranges[j-1].start_pfn <
start_pfn 139 arch/parisc/mm/init.c pmem_ranges[j].start_pfn) {
start_pfn 156 arch/parisc/mm/init.c if (pmem_ranges[i].start_pfn -
start_pfn 157 arch/parisc/mm/init.c (pmem_ranges[i-1].start_pfn +
start_pfn 162 arch/parisc/mm/init.c pmem_ranges[i].start_pfn -
start_pfn 163 arch/parisc/mm/init.c (pmem_ranges[i-1].start_pfn +
start_pfn 179 arch/parisc/mm/init.c start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
start_pfn 233 arch/parisc/mm/init.c end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
start_pfn 236 arch/parisc/mm/init.c hole_pages = pmem_ranges[i].start_pfn - end_pfn;
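A second recurring idiom, visible in the alpha and parisc entries, is the half-open range test "pfn >= start_pfn && pfn < start_pfn + nr_pages": the start is inclusive, the end exclusive, so adjacent ranges share a boundary without overlapping. A minimal sketch with invented values:

#include <stdbool.h>
#include <stdio.h>

struct pfn_range {
	unsigned long start_pfn;
	unsigned long nr_pages;
};

/* Half-open membership: [start_pfn, start_pfn + nr_pages) */
static bool range_contains(const struct pfn_range *r, unsigned long pfn)
{
	return pfn >= r->start_pfn && pfn < r->start_pfn + r->nr_pages;
}

int main(void)
{
	struct pfn_range r = { .start_pfn = 0x1000, .nr_pages = 16 };

	printf("%d %d %d\n",
	       range_contains(&r, 0x0fff),	/* 0: below the range */
	       range_contains(&r, 0x1000),	/* 1: first page */
	       range_contains(&r, 0x1010));	/* 0: one past the end */
	return 0;
}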
start_pfn 238 arch/parisc/mm/init.c pmem_holes[npmem_holes].start_pfn = end_pfn;
start_pfn 245 arch/parisc/mm/init.c pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
start_pfn 256 arch/parisc/mm/init.c unsigned long start_pfn;
start_pfn 261 arch/parisc/mm/init.c start_pfn = pmem_ranges[i].start_pfn;
start_pfn 264 arch/parisc/mm/init.c start = start_pfn << PAGE_SHIFT;
start_pfn 270 arch/parisc/mm/init.c if ((start_pfn + npages) > max_pfn)
start_pfn 271 arch/parisc/mm/init.c max_pfn = start_pfn + npages;
start_pfn 302 arch/parisc/mm/init.c memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
start_pfn 660 arch/parisc/mm/init.c start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
start_pfn 710 arch/parisc/mm/init.c unsigned long start = pmem_ranges[i].start_pfn;
start_pfn 1111 arch/powerpc/kernel/fadump.c static void fadump_free_reserved_memory(unsigned long start_pfn,
start_pfn 1118 arch/powerpc/kernel/fadump.c PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
start_pfn 1120 arch/powerpc/kernel/fadump.c for (pfn = start_pfn; pfn < end_pfn; pfn++) {
start_pfn 179 arch/powerpc/mm/init_64.c unsigned long start_pfn = page_to_pfn((struct page *)start);
start_pfn 181 arch/powerpc/mm/init_64.c if ((start_pfn + nr_pfn) > altmap->end_pfn)
start_pfn 184 arch/powerpc/mm/init_64.c if (start_pfn < altmap->base_pfn)
start_pfn 131 arch/powerpc/mm/mem.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 147 arch/powerpc/mm/mem.c return __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 153 arch/powerpc/mm/mem.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 157 arch/powerpc/mm/mem.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 744 arch/powerpc/mm/numa.c unsigned long start_pfn, end_pfn;
start_pfn 754 arch/powerpc/mm/numa.c start_pfn = memblock_region_memory_base_pfn(reg);
start_pfn 758 arch/powerpc/mm/numa.c memblock_set_node(PFN_PHYS(start_pfn),
start_pfn 759 arch/powerpc/mm/numa.c PFN_PHYS(end_pfn - start_pfn),
start_pfn 801 arch/powerpc/mm/numa.c static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
start_pfn 803 arch/powerpc/mm/numa.c u64 spanned_pages = end_pfn - start_pfn;
start_pfn 826 arch/powerpc/mm/numa.c NODE_DATA(nid)->node_start_pfn = start_pfn;
start_pfn 891 arch/powerpc/mm/numa.c unsigned long start_pfn, end_pfn;
start_pfn 893 arch/powerpc/mm/numa.c get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
start_pfn 894 arch/powerpc/mm/numa.c setup_node_data(nid, start_pfn, end_pfn);
start_pfn 71 arch/powerpc/platforms/powernv/memtrace.c static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
start_pfn 73 arch/powerpc/platforms/powernv/memtrace.c const unsigned long start = PFN_PHYS(start_pfn);
start_pfn 82 arch/powerpc/platforms/powernv/memtrace.c if (offline_pages(start_pfn, nr_pages)) {
start_pfn 97 arch/powerpc/platforms/powernv/memtrace.c u64 start_pfn, end_pfn, nr_pages, pfn;
start_pfn 104 arch/powerpc/platforms/powernv/memtrace.c start_pfn = node_start_pfn(nid);
start_pfn 112 arch/powerpc/platforms/powernv/memtrace.c for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
start_pfn 500 arch/powerpc/platforms/pseries/cmm.c unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
start_pfn 553 arch/powerpc/platforms/pseries/cmm.c unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
start_pfn 284 arch/powerpc/platforms/pseries/hotplug-memory.c unsigned long block_sz, start_pfn;
start_pfn 288 arch/powerpc/platforms/pseries/hotplug-memory.c start_pfn = base >> PAGE_SHIFT;
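The arch/powerpc/mm/mem.c lines show the hook shape this index repeats for nearly every architecture: arch_add_memory()/arch_remove_memory() reduce a byte range to a PFN range and defer to the generic __add_pages()/__remove_pages(). A condensed sketch against the signatures visible in this index (the mhp_restrictions era); the arch-specific linear-mapping setup and teardown each port does around these calls is elided:

/* Sketch only; mirrors the pattern in the powerpc, s390, sh and x86
 * entries above, not any one architecture verbatim. */
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* arch-specific: map [start, start + size) first (elided) */
	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	/* arch-specific: tear the mapping back down (elided) */
}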
start_pfn 292 arch/powerpc/platforms/pseries/hotplug-memory.c if (!pfn_valid(start_pfn))
start_pfn 355 arch/powerpc/platforms/pseries/iommu.c static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
start_pfn 366 arch/powerpc/platforms/pseries/iommu.c next = start_pfn << PAGE_SHIFT;
start_pfn 395 arch/powerpc/platforms/pseries/iommu.c static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
start_pfn 407 arch/powerpc/platforms/pseries/iommu.c unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
start_pfn 411 arch/powerpc/platforms/pseries/iommu.c void *uaddr = __va(start_pfn << PAGE_SHIFT);
start_pfn 435 arch/powerpc/platforms/pseries/iommu.c next = start_pfn << PAGE_SHIFT;
start_pfn 474 arch/powerpc/platforms/pseries/iommu.c static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
start_pfn 477 arch/powerpc/platforms/pseries/iommu.c return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
start_pfn 1244 arch/powerpc/platforms/pseries/iommu.c ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
start_pfn 1254 arch/powerpc/platforms/pseries/iommu.c ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
start_pfn 137 arch/riscv/mm/init.c unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
start_pfn 140 arch/riscv/mm/init.c memblock_set_node(PFN_PHYS(start_pfn),
start_pfn 141 arch/riscv/mm/init.c PFN_PHYS(end_pfn - start_pfn),
start_pfn 46 arch/s390/include/asm/diag.h static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
start_pfn 50 arch/s390/include/asm/diag.h start_addr = start_pfn << PAGE_SHIFT;
start_pfn 51 arch/s390/include/asm/diag.h end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
start_pfn 619 arch/s390/kernel/setup.c if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
start_pfn 621 arch/s390/kernel/setup.c if (arg->start_pfn > PFN_DOWN(crashk_res.end))
start_pfn 623 arch/s390/kernel/setup.c if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
start_pfn 832 arch/s390/kernel/setup.c unsigned long start_pfn = PFN_UP(__pa(_end));
start_pfn 835 arch/s390/kernel/setup.c memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
start_pfn 250 arch/s390/mm/init.c mem_data.start = arg->start_pfn << PAGE_SHIFT;
start_pfn 272 arch/s390/mm/init.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 283 arch/s390/mm/init.c rc = __add_pages(nid, start_pfn, size_pages, restrictions);
start_pfn 292 arch/s390/mm/init.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 295 arch/s390/mm/init.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 103 arch/s390/numa/numa.c unsigned long start_pfn, end_pfn;
start_pfn 107 arch/s390/numa/numa.c start_pfn = ULONG_MAX;
start_pfn 110 arch/s390/numa/numa.c if (t_start < start_pfn)
start_pfn 111 arch/s390/numa/numa.c start_pfn = t_start;
start_pfn 115 arch/s390/numa/numa.c NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 42 arch/sh/include/asm/mmzone.h void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
start_pfn 196 arch/sh/kernel/setup.c void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
start_pfn 204 arch/sh/kernel/setup.c start = start_pfn << PAGE_SHIFT;
start_pfn 214 arch/sh/kernel/setup.c start_pfn, end_pfn);
start_pfn 238 arch/sh/kernel/setup.c memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
start_pfn 201 arch/sh/mm/init.c unsigned long start_pfn, end_pfn;
start_pfn 203 arch/sh/mm/init.c get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
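The s390 diag10_range() lines compute an inclusive end address, "(start_pfn + num_pfn - 1) << PAGE_SHIFT", rather than the exclusive end_pfn convention used elsewhere in this index. A small demo of the difference (PAGE_SHIFT = 12 assumed):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long start_pfn = 0x100, num_pfn = 4;
	unsigned long start_addr = start_pfn << PAGE_SHIFT;
	/* start of the LAST page in the range, not one past it */
	unsigned long end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;

	printf("inclusive end %#lx, exclusive end %#lx\n",
	       end_addr, (start_pfn + num_pfn) << PAGE_SHIFT);
	(void)start_addr;
	return 0;
}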
start_pfn 214 arch/sh/mm/init.c NODE_DATA(nid)->node_start_pfn = start_pfn;
start_pfn 215 arch/sh/mm/init.c NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 224 arch/sh/mm/init.c unsigned long start_pfn, end_pfn;
start_pfn 225 arch/sh/mm/init.c start_pfn = memblock_region_memory_base_pfn(reg);
start_pfn 227 arch/sh/mm/init.c __add_active_range(0, start_pfn, end_pfn);
start_pfn 247 arch/sh/mm/init.c unsigned long start_pfn;
start_pfn 255 arch/sh/mm/init.c start_pfn = PFN_UP(__pa(_end));
start_pfn 263 arch/sh/mm/init.c memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
start_pfn 411 arch/sh/mm/init.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 416 arch/sh/mm/init.c ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 435 arch/sh/mm/init.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 438 arch/sh/mm/init.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 28 arch/sh/mm/numa.c unsigned long start_pfn, end_pfn;
start_pfn 33 arch/sh/mm/numa.c start_pfn = PFN_DOWN(start);
start_pfn 41 arch/sh/mm/numa.c __add_active_range(nid, start_pfn, end_pfn);
start_pfn 51 arch/sh/mm/numa.c NODE_DATA(nid)->node_start_pfn = start_pfn;
start_pfn 52 arch/sh/mm/numa.c NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
start_pfn 66 arch/sparc/mm/init_32.c unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
start_pfn 72 arch/sparc/mm/init_32.c if (start_pfn < max_low_pfn)
start_pfn 73 arch/sparc/mm/init_32.c start_pfn = max_low_pfn;
start_pfn 75 arch/sparc/mm/init_32.c nr += end_pfn - start_pfn;
start_pfn 135 arch/sparc/mm/init_32.c unsigned long start_pfn, bytes_avail, size;
start_pfn 173 arch/sparc/mm/init_32.c start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
start_pfn 176 arch/sparc/mm/init_32.c start_pfn >>= PAGE_SHIFT;
start_pfn 194 arch/sparc/mm/init_32.c size = (start_pfn << PAGE_SHIFT) - phys_base;
start_pfn 233 arch/sparc/mm/init_32.c static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 238 arch/sparc/mm/init_32.c printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
start_pfn 241 arch/sparc/mm/init_32.c for (tmp = start_pfn; tmp < end_pfn; tmp++)
start_pfn 281 arch/sparc/mm/init_32.c unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
start_pfn 287 arch/sparc/mm/init_32.c if (start_pfn < highstart_pfn)
start_pfn 288 arch/sparc/mm/init_32.c start_pfn = highstart_pfn;
start_pfn 290 arch/sparc/mm/init_32.c map_high_region(start_pfn, end_pfn);
start_pfn 1090 arch/sparc/mm/init_64.c unsigned long start_pfn, end_pfn;
start_pfn 1105 arch/sparc/mm/init_64.c get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
start_pfn 1106 arch/sparc/mm/init_64.c p->node_start_pfn = start_pfn;
start_pfn 1107 arch/sparc/mm/init_64.c p->node_spanned_pages = end_pfn - start_pfn;
start_pfn 200 arch/unicore32/mm/init.c free_memmap(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 208 arch/unicore32/mm/init.c start_pg = pfn_to_page(start_pfn - 1) + 1;
start_pfn 75 arch/x86/include/asm/highmem.h extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
start_pfn 74 arch/x86/include/asm/page_types.h bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
start_pfn 715 arch/x86/kernel/amd_gart_64.c unsigned long start_pfn, end_pfn;
start_pfn 747 arch/x86/kernel/amd_gart_64.c start_pfn = PFN_DOWN(aper_base);
start_pfn 748 arch/x86/kernel/amd_gart_64.c if (!pfn_range_is_mapped(start_pfn, end_pfn))
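The sparc init_32.c lines count highmem by clamping each memory bank's start_pfn up to max_low_pfn before accumulating end_pfn - start_pfn. The same clamp-then-subtract shape, standalone, with made-up bank values:

#include <stdio.h>

int main(void)
{
	struct { unsigned long start_pfn, end_pfn; } banks[] = {
		{ 0x00000, 0x20000 },	/* entirely below the cut */
		{ 0x30000, 0x50000 },	/* straddles it */
	};
	unsigned long max_low_pfn = 0x40000, nr = 0;

	for (unsigned i = 0; i < 2; i++) {
		unsigned long start_pfn = banks[i].start_pfn;
		unsigned long end_pfn = banks[i].end_pfn;

		if (end_pfn <= max_low_pfn)	/* no high pages here */
			continue;
		if (start_pfn < max_low_pfn)
			start_pfn = max_low_pfn;
		nr += end_pfn - start_pfn;	/* only pages above the cut */
	}
	printf("highmem pages: %#lx\n", nr);	/* 0x10000 */
	return 0;
}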
start_pfn 749 arch/x86/kernel/amd_gart_64.c init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
start_pfn 852 arch/x86/kernel/cpu/mtrr/cleanup.c real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
start_pfn 856 arch/x86/kernel/cpu/mtrr/cleanup.c trim_start = start_pfn;
start_pfn 823 arch/x86/kernel/e820.c unsigned long start_pfn;
start_pfn 829 arch/x86/kernel/e820.c start_pfn = entry->addr >> PAGE_SHIFT;
start_pfn 832 arch/x86/kernel/e820.c if (start_pfn >= limit_pfn)
start_pfn 142 arch/x86/kernel/tboot.c static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
start_pfn 150 arch/x86/kernel/tboot.c for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
start_pfn 151 arch/x86/kernel/tboot.c if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
start_pfn 262 arch/x86/mm/init.c unsigned long start_pfn, unsigned long end_pfn,
start_pfn 265 arch/x86/mm/init.c if (start_pfn < end_pfn) {
start_pfn 268 arch/x86/mm/init.c mr[nr_range].start = start_pfn<<PAGE_SHIFT;
start_pfn 340 arch/x86/mm/init.c unsigned long start_pfn, end_pfn, limit_pfn;
start_pfn 347 arch/x86/mm/init.c pfn = start_pfn = PFN_DOWN(start);
start_pfn 364 arch/x86/mm/init.c if (start_pfn < end_pfn) {
start_pfn 365 arch/x86/mm/init.c nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
start_pfn 370 arch/x86/mm/init.c start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
start_pfn 379 arch/x86/mm/init.c if (start_pfn < end_pfn) {
start_pfn 380 arch/x86/mm/init.c nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
start_pfn 387 arch/x86/mm/init.c start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
start_pfn 389 arch/x86/mm/init.c if (start_pfn < end_pfn) {
start_pfn 390 arch/x86/mm/init.c nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
start_pfn 397 arch/x86/mm/init.c start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
start_pfn 399 arch/x86/mm/init.c if (start_pfn < end_pfn) {
start_pfn 400 arch/x86/mm/init.c nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
start_pfn 407 arch/x86/mm/init.c start_pfn = pfn;
start_pfn 409 arch/x86/mm/init.c nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
start_pfn 439 arch/x86/mm/init.c static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 442 arch/x86/mm/init.c nr_pfn_mapped, start_pfn, end_pfn);
start_pfn 447 arch/x86/mm/init.c if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
start_pfn 452 arch/x86/mm/init.c bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 457 arch/x86/mm/init.c if ((start_pfn >= pfn_mapped[i].start) &&
start_pfn 508 arch/x86/mm/init.c unsigned long start_pfn, end_pfn;
start_pfn 512 arch/x86/mm/init.c for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
start_pfn 513 arch/x86/mm/init.c u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
start_pfn 899 arch/x86/mm/init.c unsigned long start_pfn, end_pfn;
start_pfn 909 arch/x86/mm/init.c for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
start_pfn 910 arch/x86/mm/init.c start_pfn = min(start_pfn, MAX_DMA_PFN);
start_pfn 913 arch/x86/mm/init.c nr_pages += end_pfn - start_pfn;
start_pfn 923 arch/x86/mm/init.c start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
start_pfn 926 arch/x86/mm/init.c if (start_pfn < end_pfn)
start_pfn 927 arch/x86/mm/init.c nr_free_pages += end_pfn - start_pfn;
start_pfn 259 arch/x86/mm/init_32.c unsigned long start_pfn, end_pfn;
start_pfn 269 arch/x86/mm/init_32.c start_pfn = start >> PAGE_SHIFT;
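The x86 split_mem_range() lines (arch/x86/mm/init.c 340-409 above) split [start, end) into a 4 KiB head, a large-page body and a 4 KiB tail by rounding start_pfn up and the limit down to PMD granularity. Just the boundary math, with the kernel's power-of-two round_up/round_down and assumed 4 KiB page / 2 MiB PMD sizes:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y: power of 2 */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned long start_pfn = PFN_DOWN(0x123000);
	unsigned long limit_pfn = PFN_DOWN(0xa00000);

	/* head: 4K mappings up to the first PMD boundary */
	unsigned long body_start = round_up(start_pfn, PFN_DOWN(PMD_SIZE));
	/* tail: 4K mappings after the last PMD boundary */
	unsigned long body_end = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));

	printf("4K head: [%#lx, %#lx)\n", start_pfn, body_start);
	printf("2M body: [%#lx, %#lx)\n", body_start, body_end);
	printf("4K tail: [%#lx, %#lx)\n", body_end, limit_pfn);
	return 0;
}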
start_pfn 293 arch/x86/mm/init_32.c pfn = start_pfn;
start_pfn 435 arch/x86/mm/init_32.c unsigned long start_pfn, unsigned long end_pfn)
start_pfn 442 arch/x86/mm/init_32.c start_pfn, end_pfn);
start_pfn 444 arch/x86/mm/init_32.c start_pfn, end_pfn);
start_pfn 857 arch/x86/mm/init_32.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 860 arch/x86/mm/init_32.c return __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 866 arch/x86/mm/init_32.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 869 arch/x86/mm/init_32.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 846 arch/x86/mm/init_64.c int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
start_pfn 851 arch/x86/mm/init_64.c ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 855 arch/x86/mm/init_64.c update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
start_pfn 864 arch/x86/mm/init_64.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 869 arch/x86/mm/init_64.c return add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 1213 arch/x86/mm/init_64.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 1216 arch/x86/mm/init_64.c __remove_pages(start_pfn, nr_pages, altmap);
start_pfn 71 arch/x86/mm/ioremap.c unsigned long start_pfn, stop_pfn;
start_pfn 77 arch/x86/mm/ioremap.c start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
start_pfn 79 arch/x86/mm/ioremap.c if (stop_pfn > start_pfn) {
start_pfn 80 arch/x86/mm/ioremap.c for (i = 0; i < (stop_pfn - start_pfn); ++i)
start_pfn 81 arch/x86/mm/ioremap.c if (pfn_valid(start_pfn + i) &&
start_pfn 82 arch/x86/mm/ioremap.c !PageReserved(pfn_to_page(start_pfn + i)))
start_pfn 33 arch/x86/mm/numa_emulation.c unsigned long start_pfn = PFN_UP(start);
start_pfn 36 arch/x86/mm/numa_emulation.c if (start_pfn < end_pfn)
start_pfn 37 arch/x86/mm/numa_emulation.c return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
start_pfn 427 arch/x86/mm/pat.c unsigned long start_pfn = start >> PAGE_SHIFT;
start_pfn 429 arch/x86/mm/pat.c struct pagerange_state state = {start_pfn, 0, 0};
start_pfn 438 arch/x86/mm/pat.c if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
start_pfn 439 arch/x86/mm/pat.c start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
start_pfn 441 arch/x86/mm/pat.c if (start_pfn < end_pfn) {
start_pfn 442 arch/x86/mm/pat.c ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
start_pfn 614 arch/x86/platform/efi/efi.c u64 start_pfn, end_pfn, end;
start_pfn 618 arch/x86/platform/efi/efi.c start_pfn = PFN_DOWN(md->phys_addr);
start_pfn 623 arch/x86/platform/efi/efi.c if (pfn_range_is_mapped(start_pfn, end_pfn)) {
start_pfn 94 arch/x86/xen/setup.c static void __init xen_add_extra_mem(unsigned long start_pfn,
start_pfn 106 arch/x86/xen/setup.c xen_extra_mem[i].start_pfn = start_pfn;
start_pfn 111 arch/x86/xen/setup.c if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
start_pfn 112 arch/x86/xen/setup.c start_pfn) {
start_pfn 120 arch/x86/xen/setup.c memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
start_pfn 123 arch/x86/xen/setup.c static void __init xen_del_extra_mem(unsigned long start_pfn,
start_pfn 130 arch/x86/xen/setup.c start_r = xen_extra_mem[i].start_pfn;
start_pfn 134 arch/x86/xen/setup.c if (start_r == start_pfn) {
start_pfn 136 arch/x86/xen/setup.c xen_extra_mem[i].start_pfn += n_pfns;
start_pfn 141 arch/x86/xen/setup.c if (start_r + size_r == start_pfn + n_pfns) {
start_pfn 147 arch/x86/xen/setup.c if (start_pfn > start_r && start_pfn < start_r + size_r) {
start_pfn 148 arch/x86/xen/setup.c BUG_ON(start_pfn + n_pfns > start_r + size_r);
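The mips and x86 __ioremap_check_ram() lines scan a candidate range and reject it when any page is ordinary RAM, i.e. pfn_valid() and not PageReserved(). Condensed to its core as a sketch (the real versions are walk_system_ram_range() callbacks with extra plumbing):

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;	/* real RAM: caller must not remap it */
	return 0;
}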
start_pfn 149 arch/x86/xen/setup.c xen_extra_mem[i].n_pfns = start_pfn - start_r;
start_pfn 151 arch/x86/xen/setup.c xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
start_pfn 152 arch/x86/xen/setup.c (start_pfn + n_pfns));
start_pfn 156 arch/x86/xen/setup.c memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
start_pfn 169 arch/x86/xen/setup.c if (pfn >= xen_extra_mem[i].start_pfn &&
start_pfn 170 arch/x86/xen/setup.c pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
start_pfn 188 arch/x86/xen/setup.c pfn_s = xen_extra_mem[i].start_pfn;
start_pfn 254 arch/x86/xen/setup.c static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
start_pfn 260 arch/x86/xen/setup.c WARN_ON(start_pfn > end_pfn);
start_pfn 264 arch/x86/xen/setup.c for (pfn = start_pfn; pfn < end; pfn++) {
start_pfn 282 arch/x86/xen/setup.c set_phys_range_identity(start_pfn, end_pfn);
start_pfn 334 arch/x86/xen/setup.c unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
start_pfn 339 arch/x86/xen/setup.c unsigned long ident_end_pfn = start_pfn + size;
start_pfn 347 arch/x86/xen/setup.c for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
start_pfn 388 arch/x86/xen/setup.c unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
start_pfn 393 arch/x86/xen/setup.c unsigned long n = end_pfn - start_pfn;
start_pfn 399 arch/x86/xen/setup.c unsigned long cur_pfn = start_pfn + i;
start_pfn 435 arch/x86/xen/setup.c for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
start_pfn 444 arch/x86/xen/setup.c unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
start_pfn 447 arch/x86/xen/setup.c if (start_pfn >= nr_pages)
start_pfn 450 arch/x86/xen/setup.c return remap_pages + min(end_pfn, nr_pages) - start_pfn;
start_pfn 454 arch/x86/xen/setup.c unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
start_pfn 476 arch/x86/xen/setup.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 482 arch/x86/xen/setup.c if (start_pfn < end_pfn)
start_pfn 483 arch/x86/xen/setup.c ret_val = func(start_pfn, end_pfn, nr_pages,
start_pfn 174 drivers/base/memory.c static bool pages_correctly_probed(unsigned long start_pfn)
start_pfn 176 drivers/base/memory.c unsigned long section_nr = pfn_to_section_nr(start_pfn);
start_pfn 178 drivers/base/memory.c unsigned long pfn = start_pfn;
start_pfn 216 drivers/base/memory.c unsigned long start_pfn;
start_pfn 220 drivers/base/memory.c start_pfn = section_nr_to_pfn(start_section_nr);
start_pfn 224 drivers/base/memory.c if (!pages_correctly_probed(start_pfn))
start_pfn 227 drivers/base/memory.c ret = online_pages(start_pfn, nr_pages, online_type);
start_pfn 230 drivers/base/memory.c ret = offline_pages(start_pfn, nr_pages);
start_pfn 365 drivers/base/memory.c static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
start_pfn 371 drivers/base/memory.c zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
start_pfn 382 drivers/base/memory.c unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
start_pfn 397 drivers/base/memory.c if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
start_pfn 400 drivers/base/memory.c start_pfn = valid_start_pfn;
start_pfn 401 drivers/base/memory.c strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
start_pfn 406 drivers/base/memory.c default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
start_pfn 409 drivers/base/memory.c print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
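xen_del_extra_mem() above distinguishes three layouts when removing [start_pfn, start_pfn + n_pfns) from a stored range: trim the front, trim the back, or split the middle into two ranges (the case that triggers the xen_add_extra_mem() call at its line 151). A standalone rendition over a single range; names and values are illustrative:

#include <stdio.h>

struct range { unsigned long start_pfn, n_pfns; };

/* Returns how many ranges remain (1, or 2 when split); results in out[] */
static int del_range(struct range r, unsigned long start_pfn,
		     unsigned long n_pfns, struct range out[2])
{
	unsigned long end = r.start_pfn + r.n_pfns;

	if (start_pfn == r.start_pfn) {			/* trim the front */
		out[0] = (struct range){ start_pfn + n_pfns,
					 r.n_pfns - n_pfns };
		return 1;
	}
	if (start_pfn + n_pfns == end) {		/* trim the back */
		out[0] = (struct range){ r.start_pfn, r.n_pfns - n_pfns };
		return 1;
	}
	/* hole in the middle: keep both sides */
	out[0] = (struct range){ r.start_pfn, start_pfn - r.start_pfn };
	out[1] = (struct range){ start_pfn + n_pfns,
				 end - (start_pfn + n_pfns) };
	return 2;
}

int main(void)
{
	struct range out[2];
	int n = del_range((struct range){ 0x100, 0x100 }, 0x140, 0x20, out);

	for (int i = 0; i < n; i++)
		printf("[%#lx, +%#lx)\n", out[i].start_pfn, out[i].n_pfns);
	return 0;	/* prints [0x100, +0x40) and [0x160, +0xa0) */
}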
start_pfn 411 drivers/base/memory.c print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
start_pfn 558 drivers/base/memory.c int __weak arch_get_memory_phys_device(unsigned long start_pfn)
start_pfn 632 drivers/base/memory.c unsigned long start_pfn;
start_pfn 646 drivers/base/memory.c start_pfn = section_nr_to_pfn(mem->start_section_nr);
start_pfn 647 drivers/base/memory.c mem->phys_device = arch_get_memory_phys_device(start_pfn);
start_pfn 766 drivers/base/node.c unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
start_pfn 767 drivers/base/node.c unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
start_pfn 771 drivers/base/node.c for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
start_pfn 832 drivers/base/node.c int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
start_pfn 834 drivers/base/node.c return walk_memory_blocks(PFN_PHYS(start_pfn),
start_pfn 835 drivers/base/node.c PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
start_pfn 650 drivers/gpu/drm/gma500/mmu.c int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
start_pfn 675 drivers/gpu/drm/gma500/mmu.c pte = psb_mmu_mask_pte(start_pfn++, type);
start_pfn 71 drivers/gpu/drm/gma500/mmu.h uint32_t start_pfn,
start_pfn 432 drivers/hv/hv_balloon.c unsigned long start_pfn;
start_pfn 445 drivers/hv/hv_balloon.c unsigned long start_pfn;
start_pfn 583 drivers/hv/hv_balloon.c if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
start_pfn 590 drivers/hv/hv_balloon.c static unsigned long hv_page_offline_check(unsigned long start_pfn,
start_pfn 593 drivers/hv/hv_balloon.c unsigned long pfn = start_pfn, count = 0;
start_pfn 597 drivers/hv/hv_balloon.c while (pfn < start_pfn + nr_pages) {
start_pfn 604 drivers/hv/hv_balloon.c while ((pfn >= has->start_pfn) &&
start_pfn 606 drivers/hv/hv_balloon.c (pfn < start_pfn + nr_pages)) {
start_pfn 643 drivers/hv/hv_balloon.c pfn_count = hv_page_offline_check(mem->start_pfn,
start_pfn 692 drivers/hv/hv_balloon.c unsigned long start_pfn, unsigned long size)
start_pfn 696 drivers/hv/hv_balloon.c pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
start_pfn 698 drivers/hv/hv_balloon.c hv_page_online_one(has, pfn_to_page(start_pfn + i));
start_pfn 707 drivers/hv/hv_balloon.c unsigned long start_pfn;
start_pfn 713 drivers/hv/hv_balloon.c start_pfn = start + (i * HA_CHUNK);
start_pfn 732 drivers/hv/hv_balloon.c nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
start_pfn 733 drivers/hv/hv_balloon.c ret = add_memory(nid, PFN_PHYS((start_pfn)),
start_pfn 778 drivers/hv/hv_balloon.c if ((pfn < has->start_pfn) ||
start_pfn 788 drivers/hv/hv_balloon.c static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
start_pfn 802 drivers/hv/hv_balloon.c if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
start_pfn 809 drivers/hv/hv_balloon.c if (has->covered_end_pfn != start_pfn) {
start_pfn 817 drivers/hv/hv_balloon.c gap->start_pfn = has->covered_end_pfn;
start_pfn 818 drivers/hv/hv_balloon.c gap->end_pfn = start_pfn;
start_pfn 821 drivers/hv/hv_balloon.c has->covered_end_pfn = start_pfn;
start_pfn 828 drivers/hv/hv_balloon.c if ((start_pfn + pfn_cnt) > has->end_pfn) {
start_pfn 829 drivers/hv/hv_balloon.c residual = (start_pfn + pfn_cnt - has->end_pfn);
start_pfn 851 drivers/hv/hv_balloon.c unsigned long start_pfn = pg_start;
start_pfn 868 drivers/hv/hv_balloon.c if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
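The drivers/base/memory.c lines convert between memory-section numbers and PFNs with section_nr_to_pfn()/pfn_to_section_nr(), which are pure shifts by PFN_SECTION_SHIFT = SECTION_SIZE_BITS - PAGE_SHIFT. The 128 MiB section / 4 KiB page values below are the common x86_64 defaults and an assumption of this demo:

#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* 128 MiB sections (assumed) */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec)	((sec) << PFN_SECTION_SHIFT)

int main(void)
{
	unsigned long pfn = 0x123456;
	unsigned long sec = pfn_to_section_nr(pfn);

	/* round-trips to the section's FIRST pfn, not the original pfn */
	printf("pfn %#lx -> section %lu -> first pfn %#lx\n",
	       pfn, sec, section_nr_to_pfn(sec));
	return 0;
}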
start_pfn 873 drivers/hv/hv_balloon.c if (start_pfn < has->ha_end_pfn) {
start_pfn 879 drivers/hv/hv_balloon.c pgs_ol = has->ha_end_pfn - start_pfn;
start_pfn 893 drivers/hv/hv_balloon.c if (start_pfn > has->start_pfn &&
start_pfn 894 drivers/hv/hv_balloon.c online_section_nr(pfn_to_section_nr(start_pfn)))
start_pfn 895 drivers/hv/hv_balloon.c hv_bring_pgs_online(has, start_pfn, pgs_ol);
start_pfn 964 drivers/hv/hv_balloon.c ha_region->start_pfn = rg_start;
start_pfn 331 drivers/iommu/dma-iommu.c if (iovad->start_pfn) {
start_pfn 333 drivers/iommu/dma-iommu.c base_pfn != iovad->start_pfn) {
start_pfn 969 drivers/iommu/intel-iommu.c unsigned long start_pfn,
start_pfn 975 drivers/iommu/intel-iommu.c BUG_ON(!domain_pfn_supported(domain, start_pfn));
start_pfn 977 drivers/iommu/intel-iommu.c BUG_ON(start_pfn > last_pfn);
start_pfn 982 drivers/iommu/intel-iommu.c first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
start_pfn 984 drivers/iommu/intel-iommu.c start_pfn = align_to_level(start_pfn + 1, large_page + 1);
start_pfn 989 drivers/iommu/intel-iommu.c start_pfn += lvl_to_nr_pages(large_page);
start_pfn 991 drivers/iommu/intel-iommu.c } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
start_pfn 996 drivers/iommu/intel-iommu.c } while (start_pfn && start_pfn <= last_pfn);
start_pfn 1001 drivers/iommu/intel-iommu.c unsigned long pfn, unsigned long start_pfn,
start_pfn 1004 drivers/iommu/intel-iommu.c pfn = max(start_pfn, pfn);
start_pfn 1019 drivers/iommu/intel-iommu.c level_pte, level_pfn, start_pfn,
start_pfn 1027 drivers/iommu/intel-iommu.c if (level < retain_level && !(start_pfn > level_pfn ||
start_pfn 1043 drivers/iommu/intel-iommu.c unsigned long start_pfn,
start_pfn 1047 drivers/iommu/intel-iommu.c BUG_ON(!domain_pfn_supported(domain, start_pfn));
start_pfn 1049 drivers/iommu/intel-iommu.c BUG_ON(start_pfn > last_pfn);
start_pfn 1051 drivers/iommu/intel-iommu.c dma_pte_clear_range(domain, start_pfn, last_pfn);
start_pfn 1055 drivers/iommu/intel-iommu.c domain->pgd, 0, start_pfn, last_pfn);
start_pfn 1058 drivers/iommu/intel-iommu.c if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
start_pfn 1096 drivers/iommu/intel-iommu.c unsigned long start_pfn,
start_pfn 1102 drivers/iommu/intel-iommu.c pfn = max(start_pfn, pfn);
start_pfn 1114 drivers/iommu/intel-iommu.c if (start_pfn <= level_pfn &&
start_pfn 1129 drivers/iommu/intel-iommu.c level_pfn, start_pfn, last_pfn,
start_pfn 1147 drivers/iommu/intel-iommu.c unsigned long start_pfn,
start_pfn 1152 drivers/iommu/intel-iommu.c BUG_ON(!domain_pfn_supported(domain, start_pfn));
start_pfn 1154 drivers/iommu/intel-iommu.c BUG_ON(start_pfn > last_pfn);
start_pfn 1158 drivers/iommu/intel-iommu.c domain->pgd, 0, start_pfn, last_pfn, NULL);
start_pfn 1161 drivers/iommu/intel-iommu.c if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
start_pfn 2753 drivers/iommu/intel-iommu.c unsigned long start_pfn, end_pfn;
start_pfn 2756 drivers/iommu/intel-iommu.c for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
start_pfn 2758 drivers/iommu/intel-iommu.c PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
start_pfn 3575 drivers/iommu/intel-iommu.c unsigned long start_pfn, last_pfn;
start_pfn 3590 drivers/iommu/intel-iommu.c start_pfn = mm_to_dma_pfn(iova_pfn);
start_pfn 3591 drivers/iommu/intel-iommu.c last_pfn = start_pfn + nrpages - 1;
start_pfn 3596 drivers/iommu/intel-iommu.c freelist = domain_unmap(domain, start_pfn, last_pfn);
start_pfn 3599 drivers/iommu/intel-iommu.c iommu_flush_iotlb_psi(iommu, domain, start_pfn,
start_pfn 4658 drivers/iommu/intel-iommu.c start = mhp->start_pfn << PAGE_SHIFT;
start_pfn 4659 drivers/iommu/intel-iommu.c end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
start_pfn 4669 drivers/iommu/intel-iommu.c start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
start_pfn 4670 drivers/iommu/intel-iommu.c last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
start_pfn 5483 drivers/iommu/intel-iommu.c unsigned long start_pfn, last_pfn;
start_pfn 5494 drivers/iommu/intel-iommu.c start_pfn = iova >> VTD_PAGE_SHIFT;
start_pfn 5497 drivers/iommu/intel-iommu.c freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
start_pfn 5499 drivers/iommu/intel-iommu.c npages = last_pfn - start_pfn + 1;
start_pfn 5503 drivers/iommu/intel-iommu.c start_pfn, npages, !freelist, 0);
start_pfn 31 drivers/iommu/iova.c unsigned long start_pfn)
start_pfn 45 drivers/iommu/iova.c iovad->start_pfn = start_pfn;
start_pfn 209 drivers/iommu/iova.c if (limit_pfn < size || new_pfn < iovad->start_pfn) {
start_pfn 3255 drivers/net/ethernet/ibm/ehea/ehea_main.c if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
start_pfn 3263 drivers/net/ethernet/ibm/ehea/ehea_main.c if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
start_pfn 689 drivers/net/ethernet/ibm/ehea/ehea_qmr.c unsigned long pfn, start_pfn, end_pfn, nr_pages;
start_pfn 696 drivers/net/ethernet/ibm/ehea/ehea_qmr.c start_pfn = initial_pfn;
start_pfn 698 drivers/net/ethernet/ibm/ehea/ehea_qmr.c pfn = start_pfn;
start_pfn 703 drivers/net/ethernet/ibm/ehea/ehea_qmr.c nr_pages = pfn - start_pfn;
start_pfn 704 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ret = ehea_update_busmap(start_pfn, nr_pages,
start_pfn 711 drivers/net/ethernet/ibm/ehea/ehea_qmr.c start_pfn = pfn;
start_pfn 717 drivers/net/ethernet/ibm/ehea/ehea_qmr.c nr_pages = pfn - start_pfn;
start_pfn 718 drivers/net/ethernet/ibm/ehea/ehea_qmr.c return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
start_pfn 180 drivers/s390/char/sclp_cmd.c int arch_get_memory_phys_device(unsigned long start_pfn)
start_pfn 184 drivers/s390/char/sclp_cmd.c return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
start_pfn 327 drivers/s390/char/sclp_cmd.c start = arg->start_pfn << PAGE_SHIFT;
start_pfn 370 drivers/xen/balloon.c unsigned long start_pfn = page_to_pfn(page);
start_pfn 373 drivers/xen/balloon.c pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
start_pfn 376 drivers/xen/balloon.c p = pfn_to_page(start_pfn + i);
start_pfn 666 drivers/xen/balloon.c static void __init balloon_add_region(unsigned long start_pfn,
start_pfn 676 drivers/xen/balloon.c extra_pfn_end = min(max_pfn, start_pfn + pages);
start_pfn 678 drivers/xen/balloon.c for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
start_pfn 685 drivers/xen/balloon.c balloon_stats.total_pages += extra_pfn_end - start_pfn;
start_pfn 729 drivers/xen/balloon.c balloon_add_region(xen_extra_mem[i].start_pfn,
start_pfn 852 drivers/xen/grant-table.c unsigned long pfn, start_pfn;
start_pfn 870 drivers/xen/grant-table.c start_pfn = __phys_to_pfn(args->dev_bus_addr);
start_pfn 871 drivers/xen/grant-table.c for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
start_pfn 484 include/linux/hugetlb.h extern int dissolve_free_huge_pages(unsigned long start_pfn,
start_pfn 706 include/linux/hugetlb.h static inline int dissolve_free_huge_pages(unsigned long start_pfn,
start_pfn 280 include/linux/ioport.h walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 74 include/linux/iova.h unsigned long start_pfn; /* Lower limit for this domain */
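The ehea_qmr.c lines walk PFNs and flush each maximal run through ehea_update_busmap(start_pfn, nr_pages, ...). The generic run-length grouping underneath that walk, with an invented predicate standing in for the driver's memory-section test:

#include <stdbool.h>
#include <stdio.h>

static bool pfn_interesting(unsigned long pfn)
{
	return (pfn / 8) % 2 == 0;	/* demo: alternating runs of 8 */
}

static void emit(unsigned long start_pfn, unsigned long nr_pages)
{
	printf("run: [%#lx, +%lu)\n", start_pfn, nr_pages);
}

int main(void)
{
	unsigned long start_pfn = 0, pfn;
	bool in_run = false;

	for (pfn = 0; pfn < 32; pfn++) {
		if (pfn_interesting(pfn) && !in_run) {
			start_pfn = pfn;	/* open a new run */
			in_run = true;
		} else if (!pfn_interesting(pfn) && in_run) {
			emit(start_pfn, pfn - start_pfn);
			in_run = false;
		}
	}
	if (in_run)				/* close the final run */
		emit(start_pfn, pfn - start_pfn);
	return 0;
}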
start_pfn 157 include/linux/iova.h unsigned long start_pfn);
start_pfn 235 include/linux/iova.h unsigned long start_pfn)
start_pfn 220 include/linux/memblock.h int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
start_pfn 38 include/linux/memory.h int arch_get_memory_phys_device(unsigned long start_pfn);
start_pfn 51 include/linux/memory.h unsigned long start_pfn;
start_pfn 66 include/linux/memory.h unsigned long start_pfn; /* Start of range to check */
start_pfn 98 include/linux/memory_hotplug.h extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
start_pfn 100 include/linux/memory_hotplug.h extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
start_pfn 128 include/linux/memory_hotplug.h extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 132 include/linux/memory_hotplug.h extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
start_pfn 136 include/linux/memory_hotplug.h static inline int add_pages(int nid, unsigned long start_pfn,
start_pfn 139 include/linux/memory_hotplug.h return __add_pages(nid, start_pfn, nr_pages, restrictions);
start_pfn 142 include/linux/memory_hotplug.h int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
start_pfn 316 include/linux/memory_hotplug.h extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
start_pfn 329 include/linux/memory_hotplug.h static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
start_pfn 346 include/linux/memory_hotplug.h extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
start_pfn 349 include/linux/memory_hotplug.h unsigned long start_pfn,
start_pfn 361 include/linux/memory_hotplug.h extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
start_pfn 2175 include/linux/mm.h unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
start_pfn 2177 include/linux/mm.h extern unsigned long absent_pages_in_range(unsigned long start_pfn,
start_pfn 2180 include/linux/mm.h unsigned long *start_pfn, unsigned long *end_pfn);
start_pfn 616 include/linux/mmzone.h unsigned long start_pfn, unsigned long nr_pages)
start_pfn 620 include/linux/mmzone.h if (start_pfn >= zone_end_pfn(zone) ||
start_pfn 621 include/linux/mmzone.h start_pfn + nr_pages <= zone->zone_start_pfn)
start_pfn 827 include/linux/mmzone.h extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
start_pfn 1312 include/linux/mmzone.h void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
start_pfn 1314 include/linux/mmzone.h void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
start_pfn 102 include/linux/node.h extern int link_mem_sections(int nid, unsigned long start_pfn,
start_pfn 105 include/linux/node.h static inline int link_mem_sections(int nid, unsigned long start_pfn,
start_pfn 124 include/linux/node.h unsigned long start_pfn = pgdat->node_start_pfn;
start_pfn 125 include/linux/node.h unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
start_pfn 131 include/linux/node.h error = link_mem_sections(nid, start_pfn, end_pfn);
start_pfn 46 include/linux/page-isolation.h start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
start_pfn 54 include/linux/page-isolation.h undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
start_pfn 60 include/linux/page-isolation.h int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
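include/linux/memory_hotplug.h above declares offline_pages() twice, an extern (its line 316) and a static inline stub (line 329), so callers build unchanged whether or not hot-remove support is configured. The shape of that header idiom, as a sketch; the stub's exact error value is an assumption:

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
#else
static inline int offline_pages(unsigned long start_pfn,
				unsigned long nr_pages)
{
	return -EINVAL;	/* hot-remove not built in (assumed errno) */
}
#endif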
start_pfn 17 include/trace/events/compaction.h unsigned long start_pfn,
start_pfn 22 include/trace/events/compaction.h TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
start_pfn 25 include/trace/events/compaction.h __field(unsigned long, start_pfn)
start_pfn 32 include/trace/events/compaction.h __entry->start_pfn = start_pfn;
start_pfn 39 include/trace/events/compaction.h __entry->start_pfn,
start_pfn 48 include/trace/events/compaction.h unsigned long start_pfn,
start_pfn 53 include/trace/events/compaction.h TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
start_pfn 59 include/trace/events/compaction.h unsigned long start_pfn,
start_pfn 64 include/trace/events/compaction.h TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
start_pfn 13 include/trace/events/page_isolation.h unsigned long start_pfn,
start_pfn 17 include/trace/events/page_isolation.h TP_ARGS(start_pfn, end_pfn, fin_pfn),
start_pfn 20 include/trace/events/page_isolation.h __field(unsigned long, start_pfn)
start_pfn 26 include/trace/events/page_isolation.h __entry->start_pfn = start_pfn;
start_pfn 32 include/trace/events/page_isolation.h __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
start_pfn 38 include/xen/page.h unsigned long start_pfn;
start_pfn 361 kernel/power/snapshot.c unsigned long start_pfn; /* Zone start page frame */
start_pfn 515 kernel/power/snapshot.c zone->start_pfn = start;
start_pfn 716 kernel/power/snapshot.c if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
start_pfn 723 kernel/power/snapshot.c if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
start_pfn 745 kernel/power/snapshot.c ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
start_pfn 749 kernel/power/snapshot.c block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
start_pfn 764 kernel/power/snapshot.c bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
start_pfn 768 kernel/power/snapshot.c *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
start_pfn 888 kernel/power/snapshot.c pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
start_pfn 893 kernel/power/snapshot.c pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
start_pfn 908 kernel/power/snapshot.c unsigned long start_pfn;
start_pfn 948 kernel/power/snapshot.c void __init __register_nosave_region(unsigned long start_pfn,
start_pfn 953 kernel/power/snapshot.c if (start_pfn >= end_pfn)
start_pfn 960 kernel/power/snapshot.c if (region->end_pfn == start_pfn) {
start_pfn 977 kernel/power/snapshot.c region->start_pfn = start_pfn;
start_pfn 982 kernel/power/snapshot.c (unsigned long long) start_pfn << PAGE_SHIFT,
start_pfn 1054 kernel/power/snapshot.c (unsigned long long) region->start_pfn << PAGE_SHIFT,
start_pfn 1058 kernel/power/snapshot.c for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
start_pfn 475 kernel/resource.c int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 484 kernel/resource.c start = (u64) start_pfn << PAGE_SHIFT;
start_pfn 485 kernel/resource.c end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
start_pfn 536 mm/compaction.c unsigned long *start_pfn,
start_pfn 546 mm/compaction.c unsigned long blockpfn = *start_pfn;
start_pfn 647 mm/compaction.c trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
start_pfn 651 mm/compaction.c *start_pfn = blockpfn;
start_pfn 683 mm/compaction.c unsigned long start_pfn, unsigned long end_pfn)
start_pfn 688 mm/compaction.c pfn = start_pfn;
start_pfn 791 mm/compaction.c unsigned long start_pfn = low_pfn;
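The kernel/power/snapshot.c lines locate a PFN's bit in the hibernation bitmap by splitting the zone-relative offset (pfn - zone->start_pfn) into a block index (>> BM_BLOCK_SHIFT) and a bit offset (& BM_BLOCK_MASK). Standalone, with BM_BLOCK_SHIFT = 15 assumed (one 4 KiB page worth of bits per block):

#include <stdio.h>

#define BM_BLOCK_SHIFT	15	/* assumed: PAGE_SHIFT + 3 with 4 KiB pages */
#define BM_BLOCK_MASK	((1UL << BM_BLOCK_SHIFT) - 1)

int main(void)
{
	unsigned long zone_start_pfn = 0x40000;
	unsigned long pfn = 0x4c123;
	unsigned long rel = pfn - zone_start_pfn;

	printf("block %lu, bit %lu\n",
	       rel >> BM_BLOCK_SHIFT,	/* which bitmap block */
	       rel & BM_BLOCK_MASK);	/* which bit inside it */
	return 0;
}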
start_pfn 1063 mm/compaction.c trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
start_pfn 1085 mm/compaction.c isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
start_pfn 1091 mm/compaction.c pfn = start_pfn;
start_pfn 1224 mm/compaction.c unsigned long start_pfn, end_pfn;
start_pfn 1236 mm/compaction.c start_pfn = pageblock_start_pfn(pfn);
start_pfn 1240 mm/compaction.c if (start_pfn != pfn) {
start_pfn 1241 mm/compaction.c isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
start_pfn 1247 mm/compaction.c start_pfn = pfn + nr_isolated;
start_pfn 1248 mm/compaction.c if (start_pfn < end_pfn)
start_pfn 1249 mm/compaction.c isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
start_pfn 2075 mm/compaction.c unsigned long start_pfn = cc->zone->zone_start_pfn;
start_pfn 2117 mm/compaction.c cc->migrate_pfn = start_pfn;
start_pfn 2122 mm/compaction.c if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
start_pfn 2126 mm/compaction.c if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
start_pfn 2127 mm/compaction.c cc->migrate_pfn = start_pfn;
start_pfn 2149 mm/compaction.c trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
start_pfn 2156 mm/compaction.c unsigned long start_pfn = cc->migrate_pfn;
start_pfn 2168 mm/compaction.c pageblock_start_pfn(start_pfn)) {
start_pfn 2193 mm/compaction.c last_migrated_pfn = start_pfn;
start_pfn 2282 mm/compaction.c trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
start_pfn 1073 mm/hugetlb.c static int __alloc_gigantic_page(unsigned long start_pfn,
start_pfn 1076 mm/hugetlb.c unsigned long end_pfn = start_pfn + nr_pages;
start_pfn 1077 mm/hugetlb.c return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
start_pfn 1082 mm/hugetlb.c unsigned long start_pfn, unsigned long nr_pages)
start_pfn 1084 mm/hugetlb.c unsigned long i, end_pfn = start_pfn + nr_pages;
start_pfn 1087 mm/hugetlb.c for (i = start_pfn; i < end_pfn; i++) {
start_pfn 1109 mm/hugetlb.c unsigned long start_pfn, unsigned long nr_pages)
start_pfn 1111 mm/hugetlb.c unsigned long last_pfn = start_pfn + nr_pages - 1;
start_pfn 1649 mm/hugetlb.c int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 1658 mm/hugetlb.c for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
start_pfn 147 mm/internal.h extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
start_pfn 150 mm/internal.h static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
start_pfn 154 mm/internal.h return pfn_to_page(start_pfn);
start_pfn 156 mm/internal.h return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
start_pfn 219 mm/internal.h unsigned long start_pfn, unsigned long end_pfn);
start_pfn 462 mm/internal.h extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
start_pfn 465 mm/internal.h static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
start_pfn 681 mm/kasan/common.c start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
start_pfn 705 mm/kasan/common.c pfn_to_nid(mem_data->start_pfn),
start_pfn 1443 mm/kmemleak.c unsigned long start_pfn = node_start_pfn(i);
start_pfn 1447 mm/kmemleak.c for (pfn = start_pfn; pfn < end_pfn; pfn++) {
start_pfn 2724 mm/ksm.c unsigned long start_pfn,
start_pfn 2727 mm/ksm.c if (stable_node->kpfn >= start_pfn &&
start_pfn 2740 mm/ksm.c unsigned long start_pfn,
start_pfn 2749 mm/ksm.c return stable_node_dup_remove_range(stable_node, start_pfn,
start_pfn 2756 mm/ksm.c stable_node_dup_remove_range(dup, start_pfn, end_pfn);
start_pfn 2765 mm/ksm.c static void ksm_check_stable_tree(unsigned long start_pfn,
start_pfn 2777 mm/ksm.c start_pfn, end_pfn,
start_pfn 2787 mm/ksm.c if (stable_node->kpfn >= start_pfn &&
start_pfn 2821 mm/ksm.c ksm_check_stable_tree(mn->start_pfn,
start_pfn 2822 mm/ksm.c mn->start_pfn + mn->nr_pages);
start_pfn 1601 mm/memblock.c unsigned long start_pfn, end_pfn;
start_pfn 1604 mm/memblock.c start_pfn = memblock_region_memory_base_pfn(r);
start_pfn 1606 mm/memblock.c start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
start_pfn 1608 mm/memblock.c pages += end_pfn - start_pfn;
start_pfn 1751 mm/memblock.c unsigned long *start_pfn, unsigned long *end_pfn)
start_pfn 1759 mm/memblock.c *start_pfn = PFN_DOWN(type->regions[mid].base);
start_pfn 1911 mm/memblock.c unsigned long start_pfn = PFN_UP(start);
start_pfn 1915 mm/memblock.c if (start_pfn >= end_pfn)
start_pfn 1918 mm/memblock.c __free_pages_memory(start_pfn, end_pfn);
start_pfn 1920 mm/memblock.c return end_pfn - start_pfn;
start_pfn 166 mm/memory_hotplug.c static void register_page_bootmem_info_section(unsigned long start_pfn)
start_pfn 173 mm/memory_hotplug.c section_nr = pfn_to_section_nr(start_pfn);
start_pfn 201 mm/memory_hotplug.c static void register_page_bootmem_info_section(unsigned long start_pfn)
start_pfn 208 mm/memory_hotplug.c section_nr = pfn_to_section_nr(start_pfn);
start_pfn 330 mm/memory_hotplug.c unsigned long start_pfn,
start_pfn 333 mm/memory_hotplug.c for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
start_pfn 334 mm/memory_hotplug.c if (unlikely(!pfn_to_online_page(start_pfn)))
start_pfn 337 mm/memory_hotplug.c if (unlikely(pfn_to_nid(start_pfn) != nid))
start_pfn 340 mm/memory_hotplug.c if (zone && zone != page_zone(pfn_to_page(start_pfn)))
start_pfn 343 mm/memory_hotplug.c return start_pfn;
start_pfn 351 mm/memory_hotplug.c unsigned long start_pfn,
start_pfn 358 mm/memory_hotplug.c for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
start_pfn 374 mm/memory_hotplug.c static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
start_pfn 384 mm/memory_hotplug.c if (zone_start_pfn == start_pfn) {
start_pfn 405 mm/memory_hotplug.c start_pfn);
start_pfn 425 mm/memory_hotplug.c if (pfn >= start_pfn && pfn < end_pfn)
start_pfn 469 mm/memory_hotplug.c unsigned long start_pfn,
start_pfn 488 mm/memory_hotplug.c shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
start_pfn 616 mm/memory_hotplug.c static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 619 mm/memory_hotplug.c const unsigned long end_pfn = start_pfn + nr_pages;
start_pfn 628 mm/memory_hotplug.c for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
start_pfn 637 mm/memory_hotplug.c online_mem_sections(start_pfn, end_pfn);
start_pfn 675 mm/memory_hotplug.c static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
start_pfn 680 mm/memory_hotplug.c if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
start_pfn 681 mm/memory_hotplug.c zone->zone_start_pfn = start_pfn;
start_pfn 683 mm/memory_hotplug.c zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
start_pfn 686 mm/memory_hotplug.c static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
start_pfn 691 mm/memory_hotplug.c if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
start_pfn 692 mm/memory_hotplug.c pgdat->node_start_pfn = start_pfn;
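resize_zone_range()/resize_pgdat_range() above grow a span to cover hot-added pages: the start may only move down, and the new span runs from the (possibly updated) start to whichever end is larger. The arithmetic on its own, with zone_is_empty() reduced to a spanned_pages == 0 test:

#include <stdio.h>

int main(void)
{
	unsigned long zone_start_pfn = 0x2000, spanned_pages = 0x1000;
	unsigned long start_pfn = 0x1800, nr_pages = 0x100;	/* new range */

	unsigned long old_end_pfn = zone_start_pfn + spanned_pages;

	if (spanned_pages == 0 || start_pfn < zone_start_pfn)
		zone_start_pfn = start_pfn;	/* start only moves down */
	spanned_pages = (start_pfn + nr_pages > old_end_pfn ?
			 start_pfn + nr_pages : old_end_pfn) - zone_start_pfn;

	printf("zone now [%#lx, +%#lx)\n", zone_start_pfn, spanned_pages);
	return 0;	/* [0x1800, +0x1800): old end 0x3000 is kept */
}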
start_pfn 694 mm/memory_hotplug.c pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
start_pfn 702 mm/memory_hotplug.c void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
start_pfn 715 mm/memory_hotplug.c init_currently_empty_zone(zone, start_pfn, nr_pages);
start_pfn 716 mm/memory_hotplug.c resize_zone_range(zone, start_pfn, nr_pages);
start_pfn 718 mm/memory_hotplug.c resize_pgdat_range(pgdat, start_pfn, nr_pages);
start_pfn 727 mm/memory_hotplug.c memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
start_pfn 738 mm/memory_hotplug.c static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
start_pfn 747 mm/memory_hotplug.c if (zone_intersects(zone, start_pfn, nr_pages))
start_pfn 754 mm/memory_hotplug.c static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
start_pfn 757 mm/memory_hotplug.c struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
start_pfn 760 mm/memory_hotplug.c bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
start_pfn 761 mm/memory_hotplug.c bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
start_pfn 778 mm/memory_hotplug.c struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
start_pfn 782 mm/memory_hotplug.c return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
start_pfn 787 mm/memory_hotplug.c return default_zone_for_pfn(nid, start_pfn, nr_pages);
start_pfn 815 mm/memory_hotplug.c arg.start_pfn = pfn;
start_pfn 895 mm/memory_hotplug.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 927 mm/memory_hotplug.c pgdat->node_start_pfn = start_pfn;
start_pfn 1193 mm/memory_hotplug.c bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
start_pfn 1197 mm/memory_hotplug.c end_pfn = min(start_pfn + nr_pages,
start_pfn 1198 mm/memory_hotplug.c zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
start_pfn 1201 mm/memory_hotplug.c for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
start_pfn 1215 mm/memory_hotplug.c int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
start_pfn 1223 mm/memory_hotplug.c for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
start_pfn 1311 mm/memory_hotplug.c do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
start_pfn 1318 mm/memory_hotplug.c for (pfn = start_pfn; pfn < end_pfn; pfn++) {
start_pfn 1403 mm/memory_hotplug.c check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
start_pfn 1406 mm/memory_hotplug.c return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
start_pfn 1487 mm/memory_hotplug.c static int __ref __offline_pages(unsigned long start_pfn,
start_pfn 1503 mm/memory_hotplug.c if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
start_pfn 1512 mm/memory_hotplug.c nr_pages = end_pfn - start_pfn;
start_pfn 1515 mm/memory_hotplug.c ret = start_isolate_page_range(start_pfn, end_pfn,
start_pfn 1524 mm/memory_hotplug.c arg.start_pfn = start_pfn;
start_pfn 1536 mm/memory_hotplug.c for (pfn = start_pfn; pfn;) {
start_pfn 1561 mm/memory_hotplug.c ret = dissolve_free_huge_pages(start_pfn, end_pfn);
start_pfn 1567 mm/memory_hotplug.c ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
start_pfn 1573 mm/memory_hotplug.c walk_system_ram_range(start_pfn, end_pfn - start_pfn,
start_pfn 1586 mm/memory_hotplug.c adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
start_pfn 1611 mm/memory_hotplug.c remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
start_pfn 1616 mm/memory_hotplug.c undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
start_pfn 1620 mm/memory_hotplug.c (unsigned long long) start_pfn << PAGE_SHIFT,
start_pfn 1628 mm/memory_hotplug.c int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
start_pfn 1630 mm/memory_hotplug.c return __offline_pages(start_pfn, start_pfn + nr_pages);
start_pfn 566 mm/page_alloc.c unsigned long sp, start_pfn;
start_pfn 570 mm/page_alloc.c start_pfn = zone->zone_start_pfn;
start_pfn 579 mm/page_alloc.c start_pfn, start_pfn + sp);
start_pfn 1392 mm/page_alloc.c unsigned long start_pfn = PFN_DOWN(start);
start_pfn 1395 mm/page_alloc.c for (; start_pfn < end_pfn; start_pfn++) {
start_pfn 1396 mm/page_alloc.c if (pfn_valid(start_pfn)) {
start_pfn 1397 mm/page_alloc.c struct page *page = pfn_to_page(start_pfn);
start_pfn 1399 mm/page_alloc.c init_reserved_page(start_pfn);
start_pfn 1515 mm/page_alloc.c struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
start_pfn 1524 mm/page_alloc.c if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
start_pfn 1527 mm/page_alloc.c start_page = pfn_to_online_page(start_pfn);
start_pfn 1724 mm/page_alloc.c deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
start_pfn 1727 mm/page_alloc.c unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
start_pfn 1728 mm/page_alloc.c unsigned long spfn = *start_pfn, epfn = *end_pfn;
start_pfn 1733 mm/page_alloc.c for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
start_pfn 1736 mm/page_alloc.c if (mo_pfn <= *start_pfn)
start_pfn 1740 mm/page_alloc.c nr_pages += deferred_init_pages(zone, *start_pfn, t);
start_pfn 1743 mm/page_alloc.c *start_pfn = mo_pfn;
start_pfn 2282 mm/page_alloc.c unsigned long start_pfn, end_pfn;
start_pfn 2288 mm/page_alloc.c start_pfn = page_to_pfn(page);
start_pfn 2289 mm/page_alloc.c start_pfn = start_pfn & ~(pageblock_nr_pages-1);
start_pfn 2290 mm/page_alloc.c start_page = pfn_to_page(start_pfn);
start_pfn 2292 mm/page_alloc.c end_pfn = start_pfn + pageblock_nr_pages - 1;
start_pfn 2295 mm/page_alloc.c if (!zone_spans_pfn(zone, start_pfn))
start_pfn 5878 mm/page_alloc.c unsigned long start_pfn, enum memmap_context context,
start_pfn 5881 mm/page_alloc.c unsigned long pfn, end_pfn = start_pfn + size;
start_pfn 5899 mm/page_alloc.c if (start_pfn == altmap->base_pfn)
start_pfn 5900 mm/page_alloc.c start_pfn += altmap->reserve;
start_pfn 5905 mm/page_alloc.c for (pfn = start_pfn; pfn < end_pfn; pfn++) {
start_pfn 5947 mm/page_alloc.c unsigned long start_pfn,
start_pfn 5951 mm/page_alloc.c unsigned long pfn, end_pfn = start_pfn + size;
start_pfn 5967 mm/page_alloc.c start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
start_pfn 5968 mm/page_alloc.c size = end_pfn - start_pfn;
start_pfn 5971 mm/page_alloc.c for (pfn = start_pfn; pfn < end_pfn; pfn++) {
start_pfn 6029 mm/page_alloc.c unsigned long zone, unsigned long start_pfn)
start_pfn 6031 mm/page_alloc.c memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
start_pfn 6238 mm/page_alloc.c unsigned long start_pfn, end_pfn;
start_pfn 6244 mm/page_alloc.c nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
start_pfn 6246 mm/page_alloc.c state->last_start = start_pfn;
start_pfn 6266 mm/page_alloc.c unsigned long start_pfn, end_pfn;
start_pfn 6269 mm/page_alloc.c for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
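The mm/page_alloc.c lines at 2288-2295 round an arbitrary PFN down to its pageblock with "pfn & ~(pageblock_nr_pages - 1)" and keep an inclusive end. Demo with pageblock_nr_pages = 512 assumed (2 MiB blocks of 4 KiB pages):

#include <stdio.h>

#define pageblock_nr_pages 512UL	/* assumed, must be a power of 2 */

int main(void)
{
	unsigned long pfn = 0x1234;
	unsigned long start_pfn = pfn & ~(pageblock_nr_pages - 1);
	unsigned long end_pfn = start_pfn + pageblock_nr_pages - 1;

	printf("pfn %#lx -> pageblock [%#lx, %#lx]\n",
	       pfn, start_pfn, end_pfn);	/* [0x1200, 0x13ff] */
	return 0;
}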
&start_pfn, &end_pfn, &this_nid) { start_pfn 6270 mm/page_alloc.c start_pfn = min(start_pfn, max_low_pfn); start_pfn 6273 mm/page_alloc.c if (start_pfn < end_pfn) start_pfn 6274 mm/page_alloc.c memblock_free_early_nid(PFN_PHYS(start_pfn), start_pfn 6275 mm/page_alloc.c (end_pfn - start_pfn) << PAGE_SHIFT, start_pfn 6289 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 6292 mm/page_alloc.c for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) start_pfn 6293 mm/page_alloc.c memory_present(this_nid, start_pfn, end_pfn); start_pfn 6308 mm/page_alloc.c unsigned long *start_pfn, unsigned long *end_pfn) start_pfn 6313 mm/page_alloc.c *start_pfn = -1UL; start_pfn 6317 mm/page_alloc.c *start_pfn = min(*start_pfn, this_start_pfn); start_pfn 6321 mm/page_alloc.c if (*start_pfn == -1UL) start_pfn 6322 mm/page_alloc.c *start_pfn = 0; start_pfn 6429 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 6432 mm/page_alloc.c for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { start_pfn 6433 mm/page_alloc.c start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); start_pfn 6435 mm/page_alloc.c nr_absent -= end_pfn - start_pfn; start_pfn 6447 mm/page_alloc.c unsigned long __init absent_pages_in_range(unsigned long start_pfn, start_pfn 6450 mm/page_alloc.c return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); start_pfn 6483 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 6487 mm/page_alloc.c start_pfn = clamp(memblock_region_memory_base_pfn(r), start_pfn 6494 mm/page_alloc.c nr_absent += end_pfn - start_pfn; start_pfn 6498 mm/page_alloc.c nr_absent += end_pfn - start_pfn; start_pfn 6886 mm/page_alloc.c unsigned long start_pfn = 0; start_pfn 6896 mm/page_alloc.c get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); start_pfn 6898 mm/page_alloc.c (u64)start_pfn << PAGE_SHIFT, start_pfn 6901 mm/page_alloc.c start_pfn = node_start_pfn; start_pfn 6903 mm/page_alloc.c calculate_node_totalpages(pgdat, start_pfn, end_pfn, start_pfn 7053 mm/page_alloc.c unsigned long start_pfn; start_pfn 7056 mm/page_alloc.c for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) start_pfn 7057 mm/page_alloc.c min_pfn = min(min_pfn, start_pfn); start_pfn 7086 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 7089 mm/page_alloc.c for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { start_pfn 7090 mm/page_alloc.c unsigned long pages = end_pfn - start_pfn; start_pfn 7217 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 7235 mm/page_alloc.c for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { start_pfn 7238 mm/page_alloc.c start_pfn = max(start_pfn, zone_movable_pfn[nid]); start_pfn 7239 mm/page_alloc.c if (start_pfn >= end_pfn) start_pfn 7243 mm/page_alloc.c if (start_pfn < usable_startpfn) { start_pfn 7246 mm/page_alloc.c - start_pfn; start_pfn 7265 mm/page_alloc.c start_pfn = usable_startpfn; start_pfn 7273 mm/page_alloc.c size_pages = end_pfn - start_pfn; start_pfn 7276 mm/page_alloc.c zone_movable_pfn[nid] = start_pfn + size_pages; start_pfn 7344 mm/page_alloc.c unsigned long start_pfn, end_pfn; start_pfn 7353 mm/page_alloc.c start_pfn = find_min_pfn_with_active_regions(); start_pfn 7359 mm/page_alloc.c end_pfn = max(max_zone_pfn[i], start_pfn); start_pfn 7360 mm/page_alloc.c arch_zone_lowest_possible_pfn[i] = start_pfn; start_pfn 7363 mm/page_alloc.c start_pfn = end_pfn; start_pfn 7401 mm/page_alloc.c for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { start_pfn 7403 mm/page_alloc.c (u64)start_pfn << 
PAGE_SHIFT, start_pfn 7405 mm/page_alloc.c subsection_map_init(start_pfn, end_pfn - start_pfn); start_pfn 8567 mm/page_alloc.c __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) start_pfn 8577 mm/page_alloc.c for (pfn = start_pfn; pfn < end_pfn; pfn++) start_pfn 8586 mm/page_alloc.c pfn = start_pfn; start_pfn 286 mm/page_ext.c static int __meminit online_page_ext(unsigned long start_pfn, start_pfn 293 mm/page_ext.c start = SECTION_ALIGN_DOWN(start_pfn); start_pfn 294 mm/page_ext.c end = SECTION_ALIGN_UP(start_pfn + nr_pages); start_pfn 302 mm/page_ext.c nid = pfn_to_nid(start_pfn); start_pfn 321 mm/page_ext.c static int __meminit offline_page_ext(unsigned long start_pfn, start_pfn 326 mm/page_ext.c start = SECTION_ALIGN_DOWN(start_pfn); start_pfn 327 mm/page_ext.c end = SECTION_ALIGN_UP(start_pfn + nr_pages); start_pfn 343 mm/page_ext.c ret = online_page_ext(mn->start_pfn, start_pfn 347 mm/page_ext.c offline_page_ext(mn->start_pfn, start_pfn 351 mm/page_ext.c offline_page_ext(mn->start_pfn, start_pfn 375 mm/page_ext.c unsigned long start_pfn, end_pfn; start_pfn 377 mm/page_ext.c start_pfn = node_start_pfn(nid); start_pfn 384 mm/page_ext.c for (pfn = start_pfn; pfn < end_pfn; start_pfn 39 mm/page_isolation.c arg.start_pfn = pfn; start_pfn 193 mm/page_isolation.c int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, start_pfn 201 mm/page_isolation.c BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); start_pfn 204 mm/page_isolation.c for (pfn = start_pfn; start_pfn 218 mm/page_isolation.c for (pfn = start_pfn; start_pfn 233 mm/page_isolation.c void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, start_pfn 239 mm/page_isolation.c BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); start_pfn 242 mm/page_isolation.c for (pfn = start_pfn; start_pfn 288 mm/page_isolation.c int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, start_pfn 300 mm/page_isolation.c for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { start_pfn 305 mm/page_isolation.c page = __first_valid_page(start_pfn, end_pfn - start_pfn); start_pfn 311 mm/page_isolation.c pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, start_pfn 315 mm/page_isolation.c trace_test_pages_isolated(start_pfn, end_pfn, pfn); start_pfn 108 mm/shuffle.c unsigned long start_pfn = z->zone_start_pfn; start_pfn 114 mm/shuffle.c start_pfn = ALIGN(start_pfn, order_pages); start_pfn 115 mm/shuffle.c for (i = start_pfn; i < end_pfn; i += order_pages) { start_pfn 156 mm/sparse.c void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, start_pfn 165 mm/sparse.c if (*start_pfn > max_sparsemem_pfn) { start_pfn 168 mm/sparse.c *start_pfn, *end_pfn, max_sparsemem_pfn); start_pfn 170 mm/sparse.c *start_pfn = max_sparsemem_pfn; start_pfn 175 mm/sparse.c *start_pfn, *end_pfn, max_sparsemem_pfn); start_pfn 609 mm/sparse.c void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) start_pfn 613 mm/sparse.c for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { start_pfn 628 mm/sparse.c void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) start_pfn 632 mm/sparse.c for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { start_pfn 877 mm/sparse.c int __meminit sparse_add_section(int nid, unsigned long start_pfn, start_pfn 880 mm/sparse.c unsigned long section_nr = pfn_to_section_nr(start_pfn); start_pfn 889 mm/sparse.c memmap = section_activate(nid, start_pfn, nr_pages, altmap); start_pfn 904 
mm/sparse.c if (section_nr_to_pfn(section_nr) != start_pfn) start_pfn 1436 mm/vmstat.c unsigned long start_pfn = zone->zone_start_pfn; start_pfn 1440 mm/vmstat.c for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {