spanned_pages   803  arch/powerpc/mm/numa.c    u64 spanned_pages = end_pfn - start_pfn;
spanned_pages   827  arch/powerpc/mm/numa.c    NODE_DATA(nid)->node_spanned_pages = spanned_pages;
spanned_pages   125  arch/x86/mm/highmem_32.c  zone_end_pfn = zone_start_pfn + zone->spanned_pages;
spanned_pages   490  include/linux/mmzone.h    unsigned long spanned_pages;
spanned_pages   593  include/linux/mmzone.h    return zone->zone_start_pfn + zone->spanned_pages;
spanned_pages   608  include/linux/mmzone.h    return zone->spanned_pages == 0;
spanned_pages   442  kernel/crash_core.c       VMCOREINFO_OFFSET(zone, spanned_pages);
spanned_pages  1182  kernel/power/snapshot.c   rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
spanned_pages   395  mm/memory_hotplug.c       zone->spanned_pages = zone_end_pfn - pfn;
spanned_pages   407  mm/memory_hotplug.c       zone->spanned_pages = pfn - zone_start_pfn + 1;
spanned_pages   435  mm/memory_hotplug.c       zone->spanned_pages = 0;
spanned_pages   447  mm/memory_hotplug.c       zone->spanned_pages;
spanned_pages   450  mm/memory_hotplug.c       if (!zone->spanned_pages)
spanned_pages   683  mm/memory_hotplug.c       zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
spanned_pages   571  mm/page_alloc.c           sp = zone->spanned_pages;
spanned_pages  6566  mm/page_alloc.c           zone->spanned_pages = size;
spanned_pages  6658  mm/page_alloc.c           static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
spanned_pages  6661  mm/page_alloc.c           unsigned long pages = spanned_pages;
spanned_pages  6671  mm/page_alloc.c           if (spanned_pages > present_pages + (present_pages >> 4) &&
spanned_pages  6768  mm/page_alloc.c           size = zone->spanned_pages;
spanned_pages   260  mm/page_owner.c           unsigned long end_pfn = pfn + zone->spanned_pages;
spanned_pages   138  mm/shuffle.c              ALIGN_DOWN(get_random_long() % z->spanned_pages,
spanned_pages  1585  mm/vmstat.c               zone->spanned_pages,
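Read together, the include/linux/mmzone.h hits above (lines 490, 593, 608) define the invariant the other call sites rely on: a zone spans the PFN range [zone_start_pfn, zone_start_pfn + spanned_pages). The sketch below reconstructs those two helpers as a self-contained user-space program; the field and helper names match the kernel snippets shown in the index, but the cut-down struct zone is an illustration only, not the real kernel definition.

/*
 * Minimal sketch of the zone-span helpers from include/linux/mmzone.h,
 * reconstructed from the index entries above. Compiles stand-alone.
 */
#include <stdbool.h>
#include <stdio.h>

/* Illustration only: struct zone reduced to the two fields this index shows. */
struct zone {
	unsigned long zone_start_pfn;	/* first PFN the zone covers */
	unsigned long spanned_pages;	/* total span, holes included */
};

/* One past the last PFN in the zone (cf. mmzone.h:593). */
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

/* A zone with zero span contains no pages (cf. mmzone.h:608). */
static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

int main(void)
{
	struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

	printf("end_pfn=%#lx empty=%d\n", zone_end_pfn(&z), zone_is_empty(&z));
	return 0;
}

Note that spanned_pages counts holes in the zone, so it can exceed the number of pages actually backed by memory; that is why calc_memmap_size() (mm/page_alloc.c:6658 above) starts from spanned_pages but, per the heuristic at line 6671, falls back to present_pages when the zone is sparse enough that sizing the memmap by span would overestimate.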