/linux-4.1.27/mm/
mmzone.c
      81  if (page_zone(page) != zone)  [memmap_valid_within()]
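The single hit above is the core of memmap_valid_within(): on arches that can have holes in their mem_map, a pfn inside a zone's span may still map to an unused struct page. A minimal sketch of the v4.1 check, reconstructed from the hit rather than copied from the tree:

    int memmap_valid_within(unsigned long pfn,
                            struct page *page, struct zone *zone)
    {
            if (page_to_pfn(page) != pfn)   /* pfn <-> page round trip broken */
                    return 0;

            if (page_zone(page) != zone)    /* memmap entry linked to another zone */
                    return 0;

            return 1;
    }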
page_isolation.c
      20  zone = page_zone(page);  [set_migratetype_isolate()]
      84  zone = page_zone(page);  [unset_migratetype_isolate()]
     238  move_freepages(page_zone(page), page, end_page,  [__test_page_isolated_in_pageblock()]
     284  zone = page_zone(page);  [test_pages_isolated()]
internal.h
     211  * general, page_zone(page)->lock must be held by the caller to prevent the
     213  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
     283  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  [mlock_migrate_page()]
     285  __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  [mlock_migrate_page()]
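The hits at 283 and 285 form one pattern: when a page migrates, its NR_MLOCK count has to move from the old page's zone to the new page's zone. A sketch of mlock_migrate_page() built around those two lines, assuming the v4.1 helpers TestClearPageMlocked() and hpage_nr_pages(); the exact body may differ from the tree:

    static inline void mlock_migrate_page(struct page *newpage, struct page *page)
    {
            if (TestClearPageMlocked(page)) {
                    int nr_pages = hpage_nr_pages(page);
                    unsigned long flags;

                    /* __mod_zone_page_state() is not IRQ-safe on its own */
                    local_irq_save(flags);
                    __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                    SetPageMlocked(newpage);
                    __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
                    local_irq_restore(flags);
            }
    }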
mlock.c
      62  mod_zone_page_state(page_zone(page), NR_MLOCK,  [clear_page_mlock()]
      86  mod_zone_page_state(page_zone(page), NR_MLOCK,  [mlock_vma_page()]
     103  lruvec = mem_cgroup_page_lruvec(page, page_zone(page));  [__munlock_isolate_lru_page()]
     176  struct zone *zone = page_zone(page);  [munlock_vma_page()]
     465  zone = page_zone(page);  [munlock_vma_pages_range()]
workingset.c
     215  struct zone *zone = page_zone(page);  [workingset_eviction()]
     252  atomic_long_inc(&page_zone(page)->inactive_age);  [workingset_activation()]
     359  inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);  [shadow_lru_isolate()]
swap.c
      55  struct zone *zone = page_zone(page);  [__page_cache_release()]
     429  struct zone *pagezone = page_zone(page);  [pagevec_lru_move_fn()]
     560  struct zone *zone = page_zone(page);  [activate_page()]
     687  struct zone *zone = page_zone(page);  [add_page_to_unevictable_list()]
     726  __mod_zone_page_state(page_zone(page), NR_MLOCK,  [lru_cache_add_active_or_unevictable()]
     942  struct zone *pagezone = page_zone(page);  [release_pages()]
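The pagevec_lru_move_fn() and release_pages() hits share a batching idiom worth calling out: a pagevec may hold pages from several zones, so zone->lru_lock is dropped and retaken only when page_zone() changes between consecutive pages. A simplified sketch of the loop, with the per-page work elided:

    struct zone *zone = NULL;
    unsigned long flags = 0;
    int i;

    for (i = 0; i < pagevec_count(pvec); i++) {
            struct page *page = pvec->pages[i];
            struct zone *pagezone = page_zone(page);

            if (pagezone != zone) {         /* crossed a zone boundary */
                    if (zone)
                            spin_unlock_irqrestore(&zone->lru_lock, flags);
                    zone = pagezone;
                    spin_lock_irqsave(&zone->lru_lock, flags);
            }
            /* ... per-page work runs under this zone's lru_lock ... */
    }
    if (zone)
            spin_unlock_irqrestore(&zone->lru_lock, flags);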
vmstat.c
     246  * generated for page_zone(page) into the optimized functions.
     282  __inc_zone_state(page_zone(page), item);  [__inc_zone_page_state()]
     304  __dec_zone_state(page_zone(page), item);  [__dec_zone_page_state()]
     373  mod_state(page_zone(page), item, 1, 1);  [inc_zone_page_state()]
     379  mod_state(page_zone(page), item, -1, -1);  [dec_zone_page_state()]
     411  zone = page_zone(page);  [inc_zone_page_state()]
cma.c
     110  zone = page_zone(pfn_to_page(pfn));  [cma_activate_area()]
     124  if (page_zone(pfn_to_page(pfn)) != zone)  [cma_activate_area()]
memory_hotplug.c
     538  if (zone && zone != page_zone(pfn_to_page(start_pfn)))  [find_smallest_section_pfn()]
     566  if (zone && zone != page_zone(pfn_to_page(pfn)))  [find_biggest_section_pfn()]
     625  if (page_zone(pfn_to_page(pfn)) != zone)  [shrink_zone_span()]
     978  zone = page_zone(pfn_to_page(pfn));  [online_pages()]
     997  zone = page_zone(pfn_to_page(pfn));  [online_pages()]
    1353  if (zone && page_zone(page) != zone)  [test_pages_in_a_zone()]
    1355  zone = page_zone(page);  [test_pages_in_a_zone()]
    1688  zone = page_zone(pfn_to_page(start_pfn));  [__offline_pages()]
page_alloc.c
     276  if (zone != page_zone(page))  [page_is_consistent()]
     851  free_one_page(page_zone(page), page, pfn, order, migratetype);  [__free_pages_ok()]
     871  page_zone(page)->managed_pages += nr_pages;  [__free_pages_bootmem()]
    1099  * page_zone is not safe to call in this context when  [move_freepages()]
    1105  VM_BUG_ON(page_zone(start_page) != page_zone(end_page));  [move_freepages()]
    1565  struct zone *zone = page_zone(page);  [free_hot_cold_page()]
    1663  zone = page_zone(page);  [__isolate_free_page()]
    2495  struct zone *zone = page_zone(page);  [__alloc_pages_direct_compact()]
    5534  page_zone(page)->managed_pages += count;  [adjust_managed_page_count()]
    5570  page_zone(page)->managed_pages++;  [free_highmem_page()]
    6197  zone = page_zone(page);  [get_pfnblock_flags_mask()]
    6228  zone = page_zone(page);  [set_pfnblock_flags_mask()]
    6342  * its NODE_DATA will be NULL - see page_zone.  [is_pageblock_removable_nolock()]
    6347  zone = page_zone(page);  [is_pageblock_removable_nolock()]
    6444  .zone = page_zone(pfn_to_page(start)),  [alloc_contig_range()]
    6608  zone = page_zone(pfn_to_page(pfn));  [__offline_isolated_pages()]
    6648  struct zone *zone = page_zone(page);  [is_free_buddy_page()]
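The move_freepages() hits at 1099 and 1105 belong together: the comment warns that page_zone() is unreliable when the memmap can contain holes, and the assertion covers the remaining configurations. Roughly, the guard looks like this (a sketch, assuming the CONFIG_HOLES_IN_ZONE conditional the comment refers to):

    /* Inside move_freepages(): start_page and end_page must sit in the
     * same zone, but with CONFIG_HOLES_IN_ZONE the struct pages between
     * them may be uninitialized, so page_zone() cannot be consulted. */
    #ifndef CONFIG_HOLES_IN_ZONE
            VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
    #endif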
rmap.c
    1049  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  [do_page_add_anon_rmap()]
    1081  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  [page_add_new_anon_rmap()]
    1161  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  [page_remove_rmap()]
compaction.c
     116  if (page_zone(start_page) != zone)  [pageblock_pfn_to_page()]
     121  /* This gives a shorter code than deriving page_zone(end_page) */  [pageblock_pfn_to_page()]
     236  if (zone != page_zone(page))  [__reset_isolation_suitable()]
migrate.c
    1637  mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,  [numamigrate_isolate_page()]
    1785  mod_zone_page_state(page_zone(page),  [migrate_misplaced_transhuge_page()]
    1838  mod_zone_page_state(page_zone(page),  [migrate_misplaced_transhuge_page()]
slab.c
    1610  add_zone_page_state(page_zone(page),  [kmem_getpages()]
    1613  add_zone_page_state(page_zone(page),  [kmem_getpages()]
    1641  sub_zone_page_state(page_zone(page),  [kmem_freepages()]
    1644  sub_zone_page_state(page_zone(page),  [kmem_freepages()]
vmscan.c
     882  VM_BUG_ON_PAGE(page_zone(page) != zone, page);  [shrink_page_list()]
    1383  struct zone *zone = page_zone(page);  [isolate_lru_page()]
    3795  pagezone = page_zone(page);  [check_move_unevictable_pages()]
memory-failure.c
     236  drain_all_pages(page_zone(p));  [shake_page()]
slub.c
    1389  mod_zone_page_state(page_zone(page),  [allocate_slab()]
    1471  mod_zone_page_state(page_zone(page),  [__free_slab()]
filemap.c
     673  const struct zone *zone = page_zone(page);  [page_waitqueue()]
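The lone filemap.c hit is the page-lock waitqueue lookup. In v4.1 each zone still carries a hashed wait table, so the waitqueue for a page is derived from its zone; a sketch of page_waitqueue() as it stood then:

    static wait_queue_head_t *page_waitqueue(struct page *page)
    {
            const struct zone *zone = page_zone(page);

            /* hash the page pointer into the zone's wait table */
            return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
    }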
huge_memory.c
    1639  struct zone *zone = page_zone(page);  [__split_huge_page_refcount()]
mempolicy.c
    1933  if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))  [alloc_page_interleave()]
memcontrol.c
    2390  struct zone *zone = page_zone(page);  [lock_page_lru()]
    2406  struct zone *zone = page_zone(page);  [unlock_page_lru()]
/linux-4.1.27/include/linux/
vmstat.h
     250  __inc_zone_state(page_zone(page), item);  [__inc_zone_page_state()]
     256  __dec_zone_state(page_zone(page), item);  [__dec_zone_page_state()]
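Both hits are thin wrappers: the page-based counter API resolves the zone once and forwards to the zone-based helpers, and the __-prefixed variants assume the caller has already disabled preemption or interrupts. The v4.1 increment wrapper is essentially:

    static inline void __inc_zone_page_state(struct page *page,
                                             enum zone_stat_item item)
    {
            __inc_zone_state(page_zone(page), item);
    }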
page-flags.h
     252  * Must use a macro here due to header dependency issues. page_zone() is not
     255  #define PageHighMem(__p) is_highmem(page_zone(__p))
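As the comment at 252 says, page-flags.h cannot see the page_zone() inline (it is defined later, in mm.h), so PageHighMem() must be a macro that only expands where page_zone() is already in scope. A hypothetical caller, just to show the usual pairing with kmap():

    /* Hypothetical example: PageHighMem() picks the access path. */
    void *addr;

    if (PageHighMem(page))
            addr = kmap(page);              /* highmem needs a temporary mapping */
    else
            addr = page_address(page);      /* lowmem is permanently mapped */
    /* ... use addr ... */
    if (PageHighMem(page))
            kunmap(page);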
mm.h
     878  static inline struct zone *page_zone(const struct page *page)  [definition of page_zone()]
     919  * Some inline functions in vmstat.h depend on page_zone()
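The hit at 878 is the definition every other hit in this listing resolves to. Note that struct page stores no zone pointer: page->flags encodes a node id and a zone index, and the zone is recomputed from the node's zone array on each call. The v4.1 body is essentially:

    static inline struct zone *page_zone(const struct page *page)
    {
            return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }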
mmzone.h
    1254  * never used. The page_zone linkages are then broken even though pfn_valid()
/linux-4.1.27/arch/sh/mm/
init.c
     525  zone = page_zone(pfn_to_page(start_pfn));  [arch_remove_memory()]
/linux-4.1.27/arch/powerpc/mm/
mem.c
     144  zone = page_zone(pfn_to_page(start_pfn));  [arch_remove_memory()]
/linux-4.1.27/drivers/base/
memory.c
     399  zone = page_zone(first_page);  [show_valid_zones()]
/linux-4.1.27/arch/ia64/mm/
init.c
     684  zone = page_zone(pfn_to_page(start_pfn));  [arch_remove_memory()]
/linux-4.1.27/kernel/sched/
wait.c
     489  const struct zone *zone = page_zone(virt_to_page(word));  [bit_waitqueue()]
/linux-4.1.27/arch/x86/mm/
init_32.c
     844  zone = page_zone(pfn_to_page(start_pfn));  [arch_remove_memory()]
init_64.c
    1024  zone = page_zone(pfn_to_page(start_pfn));  [arch_remove_memory()]
/linux-4.1.27/kernel/power/
snapshot.c
    1089  if (page_zone(page) != zone)  [saveable_highmem_page()]
    1151  if (page_zone(page) != zone)  [saveable_page()]
/linux-4.1.27/kernel/
fork.c
     217  struct zone *zone = page_zone(virt_to_page(ti));  [account_kernel_stack()]