/linux-4.4.14/mm/
workingset.c
    215  struct zone *zone = page_zone(page);  in workingset_eviction()
    252  atomic_long_inc(&page_zone(page)->inactive_age);  in workingset_activation()
    359  inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);  in shadow_lru_isolate()
page_isolation.c
    21  zone = page_zone(page);  in set_migratetype_isolate()
    85  zone = page_zone(page);  in unset_migratetype_isolate()
    267  zone = page_zone(page);  in test_pages_isolated()
mlock.c
    62  mod_zone_page_state(page_zone(page), NR_MLOCK,  in clear_page_mlock()
    86  mod_zone_page_state(page_zone(page), NR_MLOCK,  in mlock_vma_page()
    103  lruvec = mem_cgroup_page_lruvec(page, page_zone(page));  in __munlock_isolate_lru_page()
    176  struct zone *zone = page_zone(page);  in munlock_vma_page()
    465  zone = page_zone(page);  in munlock_vma_pages_range()
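The mlock.c hits are all per-zone NR_MLOCK bookkeeping: the page's zone is looked up once and the counter is adjusted by the number of base pages the page covers. A minimal sketch of that pattern, modelled on clear_page_mlock() in this tree (the sketch name is made up; locking and vm-event counters are omitted):

    /* Sketch only: the NR_MLOCK accounting idiom used throughout mlock.c. */
    static void clear_mlock_sketch(struct page *page)
    {
        if (!TestClearPageMlocked(page))
            return;
        /* a THP head page stands for hpage_nr_pages() base pages */
        mod_zone_page_state(page_zone(page), NR_MLOCK, -hpage_nr_pages(page));
    }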
swap.c
    56  struct zone *zone = page_zone(page);  in __page_cache_release()
    429  struct zone *pagezone = page_zone(page);  in pagevec_lru_move_fn()
    560  struct zone *zone = page_zone(page);  in activate_page()
    689  struct zone *zone = page_zone(page);  in add_page_to_unevictable_list()
    728  __mod_zone_page_state(page_zone(page), NR_MLOCK,  in lru_cache_add_active_or_unevictable()
    944  struct zone *pagezone = page_zone(page);  in release_pages()
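Several of the swap.c callers (pagevec_lru_move_fn(), release_pages()) use page_zone() to batch zone->lru_lock acquisition: while walking a pagevec they drop and re-take the lock only when the current page belongs to a different zone than the previous one. A simplified sketch of that idiom as it appears in this tree (sketch name made up; the per-page LRU work and memcg lruvec lookup are elided):

    /* Sketch only: lock batching keyed on page_zone(), as in pagevec_lru_move_fn(). */
    static void walk_pagevec_sketch(struct pagevec *pvec)
    {
        struct zone *zone = NULL;
        unsigned long flags = 0;
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
            struct page *page = pvec->pages[i];
            struct zone *pagezone = page_zone(page);

            /* switch locks only when we cross into another zone */
            if (pagezone != zone) {
                if (zone)
                    spin_unlock_irqrestore(&zone->lru_lock, flags);
                zone = pagezone;
                spin_lock_irqsave(&zone->lru_lock, flags);
            }
            /* ... touch the page's LRU state under zone->lru_lock ... */
        }
        if (zone)
            spin_unlock_irqrestore(&zone->lru_lock, flags);
    }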
internal.h
    304  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in mlock_migrate_page()
    306  __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  in mlock_migrate_page()
memory_hotplug.c
    547  if (zone && zone != page_zone(pfn_to_page(start_pfn)))  in find_smallest_section_pfn()
    575  if (zone && zone != page_zone(pfn_to_page(pfn)))  in find_biggest_section_pfn()
    634  if (page_zone(pfn_to_page(pfn)) != zone)  in shrink_zone_span()
    990  zone = page_zone(pfn_to_page(pfn));  in online_pages()
    1009  zone = page_zone(pfn_to_page(pfn));  in online_pages()
    1398  if (zone && page_zone(page) != zone)  in test_pages_in_a_zone()
    1400  zone = page_zone(page);  in test_pages_in_a_zone()
    1734  zone = page_zone(pfn_to_page(start_pfn));  in __offline_pages()
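Most of the memory_hotplug.c callers answer one question: does a pfn range sit entirely inside a single zone? test_pages_in_a_zone() and shrink_zone_span() both do this by comparing page_zone() across the range. A simplified sketch of the check (function name made up; the real code steps by memory sections and MAX_ORDER blocks instead of single pfns):

    /* Sketch only: the "whole range in one zone" check used by hotplug code. */
    static bool range_in_one_zone_sketch(unsigned long start_pfn, unsigned long end_pfn)
    {
        struct zone *zone = NULL;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
            struct page *page;

            if (!pfn_valid(pfn))
                continue;
            page = pfn_to_page(pfn);
            if (zone && page_zone(page) != zone)
                return false;   /* range straddles a zone boundary */
            zone = page_zone(page);
        }
        return true;
    }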
mmzone.c
    81  if (page_zone(page) != zone)  in memmap_valid_within()
cma.c
    110  zone = page_zone(pfn_to_page(pfn));  in cma_activate_area()
    124  if (page_zone(pfn_to_page(pfn)) != zone)  in cma_activate_area()
vmstat.c
    282  __inc_zone_state(page_zone(page), item);  in __inc_zone_page_state()
    304  __dec_zone_state(page_zone(page), item);  in __dec_zone_page_state()
    373  mod_state(page_zone(page), item, 1, 1);  in inc_zone_page_state()
    379  mod_state(page_zone(page), item, -1, -1);  in dec_zone_page_state()
    411  zone = page_zone(page);  in inc_zone_page_state()
migrate.c
    337  oldzone = page_zone(page);  in migrate_page_move_mapping()
    338  newzone = page_zone(newpage);  in migrate_page_move_mapping()
    1658  mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,  in numamigrate_isolate_page()
    1805  mod_zone_page_state(page_zone(page),  in migrate_misplaced_transhuge_page()
    1859  mod_zone_page_state(page_zone(page),  in migrate_misplaced_transhuge_page()
page_alloc.c
    376  if (zone != page_zone(page))  in page_is_consistent()
    1022  free_one_page(page_zone(page), page, pfn, order, migratetype);  in __free_pages_ok()
    1042  page_zone(page)->managed_pages += nr_pages;  in __free_pages_boot_core()
    1222  VM_BUG_ON(page_zone(page) != zone);  in deferred_init_memmap()
    1505  VM_BUG_ON(page_zone(start_page) != page_zone(end_page));  in move_freepages()
    2046  struct zone *zone = page_zone(page);  in free_hot_cold_page()
    2146  zone = page_zone(page);  in __isolate_free_page()
    2809  struct zone *zone = page_zone(page);  in __alloc_pages_direct_compact()
    5802  page_zone(page)->managed_pages += count;  in adjust_managed_page_count()
    5838  page_zone(page)->managed_pages++;  in free_highmem_page()
    [all …]
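Two of the page_alloc.c hits (adjust_managed_page_count() and free_highmem_page()) use page_zone() to credit pages to a zone's managed_pages total as memory is handed to the buddy allocator. For reference, adjust_managed_page_count() in this tree looks roughly like this:

    void adjust_managed_page_count(struct page *page, long count)
    {
        spin_lock(&managed_page_count_lock);
        page_zone(page)->managed_pages += count;
        totalram_pages += count;
    #ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
            totalhigh_pages += count;
    #endif
        spin_unlock(&managed_page_count_lock);
    }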
page_idle.c
    43  zone = page_zone(page);  in page_idle_get_page()
rmap.c
    1165  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  in do_page_add_anon_rmap()
    1197  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  in page_add_new_anon_rmap()
    1277  __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,  in page_remove_rmap()
compaction.c
    105  if (page_zone(start_page) != zone)  in pageblock_pfn_to_page()
    229  if (zone != page_zone(page))  in __reset_isolation_suitable()
vmscan.c
    919  VM_BUG_ON_PAGE(page_zone(page) != zone, page);  in shrink_page_list()
    1431  struct zone *zone = page_zone(page);  in isolate_lru_page()
    3860  pagezone = page_zone(page);  in check_move_unevictable_pages()
slab.c
    1613  add_zone_page_state(page_zone(page),  in kmem_getpages()
    1616  add_zone_page_state(page_zone(page),  in kmem_getpages()
    1644  sub_zone_page_state(page_zone(page),  in kmem_freepages()
    1647  sub_zone_page_state(page_zone(page),  in kmem_freepages()
memory-failure.c
    227  drain_all_pages(page_zone(p));  in shake_page()
filemap.c
    740  const struct zone *zone = page_zone(page);  in page_waitqueue()
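The filemap.c hit is page_waitqueue(): in this tree every zone carries a hashed table of wait queues, so a task waiting on a page lock is parked on a queue owned by that page's zone (bit_waitqueue() in kernel/sched/wait.c, listed further down, does the same for arbitrary kernel addresses). The helper is roughly:

    static wait_queue_head_t *page_waitqueue(struct page *page)
    {
        const struct zone *zone = page_zone(page);

        /* pick one of the zone's hashed wait queues for this page */
        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
    }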
memcontrol.c
    2180  struct zone *zone = page_zone(page);  in lock_page_lru()
    2196  struct zone *zone = page_zone(page);  in unlock_page_lru()
mempolicy.c
    1930  if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))  in alloc_page_interleave()
slub.c
    1481  mod_zone_page_state(page_zone(page),  in allocate_slab()
    1518  mod_zone_page_state(page_zone(page),  in __free_slab()
huge_memory.c
    1708  struct zone *zone = page_zone(page);  in __split_huge_page_refcount()
/linux-4.4.14/include/linux/ |
vmstat.h
    228  __inc_zone_state(page_zone(page), item);  in __inc_zone_page_state()
    234  __dec_zone_state(page_zone(page), item);  in __dec_zone_page_state()
mm.h
    840  static inline struct zone *page_zone(const struct page *page)  in page_zone() function
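mm.h:840 is the definition itself. In this tree page_zone() decodes the node id and zone index that the page allocator stores in page->flags and indexes into that node's zone array; the body is roughly:

    static inline struct zone *page_zone(const struct page *page)
    {
        /* node id and zone number are both encoded in page->flags */
        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }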
/linux-4.4.14/arch/powerpc/mm/ |
mem.c
    144  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/linux-4.4.14/arch/sh/mm/ |
init.c
    526  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/linux-4.4.14/kernel/sched/ |
wait.c
    489  const struct zone *zone = page_zone(virt_to_page(word));  in bit_waitqueue()
/linux-4.4.14/drivers/base/ |
memory.c
    403  zone = page_zone(first_page);  in show_valid_zones()
/linux-4.4.14/arch/ia64/mm/ |
init.c
    677  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/linux-4.4.14/arch/x86/mm/ |
init_32.c
    844  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
init_64.c
    1024  zone = page_zone(pfn_to_page(start_pfn));  in arch_remove_memory()
/linux-4.4.14/kernel/power/ |
snapshot.c
    1089  if (page_zone(page) != zone)  in saveable_highmem_page()
    1151  if (page_zone(page) != zone)  in saveable_page()
/linux-4.4.14/kernel/ |
fork.c
    217  struct zone *zone = page_zone(virt_to_page(ti));  in account_kernel_stack()
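The fork.c hit charges kernel stacks to the zone that backs the thread_info allocation; account_kernel_stack() in this tree is roughly:

    static void account_kernel_stack(struct thread_info *ti, int account)
    {
        struct zone *zone = page_zone(virt_to_page(ti));

        /* account is +1 when a stack is allocated and -1 when it is freed */
        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
    }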