/linux-4.1.27/mm/

page_owner.c
    271  struct zone *node_zones = pgdat->node_zones;  in init_zones_in_node() (local)
    274  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in init_zones_in_node()

mmzone.c
     33  if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)  in next_zone()
     38  zone = pgdat->node_zones;  in next_zone()

memory_hotplug.c
    258  zone = &pgdat->node_zones[0];  in register_page_bootmem_info_node()
    259  for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {  in register_page_bootmem_info_node()
    452  zone_type = zone - pgdat->node_zones;  in __add_zone()
    716  zone_type = zone - pgdat->node_zones;  in __remove_zone()
   1071  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_present_pages()
   1198  struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;  in should_add_memory_movable()
   1529  present_pages += pgdat->node_zones[zt].present_pages;  in can_offline_normal()
   1536  present_pages += pgdat->node_zones[zt].present_pages;  in can_offline_normal()
   1605  present_pages += pgdat->node_zones[zt].present_pages;  in node_states_check_changes_offline()
   1626  present_pages += pgdat->node_zones[zt].present_pages;  in node_states_check_changes_offline()
   [all …]

nobootmem.c
    152  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_managed_pages()
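Most of the hits above are the same idiom: node_zones is a fixed-size array of MAX_NR_ZONES struct zone entries embedded in each node's pg_data_t, and callers walk it either by pointer (next_zone, reset_node_managed_pages) or by index. A minimal sketch of that walk, with a hypothetical helper name and only the loop shape taken from the listing:

    #include <linux/mmzone.h>

    /* Hypothetical helper: count the zones of a node that actually contain
     * pages.  The pointer walk and bound mirror reset_node_managed_pages(). */
    static int count_populated_zones(pg_data_t *pgdat)
    {
            struct zone *z;
            int populated = 0;

            for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                    if (populated_zone(z))          /* zone has present pages */
                            populated++;

            return populated;
    }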
quicklist.c
     30  struct zone *zones = NODE_DATA(node)->node_zones;  in max_pages()

highmem.c
    119  pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],  in nr_free_highpages()
    123  &pgdat->node_zones[ZONE_MOVABLE],  in nr_free_highpages()
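nr_free_highpages() above indexes node_zones[] directly by zone type and reads a per-zone vmstat counter. A hedged sketch of the same access pattern, with a hypothetical function name and assuming a CONFIG_HIGHMEM build so ZONE_HIGHMEM is defined:

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Hypothetical sketch, in the spirit of nr_free_highpages(): free pages in
     * the highmem-like zones of a single node.  Assumes CONFIG_HIGHMEM. */
    static unsigned long node_free_highmem(int nid)
    {
            pg_data_t *pgdat = NODE_DATA(nid);

            return zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], NR_FREE_PAGES) +
                   zone_page_state(&pgdat->node_zones[ZONE_MOVABLE], NR_FREE_PAGES);
    }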
vmstat.c
    205  zone = &pgdat->node_zones[i];  in set_pgdat_percpu_threshold()
    885  struct zone *node_zones = pgdat->node_zones;  in walk_zones_in_node() (local)
    888  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in walk_zones_in_node()

mm_init.c
     45  zone = &pgdat->node_zones[zoneid];  in mminit_verify_zonelist()

workingset.c
    181  *zone = NODE_DATA(nid)->node_zones + zid;  in unpack_shadow()

page_alloc.c
   2094  struct zone *zone = preferred_zone->zone_pgdat->node_zones;  in reset_alloc_batches()
   3204  managed_pages += pgdat->node_zones[zone_type].managed_pages;  in si_meminfo_node()
   3209  val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;  in si_meminfo_node()
   3210  val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],  in si_meminfo_node()
   3459  zone = pgdat->node_zones + zone_type;  in build_zonelists_node()
   3699  z = &NODE_DATA(node)->node_zones[zone_type];  in build_zonelists_in_zone_order()
   4208  z = &NODE_DATA(nid)->node_zones[zone];  in memmap_init_zone()
   4946  struct zone *zone = pgdat->node_zones + j;  in free_area_init_core()
   5395  struct zone *zone = &pgdat->node_zones[zone_type];  in check_for_memory()
   5697  struct zone *zone = pgdat->node_zones + i;  in calculate_totalreserve_pages()
   [all …]
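si_meminfo_node() and the hotplug checks in memory_hotplug.c get node-level totals by summing per-zone fields (managed_pages, present_pages) over node_zones[]. A minimal sketch of that accumulation, using a hypothetical helper name:

    #include <linux/mmzone.h>

    /* Hypothetical helper: total managed pages of one node, accumulated the
     * same way si_meminfo_node() walks node_zones[]. */
    static unsigned long node_managed_pages(pg_data_t *pgdat)
    {
            unsigned long managed = 0;
            int zone_type;

            for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                    managed += pgdat->node_zones[zone_type].managed_pages;

            return managed;
    }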
bootmem.c
    251  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_managed_pages()

vmscan.c
   2480  while (!populated_zone(zone->zone_pgdat->node_zones +  in shrink_zones()
   2644  zone = &pgdat->node_zones[i];  in pfmemalloc_watermark_ok()
   2936  struct zone *zone = pgdat->node_zones + i;  in pgdat_balanced()
   3120  struct zone *zone = pgdat->node_zones + i;  in balance_pgdat()
   3163  struct zone *zone = pgdat->node_zones + i;  in balance_pgdat()
   3197  struct zone *zone = pgdat->node_zones + i;  in balance_pgdat()

compaction.c
    248  struct zone *zone = &pgdat->node_zones[zoneid];  in reset_isolation_suitable()
   1591  zone = &pgdat->node_zones[zoneid];  in __compact_pgdat()

migrate.c
   1536  struct zone *zone = pgdat->node_zones + z;  in migrate_balanced_pgdat()

page-writeback.c
    203  struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];  in highmem_dirtyable_memory()

hugetlb.c
    824  z = NODE_DATA(nid)->node_zones;  in alloc_gigantic_page()
    825  for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {  in alloc_gigantic_page()
/linux-4.1.27/arch/m32r/mm/

discontig.c
    159  NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;  in zone_sizes_init()
    160  NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;  in zone_sizes_init()
    161  NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;  in zone_sizes_init()
/linux-4.1.27/include/linux/

mmzone.h
    719  struct zone node_zones[MAX_NR_ZONES];  (member)
    836  #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
    875  int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;  in is_highmem()
    935  for (zone = (first_online_pgdat())->node_zones; \
    940  for (zone = (first_online_pgdat())->node_zones; \
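Because node_zones[] is a plain array inside struct pglist_data, a zone's type can be recovered by pointer arithmetic; that is exactly what the zone_idx() macro shown above does, and is_highmem() compares the resulting byte offset. A short usage sketch with a hypothetical function name:

    #include <linux/mmzone.h>

    /* The index of a zone within its node is just the pointer difference from
     * the start of node_zones[], i.e. what zone_idx() expands to. */
    static bool zone_is_movable(struct zone *zone)
    {
            return zone - zone->zone_pgdat->node_zones == ZONE_MOVABLE;
    }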
vmstat.h
    172  struct zone *zones = NODE_DATA(node)->node_zones;  in node_page_state()

mm.h
    880  return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];  in page_zone()
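page_zone() in mm.h ties the two lookups together: the page's node id selects the pg_data_t and the page's zone number indexes node_zones[]. A hedged usage example with a hypothetical function name:

    #include <linux/mm.h>

    /* Hypothetical helper: human-readable zone name for a page.  page_zone()
     * resolves to NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]. */
    static const char *page_zone_name(struct page *page)
    {
            return page_zone(page)->name;   /* e.g. "Normal" or "Movable" */
    }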
/linux-4.1.27/arch/c6x/mm/

init.c
     53  pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =  in paging_init()
/linux-4.1.27/lib/

show_mem.c
     26  struct zone *zone = &pgdat->node_zones[zoneid];  in show_mem()
/linux-4.1.27/arch/ia64/mm/

init.c
    665  zone = pgdat->node_zones +  in arch_add_memory()
    722  struct zone *zone = &pgdat->node_zones[zoneid];  in show_mem()
/linux-4.1.27/arch/powerpc/mm/

mem.c
    130  zone = pgdata->node_zones +  in arch_add_memory()
/linux-4.1.27/arch/sh/mm/

init.c
    498  ret = __add_pages(nid, pgdat->node_zones +  in arch_add_memory()
/linux-4.1.27/arch/x86/mm/

init_32.c
    829  struct zone *zone = pgdata->node_zones +  in arch_add_memory()

init_64.c
    693  struct zone *zone = pgdat->node_zones +  in arch_add_memory()
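The arch_add_memory() implementations above all follow the same shape: pick a target zone by offsetting into the node's node_zones[] and hand the new pfn range to __add_pages(). A hedged sketch of that shape, with simplified zone selection and no error handling; real architectures may adjust the zone choice (for example via zone_for_memory()):

    #include <linux/mm.h>
    #include <linux/memory_hotplug.h>

    /* Hedged sketch of the arch_add_memory() pattern seen above.
     * Assumption: the hot-added range is ordinary RAM, so ZONE_NORMAL is used. */
    static int example_add_memory(int nid, u64 start, u64 size)
    {
            struct pglist_data *pgdat = NODE_DATA(nid);
            struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
            unsigned long start_pfn = start >> PAGE_SHIFT;
            unsigned long nr_pages = size >> PAGE_SHIFT;

            return __add_pages(nid, zone, start_pfn, nr_pages);
    }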
/linux-4.1.27/arch/tile/mm/

init.c
    869  struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;  in arch_add_memory()
/linux-4.1.27/arch/parisc/mm/

init.c
    660  struct zone *zone = &pgdat->node_zones[zoneid];  in show_mem()
/linux-4.1.27/kernel/

kexec.c
   1975  VMCOREINFO_OFFSET(pglist_data, node_zones);  in crash_save_vmcoreinfo_init()