pgdat              35 arch/c6x/mm/init.c 	struct pglist_data *pgdat = NODE_DATA(0);
pgdat              53 arch/c6x/mm/init.c 	pgdat->node_zones[ZONE_NORMAL].zone_start_pfn =
pgdat              57 arch/ia64/include/asm/nodedata.h #define LOCAL_DATA_ADDR(pgdat)  			\
pgdat              58 arch/ia64/include/asm/nodedata.h 	((struct ia64_node_data *)((u64)(pgdat) + 	\
pgdat             643 arch/ia64/mm/discontig.c void arch_free_nodedata(pg_data_t *pgdat)
pgdat             645 arch/ia64/mm/discontig.c 	kfree(pgdat);
pgdat             377 arch/ia64/mm/init.c 	pg_data_t *pgdat = NODE_DATA(node);
pgdat             379 arch/ia64/mm/init.c 	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
pgdat             381 arch/ia64/mm/init.c 	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
pgdat             424 arch/ia64/mm/init.c 	return hole_next_pfn - pgdat->node_start_pfn;
pgdat             158 arch/m68k/include/asm/page_mm.h 	struct pglist_data *pgdat;					\
pgdat             159 arch/m68k/include/asm/page_mm.h 	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
pgdat             160 arch/m68k/include/asm/page_mm.h 	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
pgdat             164 arch/m68k/include/asm/page_mm.h 	struct pglist_data *pgdat;					\
pgdat             165 arch/m68k/include/asm/page_mm.h 	pgdat = &pg_data_map[page_to_nid(__p)];				\
pgdat             166 arch/m68k/include/asm/page_mm.h 	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
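
The two m68k macros above implement pfn_to_page()/page_to_pfn() by offsetting into the owning node's mem_map. A minimal C sketch of that round-trip arithmetic, with hypothetical helper names and the pgdat lookup assumed to have happened already:

	#include <linux/mmzone.h>

	/* page for a pfn: node_mem_map is indexed from the node's first pfn */
	static struct page *node_pfn_to_page(struct pglist_data *pgdat,
					     unsigned long pfn)
	{
		return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
	}

	/* pfn for a page: the inverse offset calculation */
	static unsigned long node_page_to_pfn(struct pglist_data *pgdat,
					      struct page *page)
	{
		return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
	}
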
pgdat             344 arch/sh/mm/init.c 	pg_data_t *pgdat;
pgdat             347 arch/sh/mm/init.c 	for_each_online_pgdat(pgdat)
pgdat             349 arch/sh/mm/init.c 				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
pgdat             366 drivers/base/node.c 	struct pglist_data *pgdat = NODE_DATA(nid);
pgdat             371 drivers/base/node.c 	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
pgdat             372 drivers/base/node.c 	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
pgdat             388 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
pgdat             389 drivers/base/node.c 				node_page_state(pgdat, NR_ACTIVE_FILE)),
pgdat             390 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
pgdat             391 drivers/base/node.c 				node_page_state(pgdat, NR_INACTIVE_FILE)),
pgdat             392 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
pgdat             393 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
pgdat             394 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
pgdat             395 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
pgdat             396 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
pgdat             434 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
pgdat             435 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
pgdat             436 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
pgdat             437 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
pgdat             438 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
pgdat             442 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
pgdat             444 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
pgdat             446 drivers/base/node.c 			      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
pgdat             452 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
pgdat             454 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
pgdat             456 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
pgdat             458 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_FILE_THPS) *
pgdat             460 drivers/base/node.c 		       nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
pgdat             494 drivers/base/node.c 	struct pglist_data *pgdat = NODE_DATA(nid);
pgdat             513 drivers/base/node.c 			     node_page_state(pgdat, i));
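
The node.c hits above come from the per-node meminfo sysfs handler, which reads node-wide counters with node_page_state() and prints them in kilobytes. A hedged sketch of that read path (report_node_dirty() and the local K() macro are illustrative, not the driver's code):

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	#define K(x) ((x) << (PAGE_SHIFT - 10))	/* pages -> KiB */

	static void report_node_dirty(int nid)
	{
		struct pglist_data *pgdat = NODE_DATA(nid);

		pr_info("Node %d Dirty: %lu kB Writeback: %lu kB\n", nid,
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)));
	}
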
pgdat             846 drivers/gpu/drm/amd/amdkfd/kfd_crat.c 	pg_data_t *pgdat;
pgdat             866 drivers/gpu/drm/amd/amdkfd/kfd_crat.c 	pgdat = NODE_DATA(numa_node_id);
pgdat             868 drivers/gpu/drm/amd/amdkfd/kfd_crat.c 		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
pgdat              98 include/linux/compaction.h extern void reset_isolation_suitable(pg_data_t *pgdat);
pgdat             185 include/linux/compaction.h extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
pgdat             188 include/linux/compaction.h static inline void reset_isolation_suitable(pg_data_t *pgdat)
pgdat             235 include/linux/compaction.h static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
pgdat             126 include/linux/memblock.h void reset_node_managed_pages(pg_data_t *pgdat);
pgdat              60 include/linux/memcontrol.h 	pg_data_t *pgdat;
pgdat             405 include/linux/memcontrol.h static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
pgdat             412 include/linux/memcontrol.h 		lruvec = node_lruvec(pgdat);
pgdat             416 include/linux/memcontrol.h 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
pgdat             424 include/linux/memcontrol.h 	if (unlikely(lruvec->pgdat != pgdat))
pgdat             425 include/linux/memcontrol.h 		lruvec->pgdat = pgdat;
pgdat             723 include/linux/memcontrol.h 	pg_data_t *pgdat = page_pgdat(page);
pgdat             728 include/linux/memcontrol.h 		__mod_node_page_state(pgdat, idx, val);
pgdat             732 include/linux/memcontrol.h 	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
pgdat             746 include/linux/memcontrol.h unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
pgdat             905 include/linux/memcontrol.h static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
pgdat             908 include/linux/memcontrol.h 	return node_lruvec(pgdat);
pgdat             912 include/linux/memcontrol.h 						    struct pglist_data *pgdat)
pgdat             914 include/linux/memcontrol.h 	return &pgdat->lruvec;
pgdat            1139 include/linux/memcontrol.h unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
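
In this kernel, mem_cgroup_lruvec() takes (pgdat, memcg) and falls back to the node's own lruvec when memcg is disabled, as the two definitions above show. A sketch of the per-page pattern built on it (mod_page_lruvec_state() is a hypothetical wrapper mirroring the inline at memcontrol.h line 723 above):

	#include <linux/memcontrol.h>
	#include <linux/mm.h>

	static void mod_page_lruvec_state(struct page *page,
					  enum node_stat_item idx, int val)
	{
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

		/* resolve the page's node, then its memcg lruvec on that node */
		lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
		__mod_lruvec_state(lruvec, idx, val);
	}
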
pgdat             167 include/linux/memory_hotplug.h extern void arch_free_nodedata(pg_data_t *pgdat);
pgdat             168 include/linux/memory_hotplug.h extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
pgdat             173 include/linux/memory_hotplug.h #define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)
pgdat             190 include/linux/memory_hotplug.h #define generic_free_nodedata(pgdat)	kfree(pgdat)
pgdat             193 include/linux/memory_hotplug.h static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
pgdat             195 include/linux/memory_hotplug.h 	node_data[nid] = pgdat;
pgdat             206 include/linux/memory_hotplug.h static inline void generic_free_nodedata(pg_data_t *pgdat)
pgdat             209 include/linux/memory_hotplug.h static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
pgdat             216 include/linux/memory_hotplug.h extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
pgdat             218 include/linux/memory_hotplug.h static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
pgdat             263 include/linux/memory_hotplug.h static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
pgdat             289 include/linux/memory_hotplug.h void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
pgdat             291 include/linux/memory_hotplug.h 	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
pgdat             294 include/linux/memory_hotplug.h void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
pgdat             296 include/linux/memory_hotplug.h 	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
pgdat             299 include/linux/memory_hotplug.h void pgdat_resize_init(struct pglist_data *pgdat)
pgdat             301 include/linux/memory_hotplug.h 	spin_lock_init(&pgdat->node_size_lock);
pgdat             309 include/linux/memory_hotplug.h static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
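
pgdat_resize_lock()/pgdat_resize_unlock() above serialize changes to a node's span during memory hot-plug. A minimal sketch of the protocol, mirroring resize_pgdat_range() further down in this listing (grow_node_span() is a hypothetical name):

	#include <linux/kernel.h>
	#include <linux/memory_hotplug.h>
	#include <linux/mmzone.h>

	static void grow_node_span(struct pglist_data *pgdat,
				   unsigned long start_pfn, unsigned long nr_pages)
	{
		unsigned long old_end_pfn, flags;

		pgdat_resize_lock(pgdat, &flags);
		old_end_pfn = pgdat_end_pfn(pgdat);
		if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
			pgdat->node_start_pfn = start_pfn;
		pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
					    pgdat->node_start_pfn;
		pgdat_resize_unlock(pgdat, &flags);
	}
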
pgdat              30 include/linux/mm_inline.h 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat              33 include/linux/mm_inline.h 	__mod_zone_page_state(&pgdat->node_zones[zid],
pgdat             306 include/linux/mmzone.h 	struct pglist_data *pgdat;
pgdat             786 include/linux/mmzone.h #define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
pgdat             788 include/linux/mmzone.h #define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
pgdat             795 include/linux/mmzone.h static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
pgdat             797 include/linux/mmzone.h 	return &pgdat->lruvec;
pgdat             800 include/linux/mmzone.h static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
pgdat             802 include/linux/mmzone.h 	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
pgdat             805 include/linux/mmzone.h static inline bool pgdat_is_empty(pg_data_t *pgdat)
pgdat             807 include/linux/mmzone.h 	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
pgdat             812 include/linux/mmzone.h void build_all_zonelists(pg_data_t *pgdat);
pgdat             835 include/linux/mmzone.h 	return lruvec->pgdat;
pgdat             976 include/linux/mmzone.h extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
pgdat             983 include/linux/mmzone.h #define for_each_online_pgdat(pgdat)			\
pgdat             984 include/linux/mmzone.h 	for (pgdat = first_online_pgdat();		\
pgdat             985 include/linux/mmzone.h 	     pgdat;					\
pgdat             986 include/linux/mmzone.h 	     pgdat = next_online_pgdat(pgdat))
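
for_each_online_pgdat() above walks every online node, and pgdat_end_pfn()/pgdat_is_empty() summarize a node's span. A usage sketch under those definitions (dump_node_spans() is hypothetical):

	#include <linux/kernel.h>
	#include <linux/mmzone.h>

	static void dump_node_spans(void)
	{
		struct pglist_data *pgdat;

		for_each_online_pgdat(pgdat) {
			if (pgdat_is_empty(pgdat))
				continue;	/* no spanned pages on this node */
			pr_info("node %d: pfn %lu - %lu\n", pgdat->node_id,
				pgdat->node_start_pfn, pgdat_end_pfn(pgdat));
		}
	}
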
pgdat             123 include/linux/node.h 		struct pglist_data *pgdat = NODE_DATA(nid);
pgdat             124 include/linux/node.h 		unsigned long start_pfn = pgdat->node_start_pfn;
pgdat             125 include/linux/node.h 		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
pgdat              40 include/linux/page_ext.h extern void pgdat_page_ext_init(struct pglist_data *pgdat);
pgdat              66 include/linux/page_ext.h static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
pgdat              19 include/linux/page_owner.h 					pg_data_t *pgdat, struct zone *zone);
pgdat             361 include/linux/swap.h 						pg_data_t *pgdat,
pgdat             170 include/linux/vmstat.h static inline void node_page_state_add(long x, struct pglist_data *pgdat,
pgdat             173 include/linux/vmstat.h 	atomic_long_add(x, &pgdat->vm_stat[item]);
pgdat             235 include/linux/vmstat.h extern unsigned long node_page_state(struct pglist_data *pgdat,
pgdat             278 include/linux/vmstat.h void set_pgdat_percpu_threshold(pg_data_t *pgdat,
pgdat             292 include/linux/vmstat.h static inline void __mod_node_page_state(struct pglist_data *pgdat,
pgdat             295 include/linux/vmstat.h 	node_page_state_add(delta, pgdat, item);
pgdat             304 include/linux/vmstat.h static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             306 include/linux/vmstat.h 	atomic_long_inc(&pgdat->vm_stat[item]);
pgdat             316 include/linux/vmstat.h static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             318 include/linux/vmstat.h 	atomic_long_dec(&pgdat->vm_stat[item]);
pgdat             364 include/linux/vmstat.h #define set_pgdat_percpu_threshold(pgdat, callback) { }
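
The vmstat.h helpers above maintain per-node counters in pgdat->vm_stat; the variants shown here are the non-SMP ones that update the atomics directly. A small sketch combining an update with a read-back (account_one_file_page() is illustrative):

	#include <linux/kernel.h>
	#include <linux/vmstat.h>

	static void account_one_file_page(struct pglist_data *pgdat)
	{
		/* on SMP the __inc variant assumes a non-preemptible caller */
		__inc_node_state(pgdat, NR_FILE_PAGES);
		pr_debug("node %d file pages: %lu\n", pgdat->node_id,
			 node_page_state(pgdat, NR_FILE_PAGES));
	}
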
pgdat             345 include/linux/writeback.h bool node_dirty_ok(struct pglist_data *pgdat);
pgdat              13 lib/show_mem.c 	pg_data_t *pgdat;
pgdat              19 lib/show_mem.c 	for_each_online_pgdat(pgdat) {
pgdat              23 lib/show_mem.c 			struct zone *zone = &pgdat->node_zones[zoneid];
pgdat             369 mm/compaction.c void reset_isolation_suitable(pg_data_t *pgdat)
pgdat             374 mm/compaction.c 		struct zone *zone = &pgdat->node_zones[zoneid];
pgdat             749 mm/compaction.c static bool too_many_isolated(pg_data_t *pgdat)
pgdat             753 mm/compaction.c 	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
pgdat             754 mm/compaction.c 			node_page_state(pgdat, NR_INACTIVE_ANON);
pgdat             755 mm/compaction.c 	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
pgdat             756 mm/compaction.c 			node_page_state(pgdat, NR_ACTIVE_ANON);
pgdat             757 mm/compaction.c 	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
pgdat             758 mm/compaction.c 			node_page_state(pgdat, NR_ISOLATED_ANON);
pgdat             785 mm/compaction.c 	pg_data_t *pgdat = cc->zone->zone_pgdat;
pgdat             801 mm/compaction.c 	while (unlikely(too_many_isolated(pgdat))) {
pgdat             850 mm/compaction.c 		    && compact_unlock_should_abort(&pgdat->lru_lock,
pgdat             923 mm/compaction.c 					spin_unlock_irqrestore(&pgdat->lru_lock,
pgdat             953 mm/compaction.c 			locked = compact_lock_irqsave(&pgdat->lru_lock,
pgdat             978 mm/compaction.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            1020 mm/compaction.c 				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat            1047 mm/compaction.c 		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat            2411 mm/compaction.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            2425 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
pgdat            2495 mm/compaction.c static inline bool kcompactd_work_requested(pg_data_t *pgdat)
pgdat            2497 mm/compaction.c 	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
pgdat            2500 mm/compaction.c static bool kcompactd_node_suitable(pg_data_t *pgdat)
pgdat            2504 mm/compaction.c 	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
pgdat            2507 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
pgdat            2512 mm/compaction.c 		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
pgdat            2520 mm/compaction.c static void kcompactd_do_work(pg_data_t *pgdat)
pgdat            2529 mm/compaction.c 		.order = pgdat->kcompactd_max_order,
pgdat            2530 mm/compaction.c 		.search_order = pgdat->kcompactd_max_order,
pgdat            2531 mm/compaction.c 		.classzone_idx = pgdat->kcompactd_classzone_idx,
pgdat            2536 mm/compaction.c 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
pgdat            2543 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
pgdat            2592 mm/compaction.c 	if (pgdat->kcompactd_max_order <= cc.order)
pgdat            2593 mm/compaction.c 		pgdat->kcompactd_max_order = 0;
pgdat            2594 mm/compaction.c 	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
pgdat            2595 mm/compaction.c 		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
pgdat            2598 mm/compaction.c void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
pgdat            2603 mm/compaction.c 	if (pgdat->kcompactd_max_order < order)
pgdat            2604 mm/compaction.c 		pgdat->kcompactd_max_order = order;
pgdat            2606 mm/compaction.c 	if (pgdat->kcompactd_classzone_idx > classzone_idx)
pgdat            2607 mm/compaction.c 		pgdat->kcompactd_classzone_idx = classzone_idx;
pgdat            2613 mm/compaction.c 	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
pgdat            2616 mm/compaction.c 	if (!kcompactd_node_suitable(pgdat))
pgdat            2619 mm/compaction.c 	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
pgdat            2621 mm/compaction.c 	wake_up_interruptible(&pgdat->kcompactd_wait);
pgdat            2630 mm/compaction.c 	pg_data_t *pgdat = (pg_data_t*)p;
pgdat            2633 mm/compaction.c 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
pgdat            2640 mm/compaction.c 	pgdat->kcompactd_max_order = 0;
pgdat            2641 mm/compaction.c 	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
pgdat            2646 mm/compaction.c 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
pgdat            2647 mm/compaction.c 		wait_event_freezable(pgdat->kcompactd_wait,
pgdat            2648 mm/compaction.c 				kcompactd_work_requested(pgdat));
pgdat            2651 mm/compaction.c 		kcompactd_do_work(pgdat);
pgdat            2664 mm/compaction.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            2667 mm/compaction.c 	if (pgdat->kcompactd)
pgdat            2670 mm/compaction.c 	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
pgdat            2671 mm/compaction.c 	if (IS_ERR(pgdat->kcompactd)) {
pgdat            2673 mm/compaction.c 		ret = PTR_ERR(pgdat->kcompactd);
pgdat            2674 mm/compaction.c 		pgdat->kcompactd = NULL;
pgdat            2704 mm/compaction.c 		pg_data_t *pgdat = NODE_DATA(nid);
pgdat            2707 mm/compaction.c 		mask = cpumask_of_node(pgdat->node_id);
pgdat            2711 mm/compaction.c 			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
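
The compaction.c lines above show the kcompactd handshake: a waker records the most demanding request in pgdat->kcompactd_max_order and pgdat->kcompactd_classzone_idx and wakes pgdat->kcompactd_wait; the daemon consumes and resets those fields. A condensed sketch of the waker side, following wakeup_kcompactd() (request_compaction() is a hypothetical name and skips the suitability checks):

	#include <linux/mmzone.h>
	#include <linux/wait.h>

	static void request_compaction(pg_data_t *pgdat, int order, int classzone_idx)
	{
		if (pgdat->kcompactd_max_order < order)
			pgdat->kcompactd_max_order = order;
		if (pgdat->kcompactd_classzone_idx > classzone_idx)
			pgdat->kcompactd_classzone_idx = classzone_idx;

		if (wq_has_sleeper(&pgdat->kcompactd_wait))
			wake_up_interruptible(&pgdat->kcompactd_wait);
	}
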
pgdat             495 mm/huge_memory.c 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
pgdat             500 mm/huge_memory.c 		return &pgdat->deferred_split_queue;
pgdat             505 mm/huge_memory.c 	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
pgdat             507 mm/huge_memory.c 	return &pgdat->deferred_split_queue;
pgdat            2502 mm/huge_memory.c 	pg_data_t *pgdat = page_pgdat(head);
pgdat            2508 mm/huge_memory.c 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
pgdat            2558 mm/huge_memory.c 	spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat             479 mm/internal.h  static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
pgdat            1948 mm/memblock.c  void reset_node_managed_pages(pg_data_t *pgdat)
pgdat            1952 mm/memblock.c  	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
pgdat            1958 mm/memblock.c  	struct pglist_data *pgdat;
pgdat            1963 mm/memblock.c  	for_each_online_pgdat(pgdat)
pgdat            1964 mm/memblock.c  		reset_node_managed_pages(pgdat);
pgdat             738 mm/memcontrol.c 	pg_data_t *pgdat = lruvec_pgdat(lruvec);
pgdat             744 mm/memcontrol.c 	__mod_node_page_state(pgdat, idx, val);
pgdat             762 mm/memcontrol.c 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
pgdat             772 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
pgdat             781 mm/memcontrol.c 		__mod_node_page_state(pgdat, idx, val);
pgdat             783 mm/memcontrol.c 		lruvec = mem_cgroup_lruvec(pgdat, memcg);
pgdat            1067 mm/memcontrol.c 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
pgdat            1247 mm/memcontrol.c struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
pgdat            1254 mm/memcontrol.c 		lruvec = &pgdat->lruvec;
pgdat            1274 mm/memcontrol.c 	if (unlikely(lruvec->pgdat != pgdat))
pgdat            1275 mm/memcontrol.c 		lruvec->pgdat = pgdat;
pgdat            1710 mm/memcontrol.c 				   pg_data_t *pgdat,
pgdat            1720 mm/memcontrol.c 		.pgdat = pgdat,
pgdat            1751 mm/memcontrol.c 					pgdat, &nr_scanned);
pgdat            2731 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
pgdat            2733 mm/memcontrol.c 	spin_lock_irq(&pgdat->lru_lock);
pgdat            2737 mm/memcontrol.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            2747 mm/memcontrol.c 	pg_data_t *pgdat = page_pgdat(page);
pgdat            2752 mm/memcontrol.c 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            2757 mm/memcontrol.c 	spin_unlock_irq(&pgdat->lru_lock);
pgdat            3224 mm/memcontrol.c unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
pgdat            3239 mm/memcontrol.c 	mctz = soft_limit_tree_node(pgdat->node_id);
pgdat            3263 mm/memcontrol.c 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
pgdat            3973 mm/memcontrol.c 		pg_data_t *pgdat;
pgdat            3979 mm/memcontrol.c 		for_each_online_pgdat(pgdat) {
pgdat            3980 mm/memcontrol.c 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
pgdat            5489 mm/memcontrol.c 	struct pglist_data *pgdat;
pgdat            5513 mm/memcontrol.c 	pgdat = page_pgdat(page);
pgdat            5514 mm/memcontrol.c 	from_vec = mem_cgroup_lruvec(pgdat, from);
pgdat            5515 mm/memcontrol.c 	to_vec = mem_cgroup_lruvec(pgdat, to);
pgdat             225 mm/memory_hotplug.c void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
pgdat             228 mm/memory_hotplug.c 	int node = pgdat->node_id;
pgdat             232 mm/memory_hotplug.c 	page = virt_to_page(pgdat);
pgdat             237 mm/memory_hotplug.c 	pfn = pgdat->node_start_pfn;
pgdat             238 mm/memory_hotplug.c 	end_pfn = pgdat_end_pfn(pgdat);
pgdat             439 mm/memory_hotplug.c static void update_pgdat_span(struct pglist_data *pgdat)
pgdat             444 mm/memory_hotplug.c 	for (zone = pgdat->node_zones;
pgdat             445 mm/memory_hotplug.c 	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
pgdat             464 mm/memory_hotplug.c 	pgdat->node_start_pfn = node_start_pfn;
pgdat             465 mm/memory_hotplug.c 	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
pgdat             472 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
pgdat             489 mm/memory_hotplug.c 	update_pgdat_span(pgdat);
pgdat             686 mm/memory_hotplug.c static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
pgdat             689 mm/memory_hotplug.c 	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
pgdat             691 mm/memory_hotplug.c 	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
pgdat             692 mm/memory_hotplug.c 		pgdat->node_start_pfn = start_pfn;
pgdat             694 mm/memory_hotplug.c 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
pgdat             705 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
pgdat             706 mm/memory_hotplug.c 	int nid = pgdat->node_id;
pgdat             712 mm/memory_hotplug.c 	pgdat_resize_lock(pgdat, &flags);
pgdat             718 mm/memory_hotplug.c 	resize_pgdat_range(pgdat, start_pfn, nr_pages);
pgdat             719 mm/memory_hotplug.c 	pgdat_resize_unlock(pgdat, &flags);
pgdat             741 mm/memory_hotplug.c 	struct pglist_data *pgdat = NODE_DATA(nid);
pgdat             745 mm/memory_hotplug.c 		struct zone *zone = &pgdat->node_zones[zid];
pgdat             751 mm/memory_hotplug.c 	return &pgdat->node_zones[ZONE_NORMAL];
pgdat             881 mm/memory_hotplug.c static void reset_node_present_pages(pg_data_t *pgdat)
pgdat             885 mm/memory_hotplug.c 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
pgdat             888 mm/memory_hotplug.c 	pgdat->node_present_pages = 0;
pgdat             894 mm/memory_hotplug.c 	struct pglist_data *pgdat;
pgdat             897 mm/memory_hotplug.c 	pgdat = NODE_DATA(nid);
pgdat             898 mm/memory_hotplug.c 	if (!pgdat) {
pgdat             899 mm/memory_hotplug.c 		pgdat = arch_alloc_nodedata(nid);
pgdat             900 mm/memory_hotplug.c 		if (!pgdat)
pgdat             903 mm/memory_hotplug.c 		pgdat->per_cpu_nodestats =
pgdat             905 mm/memory_hotplug.c 		arch_refresh_nodedata(nid, pgdat);
pgdat             913 mm/memory_hotplug.c 		pgdat->nr_zones = 0;
pgdat             914 mm/memory_hotplug.c 		pgdat->kswapd_order = 0;
pgdat             915 mm/memory_hotplug.c 		pgdat->kswapd_classzone_idx = 0;
pgdat             919 mm/memory_hotplug.c 			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
pgdat             926 mm/memory_hotplug.c 	pgdat->node_id = nid;
pgdat             927 mm/memory_hotplug.c 	pgdat->node_start_pfn = start_pfn;
pgdat             936 mm/memory_hotplug.c 	build_all_zonelists(pgdat);
pgdat             943 mm/memory_hotplug.c 	reset_node_managed_pages(pgdat);
pgdat             944 mm/memory_hotplug.c 	reset_node_present_pages(pgdat);
pgdat             946 mm/memory_hotplug.c 	return pgdat;
pgdat             951 mm/memory_hotplug.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat             954 mm/memory_hotplug.c 	free_percpu(pgdat->per_cpu_nodestats);
pgdat             955 mm/memory_hotplug.c 	arch_free_nodedata(pgdat);
pgdat             973 mm/memory_hotplug.c 	pg_data_t *pgdat;
pgdat             979 mm/memory_hotplug.c 	pgdat = hotadd_new_pgdat(nid, start);
pgdat             980 mm/memory_hotplug.c 	if (!pgdat) {
pgdat            1424 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
pgdat            1441 mm/memory_hotplug.c 		present_pages += pgdat->node_zones[zt].present_pages;
pgdat            1454 mm/memory_hotplug.c 	present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages;
pgdat            1469 mm/memory_hotplug.c 	present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;
pgdat            1650 mm/memory_hotplug.c static int check_cpu_on_node(pg_data_t *pgdat)
pgdat            1655 mm/memory_hotplug.c 		if (cpu_to_node(cpu) == pgdat->node_id)
pgdat            1689 mm/memory_hotplug.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            1697 mm/memory_hotplug.c 	if (pgdat->node_spanned_pages)
pgdat            1709 mm/memory_hotplug.c 	if (check_cpu_on_node(pgdat))
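
Several memory_hotplug.c helpers above share one idiom: walk the node's zone array, clear a per-zone field, then fix up the node-level total. A sketch mirroring reset_node_present_pages():

	#include <linux/mmzone.h>

	static void node_clear_present(pg_data_t *pgdat)
	{
		struct zone *z;

		for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
			z->present_pages = 0;
		pgdat->node_present_pages = 0;
	}
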
pgdat            1883 mm/migrate.c   static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
pgdat            1888 mm/migrate.c   	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
pgdat            1889 mm/migrate.c   		struct zone *zone = pgdat->node_zones + z;
pgdat            1920 mm/migrate.c   static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
pgdat            1927 mm/migrate.c   	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
pgdat            1972 mm/migrate.c   	pg_data_t *pgdat = NODE_DATA(node);
pgdat            1992 mm/migrate.c   	isolated = numamigrate_isolate_page(pgdat, page);
pgdat            2031 mm/migrate.c   	pg_data_t *pgdat = NODE_DATA(node);
pgdat            2044 mm/migrate.c   	isolated = numamigrate_isolate_page(pgdat, page);
pgdat             185 mm/mlock.c     	pg_data_t *pgdat = page_pgdat(page);
pgdat             197 mm/mlock.c     	spin_lock_irq(&pgdat->lru_lock);
pgdat             209 mm/mlock.c     		spin_unlock_irq(&pgdat->lru_lock);
pgdat             216 mm/mlock.c     	spin_unlock_irq(&pgdat->lru_lock);
pgdat              34 mm/mm_init.c   		pg_data_t *pgdat = NODE_DATA(nid);
pgdat              46 mm/mm_init.c   			zonelist = &pgdat->node_zonelists[listid];
pgdat              47 mm/mm_init.c   			zone = &pgdat->node_zones[zoneid];
pgdat              18 mm/mmzone.c    struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
pgdat              20 mm/mmzone.c    	int nid = next_online_node(pgdat->node_id);
pgdat              32 mm/mmzone.c    	pg_data_t *pgdat = zone->zone_pgdat;
pgdat              34 mm/mmzone.c    	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
pgdat              37 mm/mmzone.c    		pgdat = next_online_pgdat(pgdat);
pgdat              38 mm/mmzone.c    		if (pgdat)
pgdat              39 mm/mmzone.c    			zone = pgdat->node_zones;
pgdat             277 mm/page-writeback.c static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
pgdat             283 mm/page-writeback.c 		struct zone *zone = pgdat->node_zones + z;
pgdat             296 mm/page-writeback.c 	nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
pgdat             298 mm/page-writeback.c 	nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
pgdat             299 mm/page-writeback.c 	nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
pgdat             477 mm/page-writeback.c static unsigned long node_dirty_limit(struct pglist_data *pgdat)
pgdat             479 mm/page-writeback.c 	unsigned long node_memory = node_dirtyable_memory(pgdat);
pgdat             502 mm/page-writeback.c bool node_dirty_ok(struct pglist_data *pgdat)
pgdat             504 mm/page-writeback.c 	unsigned long limit = node_dirty_limit(pgdat);
pgdat             507 mm/page-writeback.c 	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
pgdat             508 mm/page-writeback.c 	nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
pgdat             509 mm/page-writeback.c 	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
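
node_dirty_ok() above sums the node's dirty, unstable-NFS and writeback pages and tests them against node_dirty_limit(). A condensed sketch assuming access to the file-local node_dirty_limit(); the final comparison is assumed from context, since the search only shows lines matching "pgdat":

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	static bool node_dirty_ok_sketch(struct pglist_data *pgdat)
	{
		unsigned long limit = node_dirty_limit(pgdat);
		unsigned long nr_pages = 0;

		nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
		nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
		nr_pages += node_page_state(pgdat, NR_WRITEBACK);

		return nr_pages <= limit;	/* assumed: within the node limit */
	}
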
pgdat            1361 mm/page_alloc.c 	pg_data_t *pgdat;
pgdat            1368 mm/page_alloc.c 	pgdat = NODE_DATA(nid);
pgdat            1371 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zid];
pgdat            1770 mm/page_alloc.c 	pg_data_t *pgdat = data;
pgdat            1771 mm/page_alloc.c 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
pgdat            1783 mm/page_alloc.c 	pgdat_resize_lock(pgdat, &flags);
pgdat            1784 mm/page_alloc.c 	first_init_pfn = pgdat->first_deferred_pfn;
pgdat            1786 mm/page_alloc.c 		pgdat_resize_unlock(pgdat, &flags);
pgdat            1792 mm/page_alloc.c 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
pgdat            1793 mm/page_alloc.c 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
pgdat            1794 mm/page_alloc.c 	pgdat->first_deferred_pfn = ULONG_MAX;
pgdat            1798 mm/page_alloc.c 		zone = pgdat->node_zones + zid;
pgdat            1816 mm/page_alloc.c 	pgdat_resize_unlock(pgdat, &flags);
pgdat            1822 mm/page_alloc.c 		pgdat->node_id,	nr_pages, jiffies_to_msecs(jiffies - start));
pgdat            1847 mm/page_alloc.c 	pg_data_t *pgdat = zone->zone_pgdat;
pgdat            1848 mm/page_alloc.c 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
pgdat            1854 mm/page_alloc.c 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
pgdat            1857 mm/page_alloc.c 	pgdat_resize_lock(pgdat, &flags);
pgdat            1866 mm/page_alloc.c 		pgdat_resize_unlock(pgdat, &flags);
pgdat            1874 mm/page_alloc.c 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
pgdat            1875 mm/page_alloc.c 		pgdat_resize_unlock(pgdat, &flags);
pgdat            1882 mm/page_alloc.c 		pgdat->first_deferred_pfn = ULONG_MAX;
pgdat            1883 mm/page_alloc.c 		pgdat_resize_unlock(pgdat, &flags);
pgdat            1908 mm/page_alloc.c 	pgdat->first_deferred_pfn = spfn;
pgdat            1909 mm/page_alloc.c 	pgdat_resize_unlock(pgdat, &flags);
pgdat            5164 mm/page_alloc.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            5167 mm/page_alloc.c 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
pgdat            5169 mm/page_alloc.c 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
pgdat            5173 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zone_type];
pgdat            5253 mm/page_alloc.c 	pg_data_t *pgdat;
pgdat            5289 mm/page_alloc.c 	for_each_online_pgdat(pgdat) {
pgdat            5290 mm/page_alloc.c 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
pgdat            5314 mm/page_alloc.c 			pgdat->node_id,
pgdat            5315 mm/page_alloc.c 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
pgdat            5316 mm/page_alloc.c 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
pgdat            5317 mm/page_alloc.c 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
pgdat            5318 mm/page_alloc.c 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
pgdat            5319 mm/page_alloc.c 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
pgdat            5320 mm/page_alloc.c 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
pgdat            5321 mm/page_alloc.c 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
pgdat            5322 mm/page_alloc.c 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
pgdat            5323 mm/page_alloc.c 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
pgdat            5324 mm/page_alloc.c 			K(node_page_state(pgdat, NR_WRITEBACK)),
pgdat            5325 mm/page_alloc.c 			K(node_page_state(pgdat, NR_SHMEM)),
pgdat            5327 mm/page_alloc.c 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
pgdat            5328 mm/page_alloc.c 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
pgdat            5330 mm/page_alloc.c 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
pgdat            5332 mm/page_alloc.c 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
pgdat            5333 mm/page_alloc.c 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
pgdat            5334 mm/page_alloc.c 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
pgdat            5449 mm/page_alloc.c static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
pgdat            5457 mm/page_alloc.c 		zone = pgdat->node_zones + zone_type;
pgdat            5587 mm/page_alloc.c static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
pgdat            5593 mm/page_alloc.c 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
pgdat            5610 mm/page_alloc.c static void build_thisnode_zonelists(pg_data_t *pgdat)
pgdat            5615 mm/page_alloc.c 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
pgdat            5616 mm/page_alloc.c 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
pgdat            5629 mm/page_alloc.c static void build_zonelists(pg_data_t *pgdat)
pgdat            5637 mm/page_alloc.c 	local_node = pgdat->node_id;
pgdat            5658 mm/page_alloc.c 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
pgdat            5659 mm/page_alloc.c 	build_thisnode_zonelists(pgdat);
pgdat            5684 mm/page_alloc.c static void build_zonelists(pg_data_t *pgdat)
pgdat            5690 mm/page_alloc.c 	local_node = pgdat->node_id;
pgdat            5692 mm/page_alloc.c 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
pgdat            5693 mm/page_alloc.c 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
pgdat            5763 mm/page_alloc.c 			pg_data_t *pgdat = NODE_DATA(nid);
pgdat            5765 mm/page_alloc.c 			build_zonelists(pgdat);
pgdat            5818 mm/page_alloc.c void __ref build_all_zonelists(pg_data_t *pgdat)
pgdat            5823 mm/page_alloc.c 		__build_all_zonelists(pgdat);
pgdat            5952 mm/page_alloc.c 	struct pglist_data *pgdat = zone->zone_pgdat;
pgdat            5956 mm/page_alloc.c 	int nid = pgdat->node_id;
pgdat            6181 mm/page_alloc.c 	struct pglist_data *pgdat;
pgdat            6187 mm/page_alloc.c 	for_each_online_pgdat(pgdat)
pgdat            6188 mm/page_alloc.c 		pgdat->per_cpu_nodestats =
pgdat            6211 mm/page_alloc.c 	struct pglist_data *pgdat = zone->zone_pgdat;
pgdat            6214 mm/page_alloc.c 	if (zone_idx > pgdat->nr_zones)
pgdat            6215 mm/page_alloc.c 		pgdat->nr_zones = zone_idx;
pgdat            6221 mm/page_alloc.c 			pgdat->node_id,
pgdat            6539 mm/page_alloc.c static void __init calculate_node_totalpages(struct pglist_data *pgdat,
pgdat            6549 mm/page_alloc.c 		struct zone *zone = pgdat->node_zones + i;
pgdat            6553 mm/page_alloc.c 		size = zone_spanned_pages_in_node(pgdat->node_id, i,
pgdat            6559 mm/page_alloc.c 		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
pgdat            6573 mm/page_alloc.c 	pgdat->node_spanned_pages = totalpages;
pgdat            6574 mm/page_alloc.c 	pgdat->node_present_pages = realtotalpages;
pgdat            6575 mm/page_alloc.c 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
pgdat            6600 mm/page_alloc.c static void __ref setup_usemap(struct pglist_data *pgdat,
pgdat            6610 mm/page_alloc.c 					    pgdat->node_id);
pgdat            6613 mm/page_alloc.c 			      usemapsize, zone->name, pgdat->node_id);
pgdat            6617 mm/page_alloc.c static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
pgdat            6679 mm/page_alloc.c static void pgdat_init_split_queue(struct pglist_data *pgdat)
pgdat            6681 mm/page_alloc.c 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
pgdat            6688 mm/page_alloc.c static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
pgdat            6692 mm/page_alloc.c static void pgdat_init_kcompactd(struct pglist_data *pgdat)
pgdat            6694 mm/page_alloc.c 	init_waitqueue_head(&pgdat->kcompactd_wait);
pgdat            6697 mm/page_alloc.c static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
pgdat            6700 mm/page_alloc.c static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
pgdat            6702 mm/page_alloc.c 	pgdat_resize_init(pgdat);
pgdat            6704 mm/page_alloc.c 	pgdat_init_split_queue(pgdat);
pgdat            6705 mm/page_alloc.c 	pgdat_init_kcompactd(pgdat);
pgdat            6707 mm/page_alloc.c 	init_waitqueue_head(&pgdat->kswapd_wait);
pgdat            6708 mm/page_alloc.c 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
pgdat            6710 mm/page_alloc.c 	pgdat_page_ext_init(pgdat);
pgdat            6711 mm/page_alloc.c 	spin_lock_init(&pgdat->lru_lock);
pgdat            6712 mm/page_alloc.c 	lruvec_init(node_lruvec(pgdat));
pgdat            6738 mm/page_alloc.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            6740 mm/page_alloc.c 	pgdat_init_internals(pgdat);
pgdat            6742 mm/page_alloc.c 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
pgdat            6755 mm/page_alloc.c static void __init free_area_init_core(struct pglist_data *pgdat)
pgdat            6758 mm/page_alloc.c 	int nid = pgdat->node_id;
pgdat            6760 mm/page_alloc.c 	pgdat_init_internals(pgdat);
pgdat            6761 mm/page_alloc.c 	pgdat->per_cpu_nodestats = &boot_nodestats;
pgdat            6764 mm/page_alloc.c 		struct zone *zone = pgdat->node_zones + j;
pgdat            6814 mm/page_alloc.c 		setup_usemap(pgdat, zone, zone_start_pfn, size);
pgdat            6821 mm/page_alloc.c static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
pgdat            6827 mm/page_alloc.c 	if (!pgdat->node_spanned_pages)
pgdat            6830 mm/page_alloc.c 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
pgdat            6831 mm/page_alloc.c 	offset = pgdat->node_start_pfn - start;
pgdat            6833 mm/page_alloc.c 	if (!pgdat->node_mem_map) {
pgdat            6842 mm/page_alloc.c 		end = pgdat_end_pfn(pgdat);
pgdat            6846 mm/page_alloc.c 					  pgdat->node_id);
pgdat            6849 mm/page_alloc.c 			      size, pgdat->node_id);
pgdat            6850 mm/page_alloc.c 		pgdat->node_mem_map = map + offset;
pgdat            6853 mm/page_alloc.c 				__func__, pgdat->node_id, (unsigned long)pgdat,
pgdat            6854 mm/page_alloc.c 				(unsigned long)pgdat->node_mem_map);
pgdat            6859 mm/page_alloc.c 	if (pgdat == NODE_DATA(0)) {
pgdat            6862 mm/page_alloc.c 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
pgdat            6869 mm/page_alloc.c static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
pgdat            6873 mm/page_alloc.c static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
pgdat            6875 mm/page_alloc.c 	pgdat->first_deferred_pfn = ULONG_MAX;
pgdat            6878 mm/page_alloc.c static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
pgdat            6885 mm/page_alloc.c 	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            6890 mm/page_alloc.c 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
pgdat            6892 mm/page_alloc.c 	pgdat->node_id = nid;
pgdat            6893 mm/page_alloc.c 	pgdat->node_start_pfn = node_start_pfn;
pgdat            6894 mm/page_alloc.c 	pgdat->per_cpu_nodestats = NULL;
pgdat            6903 mm/page_alloc.c 	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
pgdat            6906 mm/page_alloc.c 	alloc_node_mem_map(pgdat);
pgdat            6907 mm/page_alloc.c 	pgdat_set_deferred_range(pgdat);
pgdat            6909 mm/page_alloc.c 	free_area_init_core(pgdat);
pgdat            7313 mm/page_alloc.c static void check_for_memory(pg_data_t *pgdat, int nid)
pgdat            7318 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zone_type];
pgdat            7413 mm/page_alloc.c 		pg_data_t *pgdat = NODE_DATA(nid);
pgdat            7418 mm/page_alloc.c 		if (pgdat->node_present_pages)
pgdat            7420 mm/page_alloc.c 		check_for_memory(pgdat, nid);
pgdat            7670 mm/page_alloc.c 	struct pglist_data *pgdat;
pgdat            7674 mm/page_alloc.c 	for_each_online_pgdat(pgdat) {
pgdat            7676 mm/page_alloc.c 		pgdat->totalreserve_pages = 0;
pgdat            7679 mm/page_alloc.c 			struct zone *zone = pgdat->node_zones + i;
pgdat            7695 mm/page_alloc.c 			pgdat->totalreserve_pages += max;
pgdat            7711 mm/page_alloc.c 	struct pglist_data *pgdat;
pgdat            7714 mm/page_alloc.c 	for_each_online_pgdat(pgdat) {
pgdat            7716 mm/page_alloc.c 			struct zone *zone = pgdat->node_zones + j;
pgdat            7726 mm/page_alloc.c 				lower_zone = pgdat->node_zones + idx;
pgdat            7928 mm/page_alloc.c 	pg_data_t *pgdat;
pgdat            7931 mm/page_alloc.c 	for_each_online_pgdat(pgdat)
pgdat            7932 mm/page_alloc.c 		pgdat->min_unmapped_pages = 0;
pgdat            7956 mm/page_alloc.c 	pg_data_t *pgdat;
pgdat            7959 mm/page_alloc.c 	for_each_online_pgdat(pgdat)
pgdat            7960 mm/page_alloc.c 		pgdat->min_slab_pages = 0;
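
A pattern that recurs through the page_alloc.c hits above (and in kfd_crat.c earlier): per-node totals are computed by summing zone_managed_pages() over the node's zone array. A self-contained sketch (node_managed_pages() is a hypothetical helper):

	#include <linux/mmzone.h>

	static unsigned long node_managed_pages(int nid)
	{
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long pages = 0;
		int zone_type;

		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
			pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
		return pages;
	}
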
pgdat             110 mm/page_ext.c  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
pgdat             112 mm/page_ext.c  	pgdat->node_page_ext = NULL;
pgdat             411 mm/page_ext.c  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
pgdat              34 mm/page_idle.c 	pg_data_t *pgdat;
pgdat              44 mm/page_idle.c 	pgdat = page_pgdat(page);
pgdat              45 mm/page_idle.c 	spin_lock_irq(&pgdat->lru_lock);
pgdat              50 mm/page_idle.c 	spin_unlock_irq(&pgdat->lru_lock);
pgdat             254 mm/page_owner.c 				       pg_data_t *pgdat, struct zone *zone)
pgdat             331 mm/page_owner.c 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
pgdat             546 mm/page_owner.c static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
pgdat             615 mm/page_owner.c 		pgdat->node_id, zone->name, count);
pgdat             618 mm/page_owner.c static void init_zones_in_node(pg_data_t *pgdat)
pgdat             621 mm/page_owner.c 	struct zone *node_zones = pgdat->node_zones;
pgdat             627 mm/page_owner.c 		init_pages_in_zone(pgdat, zone);
pgdat             633 mm/page_owner.c 	pg_data_t *pgdat;
pgdat             635 mm/page_owner.c 	for_each_online_pgdat(pgdat)
pgdat             636 mm/page_owner.c 		init_zones_in_node(pgdat);
pgdat             178 mm/shuffle.c   void __meminit __shuffle_free_memory(pg_data_t *pgdat)
pgdat             182 mm/shuffle.c   	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
pgdat              24 mm/shuffle.h   extern void __shuffle_free_memory(pg_data_t *pgdat);
pgdat              25 mm/shuffle.h   static inline void shuffle_free_memory(pg_data_t *pgdat)
pgdat              29 mm/shuffle.h   	__shuffle_free_memory(pgdat);
pgdat              47 mm/shuffle.h   static inline void shuffle_free_memory(pg_data_t *pgdat)
pgdat             355 mm/sparse.c    sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
pgdat             371 mm/sparse.c    	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
pgdat             389 mm/sparse.c    	struct pglist_data *pgdat = NODE_DATA(nid);
pgdat             399 mm/sparse.c    	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
pgdat             427 mm/sparse.c    sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
pgdat             430 mm/sparse.c    	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
pgdat              63 mm/swap.c      		pg_data_t *pgdat = page_pgdat(page);
pgdat              67 mm/swap.c      		spin_lock_irqsave(&pgdat->lru_lock, flags);
pgdat              68 mm/swap.c      		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat              72 mm/swap.c      		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat             195 mm/swap.c      	struct pglist_data *pgdat = NULL;
pgdat             203 mm/swap.c      		if (pagepgdat != pgdat) {
pgdat             204 mm/swap.c      			if (pgdat)
pgdat             205 mm/swap.c      				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat             206 mm/swap.c      			pgdat = pagepgdat;
pgdat             207 mm/swap.c      			spin_lock_irqsave(&pgdat->lru_lock, flags);
pgdat             210 mm/swap.c      		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat             213 mm/swap.c      	if (pgdat)
pgdat             214 mm/swap.c      		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
pgdat             327 mm/swap.c      	pg_data_t *pgdat = page_pgdat(page);
pgdat             330 mm/swap.c      	spin_lock_irq(&pgdat->lru_lock);
pgdat             331 mm/swap.c      	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
pgdat             332 mm/swap.c      	spin_unlock_irq(&pgdat->lru_lock);
pgdat             815 mm/swap.c      			struct pglist_data *pgdat = page_pgdat(page);
pgdat             817 mm/swap.c      			if (pgdat != locked_pgdat) {
pgdat             822 mm/swap.c      				locked_pgdat = pgdat;
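
The swap.c hits above illustrate the lru_lock batching idiom: while walking a batch of pages, hold the current node's lru_lock and only drop/retake it when the next page belongs to a different pgdat. A sketch of that loop shape (walk_pages_locked() is hypothetical; the per-page work is elided):

	#include <linux/mm.h>
	#include <linux/mmzone.h>

	static void walk_pages_locked(struct page **pages, int nr)
	{
		struct pglist_data *pgdat = NULL;
		unsigned long flags = 0;
		int i;

		for (i = 0; i < nr; i++) {
			struct pglist_data *pagepgdat = page_pgdat(pages[i]);

			if (pagepgdat != pgdat) {
				if (pgdat)
					spin_unlock_irqrestore(&pgdat->lru_lock, flags);
				pgdat = pagepgdat;
				spin_lock_irqsave(&pgdat->lru_lock, flags);
			}
			/* per-page LRU work under pgdat->lru_lock goes here */
		}
		if (pgdat)
			spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
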
pgdat             273 mm/vmscan.c    static void set_memcg_congestion(pg_data_t *pgdat,
pgdat             282 mm/vmscan.c    	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
pgdat             286 mm/vmscan.c    static bool memcg_congested(pg_data_t *pgdat,
pgdat             291 mm/vmscan.c    	mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
pgdat             315 mm/vmscan.c    static inline void set_memcg_congestion(struct pglist_data *pgdat,
pgdat             320 mm/vmscan.c    static inline bool memcg_congested(struct pglist_data *pgdat,
pgdat            1120 mm/vmscan.c    				      struct pglist_data *pgdat,
pgdat            1237 mm/vmscan.c    			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
pgdat            1371 mm/vmscan.c    			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
pgdat            1804 mm/vmscan.c    		pg_data_t *pgdat = page_pgdat(page);
pgdat            1807 mm/vmscan.c    		spin_lock_irq(&pgdat->lru_lock);
pgdat            1808 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            1816 mm/vmscan.c    		spin_unlock_irq(&pgdat->lru_lock);
pgdat            1828 mm/vmscan.c    static int too_many_isolated(struct pglist_data *pgdat, int file,
pgdat            1840 mm/vmscan.c    		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
pgdat            1841 mm/vmscan.c    		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
pgdat            1843 mm/vmscan.c    		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
pgdat            1844 mm/vmscan.c    		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
pgdat            1881 mm/vmscan.c    	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat            1892 mm/vmscan.c    			spin_unlock_irq(&pgdat->lru_lock);
pgdat            1894 mm/vmscan.c    			spin_lock_irq(&pgdat->lru_lock);
pgdat            1897 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            1912 mm/vmscan.c    				spin_unlock_irq(&pgdat->lru_lock);
pgdat            1914 mm/vmscan.c    				spin_lock_irq(&pgdat->lru_lock);
pgdat            1958 mm/vmscan.c    	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat            1962 mm/vmscan.c    	while (unlikely(too_many_isolated(pgdat, file, sc))) {
pgdat            1977 mm/vmscan.c    	spin_lock_irq(&pgdat->lru_lock);
pgdat            1982 mm/vmscan.c    	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
pgdat            1989 mm/vmscan.c    	spin_unlock_irq(&pgdat->lru_lock);
pgdat            1994 mm/vmscan.c    	nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
pgdat            1997 mm/vmscan.c    	spin_lock_irq(&pgdat->lru_lock);
pgdat            2008 mm/vmscan.c    	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
pgdat            2010 mm/vmscan.c    	spin_unlock_irq(&pgdat->lru_lock);
pgdat            2038 mm/vmscan.c    	trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
pgdat            2059 mm/vmscan.c    	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat            2063 mm/vmscan.c    	spin_lock_irq(&pgdat->lru_lock);
pgdat            2068 mm/vmscan.c    	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
pgdat            2074 mm/vmscan.c    	spin_unlock_irq(&pgdat->lru_lock);
pgdat            2120 mm/vmscan.c    	spin_lock_irq(&pgdat->lru_lock);
pgdat            2137 mm/vmscan.c    	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
pgdat            2138 mm/vmscan.c    	spin_unlock_irq(&pgdat->lru_lock);
pgdat            2142 mm/vmscan.c    	trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
pgdat            2234 mm/vmscan.c    	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat            2268 mm/vmscan.c    		trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
pgdat            2312 mm/vmscan.c    	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
pgdat            2362 mm/vmscan.c    		pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
pgdat            2363 mm/vmscan.c    		pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
pgdat            2364 mm/vmscan.c    			   node_page_state(pgdat, NR_INACTIVE_FILE);
pgdat            2367 mm/vmscan.c    			struct zone *zone = &pgdat->node_zones[z];
pgdat            2430 mm/vmscan.c    	spin_lock_irq(&pgdat->lru_lock);
pgdat            2451 mm/vmscan.c    	spin_unlock_irq(&pgdat->lru_lock);
pgdat            2563 mm/vmscan.c    static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
pgdat            2566 mm/vmscan.c    	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
pgdat            2697 mm/vmscan.c    static inline bool should_continue_reclaim(struct pglist_data *pgdat,
pgdat            2724 mm/vmscan.c    		struct zone *zone = &pgdat->node_zones[z];
pgdat            2743 mm/vmscan.c    	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
pgdat            2745 mm/vmscan.c    		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
pgdat            2750 mm/vmscan.c    static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
pgdat            2752 mm/vmscan.c    	return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
pgdat            2753 mm/vmscan.c    		(memcg && memcg_congested(pgdat, memcg));
pgdat            2756 mm/vmscan.c    static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
pgdat            2811 mm/vmscan.c    			shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
pgdat            2814 mm/vmscan.c    			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
pgdat            2856 mm/vmscan.c    				set_bit(PGDAT_WRITEBACK, &pgdat->flags);
pgdat            2864 mm/vmscan.c    				set_bit(PGDAT_CONGESTED, &pgdat->flags);
pgdat            2868 mm/vmscan.c    				set_bit(PGDAT_DIRTY, &pgdat->flags);
pgdat            2886 mm/vmscan.c    			set_memcg_congestion(pgdat, root, true);
pgdat            2895 mm/vmscan.c    		   current_may_throttle() && pgdat_memcg_congested(pgdat, root))
pgdat            2898 mm/vmscan.c    	} while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
pgdat            2908 mm/vmscan.c    		pgdat->kswapd_failures = 0;
pgdat            3038 mm/vmscan.c    static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
pgdat            3047 mm/vmscan.c    		lruvec = mem_cgroup_lruvec(pgdat, memcg);
pgdat            3132 mm/vmscan.c    static bool allow_direct_reclaim(pg_data_t *pgdat)
pgdat            3140 mm/vmscan.c    	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
pgdat            3144 mm/vmscan.c    		zone = &pgdat->node_zones[i];
pgdat            3162 mm/vmscan.c    	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
pgdat            3163 mm/vmscan.c    		pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
pgdat            3165 mm/vmscan.c    		wake_up_interruptible(&pgdat->kswapd_wait);
pgdat            3185 mm/vmscan.c    	pg_data_t *pgdat = NULL;
pgdat            3224 mm/vmscan.c    		pgdat = zone->zone_pgdat;
pgdat            3225 mm/vmscan.c    		if (allow_direct_reclaim(pgdat))
pgdat            3231 mm/vmscan.c    	if (!pgdat)
pgdat            3246 mm/vmscan.c    		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
pgdat            3247 mm/vmscan.c    			allow_direct_reclaim(pgdat), HZ);
pgdat            3254 mm/vmscan.c    		allow_direct_reclaim(pgdat));
pgdat            3312 mm/vmscan.c    						pg_data_t *pgdat,
pgdat            3340 mm/vmscan.c    	shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
pgdat            3398 mm/vmscan.c    static void age_active_anon(struct pglist_data *pgdat,
pgdat            3408 mm/vmscan.c    		struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
pgdat            3418 mm/vmscan.c    static bool pgdat_watermark_boosted(pg_data_t *pgdat, int classzone_idx)
pgdat            3431 mm/vmscan.c    		zone = pgdat->node_zones + i;
pgdat            3446 mm/vmscan.c    static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
pgdat            3457 mm/vmscan.c    		zone = pgdat->node_zones + i;
pgdat            3479 mm/vmscan.c    static void clear_pgdat_congested(pg_data_t *pgdat)
pgdat            3481 mm/vmscan.c    	clear_bit(PGDAT_CONGESTED, &pgdat->flags);
pgdat            3482 mm/vmscan.c    	clear_bit(PGDAT_DIRTY, &pgdat->flags);
pgdat            3483 mm/vmscan.c    	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
pgdat            3492 mm/vmscan.c    static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
pgdat            3507 mm/vmscan.c    	if (waitqueue_active(&pgdat->pfmemalloc_wait))
pgdat            3508 mm/vmscan.c    		wake_up_all(&pgdat->pfmemalloc_wait);
pgdat            3511 mm/vmscan.c    	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
pgdat            3514 mm/vmscan.c    	if (pgdat_balanced(pgdat, order, classzone_idx)) {
pgdat            3515 mm/vmscan.c    		clear_pgdat_congested(pgdat);
pgdat            3530 mm/vmscan.c    static bool kswapd_shrink_node(pg_data_t *pgdat,
pgdat            3539 mm/vmscan.c    		zone = pgdat->node_zones + z;
pgdat            3550 mm/vmscan.c    	shrink_node(pgdat, sc);
pgdat            3578 mm/vmscan.c    static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
pgdat            3607 mm/vmscan.c    		zone = pgdat->node_zones + i;
pgdat            3638 mm/vmscan.c    				zone = pgdat->node_zones + i;
pgdat            3654 mm/vmscan.c    		balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
pgdat            3687 mm/vmscan.c    		age_active_anon(pgdat, &sc);
pgdat            3699 mm/vmscan.c    		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
pgdat            3708 mm/vmscan.c    		if (kswapd_shrink_node(pgdat, &sc))
pgdat            3716 mm/vmscan.c    		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
pgdat            3717 mm/vmscan.c    				allow_direct_reclaim(pgdat))
pgdat            3718 mm/vmscan.c    			wake_up_all(&pgdat->pfmemalloc_wait);
pgdat            3747 mm/vmscan.c    		pgdat->kswapd_failures++;
pgdat            3759 mm/vmscan.c    			zone = pgdat->node_zones + i;
pgdat            3769 mm/vmscan.c    		wakeup_kcompactd(pgdat, pageblock_order, classzone_idx);
pgdat            3772 mm/vmscan.c    	snapshot_refaults(NULL, pgdat);
pgdat            3793 mm/vmscan.c    static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
pgdat            3796 mm/vmscan.c    	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
pgdat            3798 mm/vmscan.c    	return pgdat->kswapd_classzone_idx;
pgdat            3801 mm/vmscan.c    static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
pgdat            3810 mm/vmscan.c    	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
pgdat            3819 mm/vmscan.c    	if (prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
pgdat            3826 mm/vmscan.c    		reset_isolation_suitable(pgdat);
pgdat            3832 mm/vmscan.c    		wakeup_kcompactd(pgdat, alloc_order, classzone_idx);
pgdat            3842 mm/vmscan.c    			pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
pgdat            3843 mm/vmscan.c    			pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
pgdat            3846 mm/vmscan.c    		finish_wait(&pgdat->kswapd_wait, &wait);
pgdat            3847 mm/vmscan.c    		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
pgdat            3855 mm/vmscan.c    	    prepare_kswapd_sleep(pgdat, reclaim_order, classzone_idx)) {
pgdat            3856 mm/vmscan.c    		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
pgdat            3866 mm/vmscan.c    		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
pgdat            3871 mm/vmscan.c    		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
pgdat            3878 mm/vmscan.c    	finish_wait(&pgdat->kswapd_wait, &wait);
pgdat            3898 mm/vmscan.c    	pg_data_t *pgdat = (pg_data_t*)p;
pgdat            3900 mm/vmscan.c    	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
pgdat            3920 mm/vmscan.c    	pgdat->kswapd_order = 0;
pgdat            3921 mm/vmscan.c    	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
pgdat            3925 mm/vmscan.c    		alloc_order = reclaim_order = pgdat->kswapd_order;
pgdat            3926 mm/vmscan.c    		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
pgdat            3929 mm/vmscan.c    		kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
pgdat            3933 mm/vmscan.c    		alloc_order = reclaim_order = pgdat->kswapd_order;
pgdat            3934 mm/vmscan.c    		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
pgdat            3935 mm/vmscan.c    		pgdat->kswapd_order = 0;
pgdat            3936 mm/vmscan.c    		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
pgdat            3957 mm/vmscan.c    		trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
pgdat            3959 mm/vmscan.c    		reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
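The kswapd() entries above show the thread's main loop: read the (order, classzone_idx) request posted by wakers, try to sleep, re-read whatever request arrived in the meantime, clear it, and call balance_pgdat(). The loop's shape, with both helpers reduced to stubs and all types invented; the real logic, including freezer and signal handling, lives in mm/vmscan.c:

    struct req_m { int order; int classzone_idx; };

    static void try_to_sleep_m(struct req_m *r, int reclaim_order)
    {
        (void)r; (void)reclaim_order;  /* stub: block until woken */
    }

    static int balance_m(int order, int classzone_idx)
    {
        (void)classzone_idx;
        return order;                  /* stub for balance_pgdat() */
    }

    static void kswapd_m(struct req_m *posted)
    {
        int alloc_order, reclaim_order = 0, classzone_idx;

        for (;;) {
            alloc_order = reclaim_order = posted->order;
            classzone_idx = posted->classzone_idx;

            try_to_sleep_m(posted, reclaim_order);

            /* re-read: a waker may have posted a new request meanwhile */
            alloc_order = reclaim_order = posted->order;
            classzone_idx = posted->classzone_idx;
            posted->order = 0;  /* as pgdat->kswapd_order = 0 above */

            reclaim_order = balance_m(alloc_order, classzone_idx);
        }
    }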
pgdat            3979 mm/vmscan.c    	pg_data_t *pgdat;
pgdat            3986 mm/vmscan.c    	pgdat = zone->zone_pgdat;
pgdat            3988 mm/vmscan.c    	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
pgdat            3989 mm/vmscan.c    		pgdat->kswapd_classzone_idx = classzone_idx;
pgdat            3991 mm/vmscan.c    		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
pgdat            3993 mm/vmscan.c    	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
pgdat            3994 mm/vmscan.c    	if (!waitqueue_active(&pgdat->kswapd_wait))
pgdat            3998 mm/vmscan.c    	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
pgdat            3999 mm/vmscan.c    	    (pgdat_balanced(pgdat, order, classzone_idx) &&
pgdat            4000 mm/vmscan.c    	     !pgdat_watermark_boosted(pgdat, classzone_idx))) {
pgdat            4009 mm/vmscan.c    			wakeup_kcompactd(pgdat, order, classzone_idx);
pgdat            4013 mm/vmscan.c    	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
pgdat            4015 mm/vmscan.c    	wake_up_interruptible(&pgdat->kswapd_wait);
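wakeup_kswapd() above is the waker side: it records the largest pending (order, classzone_idx) request, returns early when kswapd has failed too often or the node is already balanced and unboosted, and only signals when kswapd is actually on its wait queue. The record-max-then-signal core, modeled with POSIX primitives and invented names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t klock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t kswapd_wait = PTHREAD_COND_INITIALIZER;
    static int kswapd_order, kswapd_classzone_idx;

    static void wakeup_kswapd_m(int order, int classzone_idx, bool balanced)
    {
        pthread_mutex_lock(&klock);
        /* remember the hardest outstanding request, as the max() calls
         * in the entries above do */
        if (classzone_idx > kswapd_classzone_idx)
            kswapd_classzone_idx = classzone_idx;
        if (order > kswapd_order)
            kswapd_order = order;
        if (!balanced)
            pthread_cond_signal(&kswapd_wait);  /* wake_up_interruptible */
        pthread_mutex_unlock(&klock);
    }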
pgdat            4066 mm/vmscan.c    		pg_data_t *pgdat = NODE_DATA(nid);
pgdat            4069 mm/vmscan.c    		mask = cpumask_of_node(pgdat->node_id);
pgdat            4073 mm/vmscan.c    			set_cpus_allowed_ptr(pgdat->kswapd, mask);
pgdat            4084 mm/vmscan.c    	pg_data_t *pgdat = NODE_DATA(nid);
pgdat            4087 mm/vmscan.c    	if (pgdat->kswapd)
pgdat            4090 mm/vmscan.c    	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
pgdat            4091 mm/vmscan.c    	if (IS_ERR(pgdat->kswapd)) {
pgdat            4095 mm/vmscan.c    		ret = PTR_ERR(pgdat->kswapd);
pgdat            4096 mm/vmscan.c    		pgdat->kswapd = NULL;
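From the kswapd_run() entries above, the start-up path can be reconstructed almost completely; only the error-reporting lines between the indexed ones are missing here, so treat this as a plausible reading rather than the verbatim source:

    int kswapd_run(int nid)
    {
        pg_data_t *pgdat = NODE_DATA(nid);
        int ret = 0;

        if (pgdat->kswapd)
            return 0;  /* one kswapd kthread per node, started once */

        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
        if (IS_ERR(pgdat->kswapd)) {
            /* (error reporting elided in the index above) */
            ret = PTR_ERR(pgdat->kswapd);
            pgdat->kswapd = NULL;
        }
        return ret;
    }

Note the kthread_run()/IS_ERR()/PTR_ERR() idiom: failure is carried in an error-encoded pointer rather than NULL, so it must be translated before the field is cleared.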
pgdat            4164 mm/vmscan.c    static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
pgdat            4166 mm/vmscan.c    	unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
pgdat            4167 mm/vmscan.c    	unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
pgdat            4168 mm/vmscan.c    		node_page_state(pgdat, NR_ACTIVE_FILE);
pgdat            4179 mm/vmscan.c    static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
pgdat            4191 mm/vmscan.c    		nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
pgdat            4193 mm/vmscan.c    		nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
pgdat            4197 mm/vmscan.c    		delta += node_page_state(pgdat, NR_FILE_DIRTY);
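node_unmapped_file_pages() and node_pagecache_reclaimable() above compute how much page cache node_reclaim may target: all file pages if unmapping is allowed, otherwise only the unmapped portion of the file LRUs, minus dirty pages when writeback is not allowed, clamped so the subtraction cannot underflow. The same arithmetic in one self-contained function (parameter names are invented; the may_unmap/may_write flags stand in for the node-reclaim mode bits):

    #include <stdbool.h>

    static unsigned long
    node_pagecache_reclaimable_m(unsigned long file_pages,  /* NR_FILE_PAGES */
                                 unsigned long file_lru,    /* (in)active file */
                                 unsigned long file_mapped, /* NR_FILE_MAPPED */
                                 unsigned long file_dirty,  /* NR_FILE_DIRTY */
                                 bool may_unmap, bool may_write)
    {
        unsigned long reclaimable = may_unmap ? file_pages :
            (file_lru > file_mapped ? file_lru - file_mapped : 0);
        unsigned long delta = may_write ? 0 : file_dirty;

        if (delta > reclaimable)  /* stats are approximate; guard underflow */
            delta = reclaimable;
        return reclaimable - delta;
    }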
pgdat            4209 mm/vmscan.c    static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
pgdat            4226 mm/vmscan.c    	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
pgdat            4240 mm/vmscan.c    	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
pgdat            4246 mm/vmscan.c    			shrink_node(pgdat, &sc);
pgdat            4260 mm/vmscan.c    int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
pgdat            4274 mm/vmscan.c    	if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
pgdat            4275 mm/vmscan.c    	    node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
pgdat            4290 mm/vmscan.c    	if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
pgdat            4293 mm/vmscan.c    	if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
pgdat            4296 mm/vmscan.c    	ret = __node_reclaim(pgdat, gfp_mask, order);
pgdat            4297 mm/vmscan.c    	clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
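node_reclaim() above serializes reclaimers with a single flag bit: test_and_set_bit(PGDAT_RECLAIM_LOCKED, ...) admits one caller per node and turns everyone else away instead of queueing them. The same trylock-by-flag idiom with C11 atomics (the stub and return values are invented):

    #include <stdatomic.h>

    static atomic_flag reclaim_locked = ATOMIC_FLAG_INIT;

    static int do_reclaim_work_m(void) { return 1; }  /* __node_reclaim stub */

    static int node_reclaim_m(void)
    {
        /* atomic test-and-set returns the previous value, so "true"
         * means someone else already holds the pseudo-lock */
        if (atomic_flag_test_and_set(&reclaim_locked))
            return 0;

        int ret = do_reclaim_work_m();
        atomic_flag_clear(&reclaim_locked);  /* clear_bit() analogue */
        return ret;
    }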
pgdat            4341 mm/vmscan.c    	struct pglist_data *pgdat = NULL;
pgdat            4351 mm/vmscan.c    		if (pagepgdat != pgdat) {
pgdat            4352 mm/vmscan.c    			if (pgdat)
pgdat            4353 mm/vmscan.c    				spin_unlock_irq(&pgdat->lru_lock);
pgdat            4354 mm/vmscan.c    			pgdat = pagepgdat;
pgdat            4355 mm/vmscan.c    			spin_lock_irq(&pgdat->lru_lock);
pgdat            4357 mm/vmscan.c    		lruvec = mem_cgroup_page_lruvec(page, pgdat);
pgdat            4373 mm/vmscan.c    	if (pgdat) {
pgdat            4376 mm/vmscan.c    		spin_unlock_irq(&pgdat->lru_lock);
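The entries above (from check_move_unevictable_pages) show a lock-batching idiom worth lifting out: when iterating pages that may belong to different nodes, hold the current node's lru_lock across consecutive same-node pages and switch locks only at node boundaries. A userspace rendering with invented types:

    #include <pthread.h>
    #include <stddef.h>

    struct node_m { pthread_mutex_t lru_lock; };
    struct page_m { struct node_m *node; };

    static void walk_pages_m(struct page_m *pages, int n)
    {
        struct node_m *cur = NULL;  /* the pgdat pointer above */

        for (int i = 0; i < n; i++) {
            struct node_m *node = pages[i].node;  /* pagepgdat */

            if (node != cur) {  /* only relock at node boundaries */
                if (cur)
                    pthread_mutex_unlock(&cur->lru_lock);
                cur = node;
                pthread_mutex_lock(&cur->lru_lock);
            }
            /* ... operate on pages[i] under cur->lru_lock ... */
        }
        if (cur)
            pthread_mutex_unlock(&cur->lru_lock);
    }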
pgdat             248 mm/vmstat.c    	struct pglist_data *pgdat;
pgdat             254 mm/vmstat.c    	for_each_online_pgdat(pgdat) {
pgdat             256 mm/vmstat.c    			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
pgdat             261 mm/vmstat.c    		struct pglist_data *pgdat = zone->zone_pgdat;
pgdat             273 mm/vmstat.c    			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
pgdat             274 mm/vmstat.c    			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
pgdat             291 mm/vmstat.c    void set_pgdat_percpu_threshold(pg_data_t *pgdat,
pgdat             299 mm/vmstat.c    	for (i = 0; i < pgdat->nr_zones; i++) {
pgdat             300 mm/vmstat.c    		zone = &pgdat->node_zones[i];
pgdat             336 mm/vmstat.c    void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
pgdat             339 mm/vmstat.c    	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
pgdat             349 mm/vmstat.c    		node_page_state_add(x, pgdat, item);
pgdat             395 mm/vmstat.c    void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             397 mm/vmstat.c    	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
pgdat             406 mm/vmstat.c    		node_page_state_add(v + overstep, pgdat, item);
pgdat             439 mm/vmstat.c    void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             441 mm/vmstat.c    	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
pgdat             450 mm/vmstat.c    		node_page_state_add(v - overstep, pgdat, item);
pgdat             537 mm/vmstat.c    static inline void mod_node_state(struct pglist_data *pgdat,
pgdat             540 mm/vmstat.c    	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
pgdat             572 mm/vmstat.c    		node_page_state_add(z, pgdat, item);
pgdat             575 mm/vmstat.c    void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
pgdat             578 mm/vmstat.c    	mod_node_state(pgdat, item, delta, 0);
pgdat             582 mm/vmstat.c    void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             584 mm/vmstat.c    	mod_node_state(pgdat, item, 1, 1);
pgdat             635 mm/vmstat.c    void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
pgdat             640 mm/vmstat.c    	__inc_node_state(pgdat, item);
pgdat             645 mm/vmstat.c    void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
pgdat             651 mm/vmstat.c    	__mod_node_page_state(pgdat, item, delta);
pgdat             659 mm/vmstat.c    	struct pglist_data *pgdat;
pgdat             661 mm/vmstat.c    	pgdat = page_pgdat(page);
pgdat             663 mm/vmstat.c    	__inc_node_state(pgdat, item);
pgdat             747 mm/vmstat.c    	struct pglist_data *pgdat;
pgdat             819 mm/vmstat.c    	for_each_online_pgdat(pgdat) {
pgdat             820 mm/vmstat.c    		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
pgdat             827 mm/vmstat.c    				atomic_long_add(v, &pgdat->vm_stat[i]);
pgdat             849 mm/vmstat.c    	struct pglist_data *pgdat;
pgdat             886 mm/vmstat.c    	for_each_online_pgdat(pgdat) {
pgdat             889 mm/vmstat.c    		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
pgdat             897 mm/vmstat.c    				atomic_long_add(v, &pgdat->vm_stat[i]);
pgdat             992 mm/vmstat.c    unsigned long node_page_state(struct pglist_data *pgdat,
pgdat             995 mm/vmstat.c    	long x = atomic_long_read(&pgdat->vm_stat[item]);
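The mm/vmstat.c entries above all serve one scheme: each CPU accumulates a small signed diff per node counter and folds it into the global atomic only when the diff crosses stat_threshold (overstepping by half the threshold to fold less often); readers may therefore see slightly stale or even transiently negative sums, which node_page_state() clamps at zero. A simplified model without the overstep, using C11 atomics and one thread-local diff per "CPU":

    #include <stdatomic.h>

    #define STAT_THRESHOLD 32  /* invented; the kernel sizes it dynamically */

    static atomic_long global_stat;
    static _Thread_local long cpu_diff;

    static void mod_node_page_state_m(long delta)
    {
        cpu_diff += delta;
        if (cpu_diff > STAT_THRESHOLD || cpu_diff < -STAT_THRESHOLD) {
            atomic_fetch_add(&global_stat, cpu_diff);  /* fold into node */
            cpu_diff = 0;
        }
    }

    static long node_page_state_m(void)
    {
        long x = atomic_load(&global_stat);

        return x < 0 ? 0 : x;  /* clamp drift, as node_page_state() does */
    }

This is also why set_pgdat_percpu_threshold() appears above: lowering the threshold (the calculate_pressure_threshold path while kswapd is awake) trades update cost for accuracy exactly when reclaim decisions depend on these counters.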
pgdat            1302 mm/vmstat.c    	pg_data_t *pgdat;
pgdat            1305 mm/vmstat.c    	for (pgdat = first_online_pgdat();
pgdat            1306 mm/vmstat.c    	     pgdat && node;
pgdat            1307 mm/vmstat.c    	     pgdat = next_online_pgdat(pgdat))
pgdat            1310 mm/vmstat.c    	return pgdat;
pgdat            1315 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1318 mm/vmstat.c    	return next_online_pgdat(pgdat);
pgdat            1329 mm/vmstat.c    static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
pgdat            1334 mm/vmstat.c    	struct zone *node_zones = pgdat->node_zones;
pgdat            1343 mm/vmstat.c    		print(m, pgdat, zone);
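walk_zones_in_node() above owns the iteration over a node's zones; every reporter indexed around it (frag_show, pagetypeinfo, zoneinfo, the extfrag debugfs files) just supplies a print callback. The walker/callback split in miniature, with types and the zone count invented:

    #include <stdio.h>
    #include <stdbool.h>

    struct zone_w { const char *name; bool populated; };
    struct pgdat_w { int node_id; struct zone_w zones[4]; };

    typedef void (*zone_print_fn)(struct pgdat_w *, struct zone_w *);

    static void walk_zones_in_node_m(struct pgdat_w *pgdat,
                                     bool skip_empty, zone_print_fn print)
    {
        for (int i = 0; i < 4; i++) {
            struct zone_w *zone = &pgdat->zones[i];

            if (skip_empty && !zone->populated)
                continue;
            print(pgdat, zone);  /* per-file formatting lives here */
        }
    }

    /* One callback, shaped like the frag_show_print() entry above */
    static void frag_print_m(struct pgdat_w *p, struct zone_w *z)
    {
        printf("Node %d, zone %8s ...\n", p->node_id, z->name);
    }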
pgdat            1351 mm/vmstat.c    static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
pgdat            1356 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
pgdat            1367 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1368 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
pgdat            1373 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
pgdat            1379 mm/vmstat.c    					pgdat->node_id,
pgdat            1418 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1426 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
pgdat            1432 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
pgdat            1461 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
pgdat            1471 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1477 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, false,
pgdat            1489 mm/vmstat.c    static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
pgdat            1504 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, true,
pgdat            1515 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1518 mm/vmstat.c    	if (!node_state(pgdat->node_id, N_MEMORY))
pgdat            1524 mm/vmstat.c    	pagetypeinfo_showfree(m, pgdat);
pgdat            1525 mm/vmstat.c    	pagetypeinfo_showblockcount(m, pgdat);
pgdat            1526 mm/vmstat.c    	pagetypeinfo_showmixedcount(m, pgdat);
pgdat            1545 mm/vmstat.c    static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
pgdat            1550 mm/vmstat.c    		struct zone *compare = &pgdat->node_zones[zid];
pgdat            1559 mm/vmstat.c    static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
pgdat            1563 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
pgdat            1564 mm/vmstat.c    	if (is_zone_first_populated(pgdat, zone)) {
pgdat            1570 mm/vmstat.c    				node_page_state(pgdat, i));
pgdat            1635 mm/vmstat.c    		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
pgdat            1648 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            1649 mm/vmstat.c    	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
pgdat            2023 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
pgdat            2030 mm/vmstat.c    				pgdat->node_id,
pgdat            2052 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            2055 mm/vmstat.c    	if (!node_state(pgdat->node_id, N_MEMORY))
pgdat            2058 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
pgdat            2083 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
pgdat            2092 mm/vmstat.c    				pgdat->node_id,
pgdat            2108 mm/vmstat.c    	pg_data_t *pgdat = (pg_data_t *)arg;
pgdat            2110 mm/vmstat.c    	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
pgdat             184 mm/workingset.c static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
pgdat             190 mm/workingset.c 	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
pgdat             196 mm/workingset.c static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
pgdat             211 mm/workingset.c 	*pgdat = NODE_DATA(nid);
pgdat             225 mm/workingset.c 	struct pglist_data *pgdat = page_pgdat(page);
pgdat             236 mm/workingset.c 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
pgdat             238 mm/workingset.c 	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
pgdat             252 mm/workingset.c 	struct pglist_data *pgdat;
pgdat             261 mm/workingset.c 	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
pgdat             283 mm/workingset.c 	lruvec = mem_cgroup_lruvec(pgdat, memcg);
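pack_shadow()/unpack_shadow() above compress a memcg id, a node id, an eviction counter, and the PageWorkingset bit into a single shadow word; only the (eviction << NODES_SHIFT) | pgdat->node_id step is visible in the index, so the field order and widths below are illustrative rather than the kernel's:

    #include <assert.h>
    #include <stdbool.h>

    #define MEMCG_SHIFT 16  /* invented widths; the kernel derives these */
    #define NODES_SHIFT 6   /* from its memcg-id and node-count configs */
    #define WS_SHIFT 1

    static unsigned long pack_shadow_m(int memcgid, int nid,
                                       unsigned long eviction, bool ws)
    {
        unsigned long w = eviction;

        w = (w << MEMCG_SHIFT) | (unsigned long)memcgid;
        w = (w << NODES_SHIFT) | (unsigned long)nid;
        w = (w << WS_SHIFT) | ws;
        return w;
    }

    static void unpack_shadow_m(unsigned long w, int *memcgid, int *nid,
                                unsigned long *eviction, bool *ws)
    {
        *ws = w & ((1UL << WS_SHIFT) - 1);
        w >>= WS_SHIFT;
        *nid = (int)(w & ((1UL << NODES_SHIFT) - 1));
        w >>= NODES_SHIFT;
        *memcgid = (int)(w & ((1UL << MEMCG_SHIFT) - 1));
        *eviction = w >> MEMCG_SHIFT;
    }

    int main(void)
    {
        int mc, nid;
        unsigned long ev;
        bool ws;

        unpack_shadow_m(pack_shadow_m(42, 3, 12345, true),
                        &mc, &nid, &ev, &ws);
        assert(mc == 42 && nid == 3 && ev == 12345 && ws);
        return 0;
    }

Truncating the eviction counter to the remaining bits is the deliberate trade-off: refault distance only needs to be compared modulo that width.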