Searched refs:zonelist (Results 1 – 22 of 22) sorted by relevance

/linux-4.4.14/include/linux/
oom.h
10 struct zonelist;
21 struct zonelist *zonelist; member
gfp.h
395 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) in node_zonelist()
409 struct zonelist *zonelist, nodemask_t *nodemask);
413 struct zonelist *zonelist) in __alloc_pages() argument
415 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages()
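Read together, the gfp.h hits are the allocation-side entry points: node_zonelist() picks one of a node's zonelists based on the gfp flags, and __alloc_pages() simply forwards that zonelist to __alloc_pages_nodemask() with a NULL nodemask. A minimal sketch of how a 4.4-era caller might combine the two (the function name and the gfp/order values are illustrative, not from the tree):

#include <linux/gfp.h>

/* Illustrative only: allocate 2^order pages from the current node's
 * zonelist, along the path the gfp.h helpers above wire together. */
static struct page *zonelist_alloc_sketch(gfp_t gfp_mask, unsigned int order)
{
	/* Pick the zonelist that matches the gfp flags for this node. */
	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);

	/* __alloc_pages() is __alloc_pages_nodemask(gfp, order, zonelist, NULL). */
	return __alloc_pages(gfp_mask, order, zonelist);
}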
mmzone.h
616 struct zonelist { struct
639 struct zonelist node_zonelists[MAX_ZONELISTS]; argument
940 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, in first_zones_zonelist() argument
945 struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
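mmzone.h carries the structure itself and the walkers: struct zonelist is an ordered array of zonerefs, each node's pg_data_t keeps node_zonelists[MAX_ZONELISTS], and first_zones_zonelist() is a thin wrapper around next_zones_zonelist(). Most of the call sites further down use the for_each_zone_zonelist*() macros built on these helpers. A rough sketch of that iteration pattern (the counting function is invented for illustration):

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Illustrative only: count the zones a GFP_KERNEL allocation on the
 * current node would consider, in zonelist (i.e. fallback) order. */
static unsigned int count_candidate_zones(void)
{
	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
	struct zoneref *z;
	struct zone *zone;
	unsigned int nr = 0;

	/* Walks zonelist->_zonerefs, skipping zones above gfp_zone(GFP_KERNEL). */
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
		nr++;

	return nr;
}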
mempolicy.h
148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
257 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, in huge_zonelist()
swap.h
319 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
/linux-4.4.14/mm/
mm_init.c
36 struct zonelist *zonelist; in mminit_verify_zonelist() local
45 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
56 for_each_zone_zonelist(zone, z, zonelist, zoneid) { in mminit_verify_zonelist()
page_alloc.c
1706 struct zonelist *zonelist = ac->zonelist; in unreserve_highatomic_pageblock() local
1713 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
2503 struct zonelist *zonelist = ac->zonelist; in get_page_from_freelist() local
2517 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in get_page_from_freelist()
2707 .zonelist = ac->zonelist, in __alloc_pages_may_oom()
2855 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2926 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in wake_all_kswapds()
3046 preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
3064 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); in __alloc_pages_slowpath()
3193 struct zonelist *zonelist, nodemask_t *nodemask) in __alloc_pages_nodemask() argument
[all …]
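Within mm/page_alloc.c the chosen zonelist does not travel as a bare argument for long: as the internal.h hit further down shows, it is stashed in a struct alloc_context, which is why the hot paths above read ac->zonelist and ac->high_zoneidx. A hedged, partial mirror of that idea, listing only the fields visible in these results rather than the full 4.4 definition:

/* Partial sketch of the alloc_context idea from mm/internal.h; only
 * the fields that appear in the search hits are reproduced here. */
struct alloc_context_sketch {
	struct zonelist *zonelist;	/* ac->zonelist in the hits above */
	nodemask_t *nodemask;		/* forwarded to the zonelist iterators */
	enum zone_type high_zoneidx;	/* upper zone bound for the walk */
};

get_page_from_freelist() runs for_each_zone_zonelist_nodemask() over these fields, and the reclaim, compaction and OOM paths listed below are handed the same zonelist from the context.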
vmscan.c
2517 static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) in shrink_zones() argument
2536 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2628 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, in do_try_to_free_pages() argument
2645 zones_reclaimable = shrink_zones(zonelist, sc); in do_try_to_free_pages()
2742 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, in throttle_direct_reclaim() argument
2780 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
2826 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, in try_to_free_pages() argument
2846 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask)) in try_to_free_pages()
2853 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_pages()
2905 struct zonelist *zonelist; in try_to_free_mem_cgroup_pages() local
[all …]
internal.h
145 struct zonelist *zonelist; member
oom_kill.c
220 if (!oc->zonelist) in constrained_alloc()
244 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, in constrained_alloc()
747 .zonelist = NULL, in pagefault_out_of_memory()
mempolicy.c
1647 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, in policy_zonelist()
1718 struct zonelist *zonelist; in mempolicy_slab_node() local
1721 zonelist = &NODE_DATA(node)->node_zonelists[0]; in mempolicy_slab_node()
1722 (void)first_zones_zonelist(zonelist, highest_zoneidx, in mempolicy_slab_node()
1806 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, in huge_zonelist()
1810 struct zonelist *zl; in huge_zonelist()
1925 struct zonelist *zl; in alloc_page_interleave()
1965 struct zonelist *zl; in alloc_pages_vma()
hugetlb.c
883 struct zonelist *zonelist; in dequeue_huge_page_vma() local
903 zonelist = huge_zonelist(vma, address, in dequeue_huge_page_vma()
906 for_each_zone_zonelist_nodemask(zone, z, zonelist, in dequeue_huge_page_vma()
1494 struct zonelist *zl; in __hugetlb_alloc_buddy_huge_page()
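The mempolicy and hugetlb results are two halves of one pattern: huge_zonelist() (mempolicy.h/mempolicy.c above) resolves a VMA's policy at a given address into a zonelist plus an optional nodemask, which dequeue_huge_page_vma() then scans with for_each_zone_zonelist_nodemask(). A hedged sketch of that calling pattern; the function and its use of GFP_HIGHUSER_MOVABLE (standing in for the real per-hstate gfp mask) are illustrative:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/mempolicy.h>

/* Illustrative only: resolve a VMA's memory policy into a zonelist and
 * nodemask, then return the first zone that walk would visit. */
static struct zone *first_policy_zone(struct vm_area_struct *vma,
				      unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone, *found = NULL;

	zonelist = huge_zonelist(vma, address, GFP_HIGHUSER_MOVABLE,
				 &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(GFP_HIGHUSER_MOVABLE),
					nodemask) {
		found = zone;		/* first zone the policy allows */
		break;
	}

	/* huge_zonelist() may take a reference on the policy. */
	mpol_cond_put(mpol);
	return found;
}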
slab.c
3004 struct zonelist *zonelist; in fallback_alloc() local
3020 zonelist = node_zonelist(mempolicy_slab_node(), flags); in fallback_alloc()
3027 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in fallback_alloc()
compaction.c
1539 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in try_to_compact_pages()
slub.c
1712 struct zonelist *zonelist; in get_any_partial() local
1743 zonelist = node_zonelist(mempolicy_slab_node(), flags); in get_any_partial()
1744 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in get_any_partial()
memcontrol.c
1340 .zonelist = NULL, in mem_cgroup_out_of_memory()
/linux-4.4.14/Documentation/vm/
numa
72 an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
82 a default zonelist order based on the sizes of the various zone types relative
84 default zonelist order may be overridden using the numa_zonelist_order kernel
90 Linux will attempt to allocate from the first node in the appropriate zonelist
93 nodes' zones in the selected zonelist looking for the first zone in the list
121 zonelist--will not be the node itself. Rather, it will be the node that the
numa_memory_policy.txt
203 allocation will follow the per node zonelist.
/linux-4.4.14/drivers/tty/
sysrq.c
358 .zonelist = node_zonelist(first_memory_node, gfp_mask), in moom_callback()
/linux-4.4.14/arch/parisc/mm/
init.c
685 struct zonelist *zl; in show_mem()
/linux-4.4.14/Documentation/sysctl/
vm.txt
529 In non-NUMA case, a zonelist for GFP_KERNEL is ordered as following.
535 Assume 2 node NUMA and below is zonelist of Node(0)'s GFP_KERNEL
/linux-4.4.14/Documentation/
kernel-parameters.txt
2616 numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.