Home
last modified time | relevance | path

Searched refs:zonelist (Results 1 – 20 of 20) sorted by relevance

/linux-4.1.27/mm/
oom_kill.c:198 static enum oom_constraint constrained_alloc(struct zonelist *zonelist, in constrained_alloc() argument
211 if (!zonelist) in constrained_alloc()
234 for_each_zone_zonelist_nodemask(zone, z, zonelist, in constrained_alloc()
248 static enum oom_constraint constrained_alloc(struct zonelist *zonelist, in constrained_alloc() argument
653 bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask) in oom_zonelist_trylock() argument
660 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
670 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_trylock()
683 void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask) in oom_zonelist_unlock() argument
689 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) in oom_zonelist_unlock()
707 static void __out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, in __out_of_memory() argument
[all …]
page_alloc.c:1954 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) in zlc_setup() argument
1959 zlc = zonelist->zlcache_ptr; in zlc_setup()
1996 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, in zlc_zone_worth_trying() argument
2003 zlc = zonelist->zlcache_ptr; in zlc_zone_worth_trying()
2007 i = z - zonelist->_zonerefs; in zlc_zone_worth_trying()
2019 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) in zlc_mark_zone_full() argument
2024 zlc = zonelist->zlcache_ptr; in zlc_mark_zone_full()
2028 i = z - zonelist->_zonerefs; in zlc_mark_zone_full()
2037 static void zlc_clear_zones_full(struct zonelist *zonelist) in zlc_clear_zones_full() argument
2041 zlc = zonelist->zlcache_ptr; in zlc_clear_zones_full()
[all …]
mm_init.c:35 struct zonelist *zonelist; in mminit_verify_zonelist() local
44 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
55 for_each_zone_zonelist(zone, z, zonelist, zoneid) { in mminit_verify_zonelist()
vmscan.c:2453 static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) in shrink_zones() argument
2472 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2564 static unsigned long do_try_to_free_pages(struct zonelist *zonelist, in do_try_to_free_pages() argument
2581 zones_reclaimable = shrink_zones(zonelist, sc); in do_try_to_free_pages()
2677 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, in throttle_direct_reclaim() argument
2715 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
2761 unsigned long try_to_free_pages(struct zonelist *zonelist, int order, in try_to_free_pages() argument
2781 if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask)) in try_to_free_pages()
2788 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_pages()
2840 struct zonelist *zonelist; in try_to_free_mem_cgroup_pages() local
[all …]
internal.h:126 struct zonelist *zonelist; member
mempolicy.c:1650 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, in policy_zonelist()
1721 struct zonelist *zonelist; in mempolicy_slab_node() local
1724 zonelist = &NODE_DATA(node)->node_zonelists[0]; in mempolicy_slab_node()
1725 (void)first_zones_zonelist(zonelist, highest_zoneidx, in mempolicy_slab_node()
1809 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, in huge_zonelist()
1813 struct zonelist *zl; in huge_zonelist()
1928 struct zonelist *zl; in alloc_page_interleave()
1968 struct zonelist *zl; in alloc_pages_vma()
hugetlb.c:634 struct zonelist *zonelist; in dequeue_huge_page_vma() local
654 zonelist = huge_zonelist(vma, address, in dequeue_huge_page_vma()
657 for_each_zone_zonelist_nodemask(zone, z, zonelist, in dequeue_huge_page_vma()
slab.c:3013 struct zonelist *zonelist; in fallback_alloc() local
3029 zonelist = node_zonelist(mempolicy_slab_node(), flags); in fallback_alloc()
3036 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in fallback_alloc()
slub.c:1669 struct zonelist *zonelist; in get_any_partial() local
1700 zonelist = node_zonelist(mempolicy_slab_node(), flags); in get_any_partial()
1701 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { in get_any_partial()
compaction.c:1499 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, in try_to_compact_pages()
/linux-4.1.27/include/linux/
oom.h:10 struct zonelist;
65 extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
66 extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
76 extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
gfp.h:280 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) in node_zonelist()
294 struct zonelist *zonelist, nodemask_t *nodemask);
298 struct zonelist *zonelist) in __alloc_pages() argument
300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages()
mmzone.h:685 struct zonelist { struct
720 struct zonelist node_zonelists[MAX_ZONELISTS];
995 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, in first_zones_zonelist() argument
1000 struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs, in first_zones_zonelist()
mempolicy.h:148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
257 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, in huge_zonelist()
swap.h:319 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
/linux-4.1.27/Documentation/vm/
numa:72 an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
82 a default zonelist order based on the sizes of the various zone types relative
84 default zonelist order may be overridden using the numa_zonelist_order kernel
90 Linux will attempt to allocate from the first node in the appropriate zonelist
93 nodes' zones in the selected zonelist looking for the first zone in the list
121 zonelist--will not be the node itself. Rather, it will be the node that the
numa_memory_policy.txt:203 allocation will follow the per node zonelist.
/linux-4.1.27/arch/parisc/mm/
init.c:675 struct zonelist *zl; in show_mem()
/linux-4.1.27/Documentation/sysctl/
vm.txt:529 In non-NUMA case, a zonelist for GFP_KERNEL is ordered as following.
535 Assume 2 node NUMA and below is zonelist of Node(0)'s GFP_KERNEL
/linux-4.1.27/Documentation/
kernel-parameters.txt:2539 numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.