Lines matching refs:zone — each hit shows the original source line number, the matched source line, the enclosing function, and whether zone is a function argument or a local variable there.

164 static unsigned long zone_reclaimable_pages(struct zone *zone) in zone_reclaimable_pages() argument
168 nr = zone_page_state(zone, NR_ACTIVE_FILE) + in zone_reclaimable_pages()
169 zone_page_state(zone, NR_INACTIVE_FILE); in zone_reclaimable_pages()
172 nr += zone_page_state(zone, NR_ACTIVE_ANON) + in zone_reclaimable_pages()
173 zone_page_state(zone, NR_INACTIVE_ANON); in zone_reclaimable_pages()
178 bool zone_reclaimable(struct zone *zone) in zone_reclaimable() argument
180 return zone_page_state(zone, NR_PAGES_SCANNED) < in zone_reclaimable()
181 zone_reclaimable_pages(zone) * 6; in zone_reclaimable()
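The hits above appear to come from a zone-based mm/vmscan.c (before reclaim was reworked around per-node LRU lists), and lines 164-181 sketch its reclaimability test: a zone still counts as reclaimable while the pages scanned so far stay below six times its reclaimable pages (file LRU pages, plus anon LRU pages when swap is usable). The standalone C model below restates that arithmetic; every name in it is hypothetical, and the swap-availability guard is an assumption, since the corresponding conditional is not among the matched lines.

```c
/* Standalone model of the "scanned < reclaimable * 6" heuristic visible in
 * zone_reclaimable()/zone_reclaimable_pages(). All names are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct zone_counters {
	unsigned long active_file, inactive_file;
	unsigned long active_anon, inactive_anon;
	unsigned long pages_scanned;
	bool swap_available;	/* assumption: anon pages only count when swap exists */
};

static unsigned long reclaimable_pages(const struct zone_counters *z)
{
	unsigned long nr = z->active_file + z->inactive_file;

	if (z->swap_available)
		nr += z->active_anon + z->inactive_anon;
	return nr;
}

static bool zone_is_reclaimable(const struct zone_counters *z)
{
	/* Give up on a zone once six times its reclaimable pages were scanned. */
	return z->pages_scanned < reclaimable_pages(z) * 6;
}

int main(void)
{
	struct zone_counters z = {
		.active_file = 1000, .inactive_file = 3000,
		.active_anon = 500, .inactive_anon = 500,
		.pages_scanned = 20000, .swap_available = true,
	};

	printf("reclaimable: %s\n", zone_is_reclaimable(&z) ? "yes" : "no");
	return 0;
}
```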
844 struct zone *zone, in shrink_page_list() argument
882 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
961 test_bit(ZONE_WRITEBACK, &zone->flags)) { in shrink_page_list()
1043 !test_bit(ZONE_DIRTY, &zone->flags))) { in shrink_page_list()
1187 unsigned long reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1207 ret = shrink_page_list(&clean_pages, zone, &sc, in reclaim_clean_pages_from_list()
1211 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); in reclaim_clean_pages_from_list()
1383 struct zone *zone = page_zone(page); in isolate_lru_page() local
1386 spin_lock_irq(&zone->lru_lock); in isolate_lru_page()
1387 lruvec = mem_cgroup_page_lruvec(page, zone); in isolate_lru_page()
1395 spin_unlock_irq(&zone->lru_lock); in isolate_lru_page()
1407 static int too_many_isolated(struct zone *zone, int file, in too_many_isolated() argument
1419 inactive = zone_page_state(zone, NR_INACTIVE_FILE); in too_many_isolated()
1420 isolated = zone_page_state(zone, NR_ISOLATED_FILE); in too_many_isolated()
1422 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in too_many_isolated()
1423 isolated = zone_page_state(zone, NR_ISOLATED_ANON); in too_many_isolated()
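For too_many_isolated(), only the counter reads are matched (inactive vs. isolated, chosen per file or anon list); the comparison that actually throttles reclaim is not among the hits. A minimal sketch, assuming the throttle simply trips once isolated pages outnumber the remaining inactive ones; the function name is hypothetical.

```c
#include <stdbool.h>

/* Hypothetical model: direct reclaimers back off when more pages are
 * isolated from an LRU list than remain on its inactive side. */
bool too_many_isolated_model(unsigned long inactive, unsigned long isolated)
{
	return isolated > inactive;	/* assumed form of the final check */
}
```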
1441 struct zone *zone = lruvec_zone(lruvec); in putback_inactive_pages() local
1454 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1456 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1460 lruvec = mem_cgroup_page_lruvec(page, zone); in putback_inactive_pages()
1477 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1480 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1524 struct zone *zone = lruvec_zone(lruvec); in shrink_inactive_list() local
1527 while (unlikely(too_many_isolated(zone, file, sc))) { in shrink_inactive_list()
1542 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1547 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_inactive_list()
1548 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
1551 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_inactive_list()
1553 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); in shrink_inactive_list()
1555 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); in shrink_inactive_list()
1557 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1562 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, in shrink_inactive_list()
1567 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1573 __count_zone_vm_events(PGSTEAL_KSWAPD, zone, in shrink_inactive_list()
1576 __count_zone_vm_events(PGSTEAL_DIRECT, zone, in shrink_inactive_list()
1582 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
1584 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1604 set_bit(ZONE_WRITEBACK, &zone->flags); in shrink_inactive_list()
1616 set_bit(ZONE_CONGESTED, &zone->flags); in shrink_inactive_list()
1625 set_bit(ZONE_DIRTY, &zone->flags); in shrink_inactive_list()
1644 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); in shrink_inactive_list()
1646 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, in shrink_inactive_list()
1647 zone_idx(zone), in shrink_inactive_list()
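The shrink_inactive_list() hits trace the isolation accounting round trip: under zone->lru_lock, nr_taken pages are moved out of the LRU counter and into NR_ISOLATED (lines 1547-1548), shrink_page_list() then runs with the lock dropped (line 1562), and the same nr_taken is finally subtracted from NR_ISOLATED again (line 1582). The sketch below only illustrates that the two updates must stay symmetric; the putback half of the accounting is not shown in the hits, so it is an assumption, and all names are stand-ins.

```c
/* Minimal model of the isolate / reclaim / putback counter symmetry in
 * shrink_inactive_list(). Locking is elided; names are hypothetical. */
struct lru_counters { long on_lru, isolated; };

void isolate_pages(struct lru_counters *c, long nr_taken)
{
	c->on_lru   -= nr_taken;	/* like __mod_zone_page_state(NR_LRU_BASE + lru, -nr_taken) */
	c->isolated += nr_taken;	/* like __mod_zone_page_state(NR_ISOLATED_..., nr_taken) */
}

void finish_isolation(struct lru_counters *c, long nr_taken, long nr_reclaimed)
{
	/* Assumed: pages that were not reclaimed return to an LRU list. */
	c->on_lru   += nr_taken - nr_reclaimed;
	c->isolated -= nr_taken;	/* mirrors the -nr_taken update at line 1582 */
}
```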
1677 struct zone *zone = lruvec_zone(lruvec); in move_active_pages_to_lru() local
1684 lruvec = mem_cgroup_page_lruvec(page, zone); in move_active_pages_to_lru()
1700 spin_unlock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1703 spin_lock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1708 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); in move_active_pages_to_lru()
1729 struct zone *zone = lruvec_zone(lruvec); in shrink_active_list() local
1738 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1743 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_active_list()
1747 __count_zone_vm_events(PGREFILL, zone, nr_scanned); in shrink_active_list()
1748 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_active_list()
1749 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
1750 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1795 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1806 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
1807 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1814 static int inactive_anon_is_low_global(struct zone *zone) in inactive_anon_is_low_global() argument
1818 active = zone_page_state(zone, NR_ACTIVE_ANON); in inactive_anon_is_low_global()
1819 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in inactive_anon_is_low_global()
1821 if (inactive * zone->inactive_ratio < active) in inactive_anon_is_low_global()
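Line 1821 carries the whole inactive_anon_is_low_global() heuristic: the inactive anon list is low when inactive * zone->inactive_ratio < active, i.e. the kernel wants at least one inactive anon page per inactive_ratio active ones. A one-function restatement with hypothetical names:

```c
#include <stdbool.h>

/* inactive_ratio scales with zone size; e.g. a ratio of 3 means the target
 * is at least one inactive anon page for every three active ones. */
bool inactive_anon_is_low_model(unsigned long active, unsigned long inactive,
				unsigned int inactive_ratio)
{
	return inactive * inactive_ratio < active;
}
```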
1923 struct zone *zone = lruvec_zone(lruvec); in get_scan_count() local
1944 if (!zone_reclaimable(zone)) in get_scan_count()
1993 zonefree = zone_page_state(zone, NR_FREE_PAGES); in get_scan_count()
1994 zonefile = zone_page_state(zone, NR_ACTIVE_FILE) + in get_scan_count()
1995 zone_page_state(zone, NR_INACTIVE_FILE); in get_scan_count()
1997 if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) { in get_scan_count()
2038 spin_lock_irq(&zone->lru_lock); in get_scan_count()
2059 spin_unlock_irq(&zone->lru_lock); in get_scan_count()
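In the get_scan_count() hits, line 1997 compares the zone's free pages plus its file LRU pages against the high watermark; if even that sum cannot reach the watermark, trimming the page cache alone cannot relieve the pressure. What the function does with that result is not among the hits, so treating it as a "must also scan anon" signal below is an assumption; the name is hypothetical.

```c
#include <stdbool.h>

/* Assumed consequence: when free + file pages cannot reach the high
 * watermark, reclaim has to look at anonymous memory as well. */
bool must_scan_anon_model(unsigned long zonefree, unsigned long zonefile,
			  unsigned long high_wmark)
{
	return zonefile + zonefree <= high_wmark;
}
```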
2253 static inline bool should_continue_reclaim(struct zone *zone, in should_continue_reclaim() argument
2293 inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE); in should_continue_reclaim()
2295 inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON); in should_continue_reclaim()
2301 switch (compaction_suitable(zone, sc->order, 0, 0)) { in should_continue_reclaim()
2310 static bool shrink_zone(struct zone *zone, struct scan_control *sc, in shrink_zone() argument
2320 .zone = zone, in shrink_zone()
2342 lruvec = mem_cgroup_zone_lruvec(zone, memcg); in shrink_zone()
2350 shrink_slab(sc->gfp_mask, zone_to_nid(zone), in shrink_zone()
2376 shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, in shrink_zone()
2392 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, in shrink_zone()
2402 static inline bool compaction_ready(struct zone *zone, int order) in compaction_ready() argument
2413 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in compaction_ready()
2414 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in compaction_ready()
2415 watermark = high_wmark_pages(zone) + balance_gap + (2UL << order); in compaction_ready()
2416 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0); in compaction_ready()
2422 if (compaction_deferred(zone, order)) in compaction_ready()
2429 if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED) in compaction_ready()
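Lines 2413-2416 spell out the compaction_ready() watermark: a balance gap of min(low watermark, managed_pages / KSWAPD_ZONE_BALANCE_GAP_RATIO, rounded up) is added to the high watermark together with 2^(order+1) pages of slack, and the result is fed to zone_watermark_ok_safe(). The worked example below redoes that arithmetic with made-up numbers; the ratio value of 100 is assumed from memory rather than shown in the hits.

```c
#include <stdio.h>

#define BALANCE_GAP_RATIO 100UL	/* assumed value of KSWAPD_ZONE_BALANCE_GAP_RATIO */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long managed_pages = 262144;	/* 1 GiB worth of 4 KiB pages */
	unsigned long low_wmark = 2048, high_wmark = 3072;
	unsigned int order = 3;			/* allocation order being compacted for */

	unsigned long balance_gap = DIV_ROUND_UP(managed_pages, BALANCE_GAP_RATIO);
	if (low_wmark < balance_gap)
		balance_gap = low_wmark;

	unsigned long watermark = high_wmark + balance_gap + (2UL << order);
	printf("compaction watermark: %lu pages for an order-%u request\n",
	       watermark, order);
	return 0;
}
```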
2456 struct zone *zone; in shrink_zones() local
2472 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2476 if (!populated_zone(zone)) in shrink_zones()
2480 while (!populated_zone(zone->zone_pgdat->node_zones + in shrink_zones()
2489 if (!cpuset_zone_allowed(zone, in shrink_zones()
2494 !zone_reclaimable(zone)) in shrink_zones()
2509 compaction_ready(zone, sc->order)) { in shrink_zones()
2521 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in shrink_zones()
2531 if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx)) in shrink_zones()
2535 !reclaimable && zone_reclaimable(zone)) in shrink_zones()
2637 struct zone *zone; in pfmemalloc_watermark_ok() local
2644 zone = &pgdat->node_zones[i]; in pfmemalloc_watermark_ok()
2645 if (!populated_zone(zone)) in pfmemalloc_watermark_ok()
2648 pfmemalloc_reserve += min_wmark_pages(zone); in pfmemalloc_watermark_ok()
2649 free_pages += zone_page_state(zone, NR_FREE_PAGES); in pfmemalloc_watermark_ok()
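The pfmemalloc_watermark_ok() hits show the node-wide sums being built: each populated zone contributes its min watermark to pfmemalloc_reserve and its NR_FREE_PAGES to free_pages. The threshold itself is not among the matched lines; the sketch below assumes the usual "free pages above half the reserve" rule and uses hypothetical types.

```c
#include <stdbool.h>
#include <stddef.h>

struct zone_model { unsigned long min_wmark, free_pages; int populated; };

/* Sum the per-zone reserves and free pages for one node, then apply the
 * assumed "free > reserve / 2" throttling threshold. */
bool pfmemalloc_watermark_ok_model(const struct zone_model *zones, size_t n)
{
	unsigned long reserve = 0, free_pages = 0;

	for (size_t i = 0; i < n; i++) {
		if (!zones[i].populated)
			continue;
		reserve += zones[i].min_wmark;
		free_pages += zones[i].free_pages;
	}
	return free_pages > reserve / 2;	/* assumption: not shown in the hits */
}
```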
2681 struct zone *zone; in throttle_direct_reclaim() local
2715 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
2717 if (zone_idx(zone) > ZONE_NORMAL) in throttle_direct_reclaim()
2721 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
2750 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
2799 struct zone *zone, in mem_cgroup_shrink_node_zone() argument
2809 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in mem_cgroup_shrink_node_zone()
2875 static void age_active_anon(struct zone *zone, struct scan_control *sc) in age_active_anon() argument
2884 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in age_active_anon()
2894 static bool zone_balanced(struct zone *zone, int order, in zone_balanced() argument
2897 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + in zone_balanced()
2901 if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, in zone_balanced()
2936 struct zone *zone = pgdat->node_zones + i; in pgdat_balanced() local
2938 if (!populated_zone(zone)) in pgdat_balanced()
2941 managed_pages += zone->managed_pages; in pgdat_balanced()
2950 if (!zone_reclaimable(zone)) { in pgdat_balanced()
2951 balanced_pages += zone->managed_pages; in pgdat_balanced()
2955 if (zone_balanced(zone, order, 0, i)) in pgdat_balanced()
2956 balanced_pages += zone->managed_pages; in pgdat_balanced()
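In pgdat_balanced(), every populated zone adds its managed pages to the node total, and those pages also count as balanced either when the zone is no longer reclaimable (lines 2950-2951) or when zone_balanced() passes (lines 2955-2956). The final proportion test is not among the hits, so the model below assumes the conventional "at least a quarter of the node's managed pages balanced" rule; all names are hypothetical.

```c
#include <stdbool.h>
#include <stddef.h>

struct zone_state { unsigned long managed; bool populated, reclaimable, balanced; };

bool pgdat_balanced_model(const struct zone_state *zones, size_t n)
{
	unsigned long managed = 0, balanced = 0;

	for (size_t i = 0; i < n; i++) {
		if (!zones[i].populated)
			continue;
		managed += zones[i].managed;
		/* Unreclaimable zones are treated as balanced so they cannot
		 * keep kswapd spinning forever. */
		if (!zones[i].reclaimable || zones[i].balanced)
			balanced += zones[i].managed;
	}
	/* Assumed threshold: 25% of the node's managed pages. */
	return balanced >= managed / 4;
}
```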
3007 static bool kswapd_shrink_zone(struct zone *zone, in kswapd_shrink_zone() argument
3017 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); in kswapd_shrink_zone()
3026 compaction_suitable(zone, sc->order, 0, classzone_idx) in kswapd_shrink_zone()
3036 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in kswapd_shrink_zone()
3037 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in kswapd_shrink_zone()
3043 lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone)); in kswapd_shrink_zone()
3044 if (!lowmem_pressure && zone_balanced(zone, testorder, in kswapd_shrink_zone()
3048 shrink_zone(zone, sc, zone_idx(zone) == classzone_idx); in kswapd_shrink_zone()
3053 clear_bit(ZONE_WRITEBACK, &zone->flags); in kswapd_shrink_zone()
3061 if (zone_reclaimable(zone) && in kswapd_shrink_zone()
3062 zone_balanced(zone, testorder, 0, classzone_idx)) { in kswapd_shrink_zone()
3063 clear_bit(ZONE_CONGESTED, &zone->flags); in kswapd_shrink_zone()
3064 clear_bit(ZONE_DIRTY, &zone->flags); in kswapd_shrink_zone()
3120 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3122 if (!populated_zone(zone)) in balance_pgdat()
3126 !zone_reclaimable(zone)) in balance_pgdat()
3133 age_active_anon(zone, &sc); in balance_pgdat()
3146 if (!zone_balanced(zone, order, 0, 0)) { in balance_pgdat()
3154 clear_bit(ZONE_CONGESTED, &zone->flags); in balance_pgdat()
3155 clear_bit(ZONE_DIRTY, &zone->flags); in balance_pgdat()
3163 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3165 if (!populated_zone(zone)) in balance_pgdat()
3174 zone_watermark_ok(zone, order, in balance_pgdat()
3175 low_wmark_pages(zone), in balance_pgdat()
3197 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3199 if (!populated_zone(zone)) in balance_pgdat()
3203 !zone_reclaimable(zone)) in balance_pgdat()
3212 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in balance_pgdat()
3223 if (kswapd_shrink_zone(zone, end_zone, in balance_pgdat()
3446 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) in wakeup_kswapd() argument
3450 if (!populated_zone(zone)) in wakeup_kswapd()
3453 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) in wakeup_kswapd()
3455 pgdat = zone->zone_pgdat; in wakeup_kswapd()
3462 if (zone_balanced(zone, order, 0, 0)) in wakeup_kswapd()
3465 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); in wakeup_kswapd()
3616 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) in zone_unmapped_file_pages() argument
3618 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); in zone_unmapped_file_pages()
3619 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + in zone_unmapped_file_pages()
3620 zone_page_state(zone, NR_ACTIVE_FILE); in zone_unmapped_file_pages()
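zone_unmapped_file_pages() reads the mapped-file count and the total file LRU size (lines 3618-3620); the return value is presumably the difference clamped at zero, although that line is not among the hits. A one-liner under that assumption:

```c
/* Assumed: unmapped page cache is the file LRU minus mapped file pages,
 * never negative. */
unsigned long unmapped_file_pages_model(unsigned long file_lru,
					unsigned long file_mapped)
{
	return file_lru > file_mapped ? file_lru - file_mapped : 0;
}
```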
3631 static long zone_pagecache_reclaimable(struct zone *zone) in zone_pagecache_reclaimable() argument
3643 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); in zone_pagecache_reclaimable()
3645 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); in zone_pagecache_reclaimable()
3649 delta += zone_page_state(zone, NR_FILE_DIRTY); in zone_pagecache_reclaimable()
3661 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in __zone_reclaim() argument
3688 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { in __zone_reclaim()
3694 shrink_zone(zone, &sc, true); in __zone_reclaim()
3704 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in zone_reclaim() argument
3719 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && in zone_reclaim()
3720 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) in zone_reclaim()
3723 if (!zone_reclaimable(zone)) in zone_reclaim()
3738 node_id = zone_to_nid(zone); in zone_reclaim()
3742 if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) in zone_reclaim()
3745 ret = __zone_reclaim(zone, gfp_mask, order); in zone_reclaim()
3746 clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); in zone_reclaim()
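Lines 3742-3746 show zone_reclaim()'s per-zone "trylock": test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags) turns away concurrent callers, and clear_bit() releases the slot once __zone_reclaim() returns. The userspace sketch below reproduces that pattern with C11 atomics; the flag name and bit position are hypothetical.

```c
#include <stdatomic.h>
#include <stdbool.h>

#define RECLAIM_LOCKED_BIT (1UL << 0)	/* hypothetical bit position */

/* Returns false when another thread already holds the reclaim "lock". */
bool try_enter_zone_reclaim(atomic_ulong *flags)
{
	unsigned long old = atomic_fetch_or(flags, RECLAIM_LOCKED_BIT);
	return !(old & RECLAIM_LOCKED_BIT);
}

void leave_zone_reclaim(atomic_ulong *flags)
{
	atomic_fetch_and(flags, ~RECLAIM_LOCKED_BIT);
}
```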
3785 struct zone *zone = NULL; in check_move_unevictable_pages() local
3792 struct zone *pagezone; in check_move_unevictable_pages()
3796 if (pagezone != zone) { in check_move_unevictable_pages()
3797 if (zone) in check_move_unevictable_pages()
3798 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3799 zone = pagezone; in check_move_unevictable_pages()
3800 spin_lock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3802 lruvec = mem_cgroup_page_lruvec(page, zone); in check_move_unevictable_pages()
3818 if (zone) { in check_move_unevictable_pages()
3821 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
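Finally, the check_move_unevictable_pages() hits show a classic lock-batching idiom: walk an arbitrary batch of pages and only drop and re-take zone->lru_lock when the current page sits in a different zone than the previous one (lines 3796-3800), unlocking once at the end (line 3821). A compact userspace analogue with pthread mutexes; every name is hypothetical and the locks are assumed to be initialized by the caller.

```c
#include <pthread.h>
#include <stddef.h>

struct zone_lock { pthread_mutex_t lru_lock; };
struct page_ref { struct zone_lock *zone; /* ... per-page payload ... */ };

/* Process a batch of pages, re-acquiring the per-zone lock only when the
 * zone actually changes, as check_move_unevictable_pages() does. */
void process_batch(struct page_ref *pages, size_t n)
{
	struct zone_lock *zone = NULL;

	for (size_t i = 0; i < n; i++) {
		struct zone_lock *pagezone = pages[i].zone;

		if (pagezone != zone) {
			if (zone)
				pthread_mutex_unlock(&zone->lru_lock);
			zone = pagezone;
			pthread_mutex_lock(&zone->lru_lock);
		}
		/* ... per-page work done under zone->lru_lock ... */
	}
	if (zone)
		pthread_mutex_unlock(&zone->lru_lock);
}
```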