Lines matching refs:zone (each entry shows the source line number, the matching line, the enclosing function, and whether the reference is a function argument or a local variable)

195 static unsigned long zone_reclaimable_pages(struct zone *zone)  in zone_reclaimable_pages()  argument
199 nr = zone_page_state(zone, NR_ACTIVE_FILE) + in zone_reclaimable_pages()
200 zone_page_state(zone, NR_INACTIVE_FILE); in zone_reclaimable_pages()
203 nr += zone_page_state(zone, NR_ACTIVE_ANON) + in zone_reclaimable_pages()
204 zone_page_state(zone, NR_INACTIVE_ANON); in zone_reclaimable_pages()
209 bool zone_reclaimable(struct zone *zone) in zone_reclaimable() argument
211 return zone_page_state(zone, NR_PAGES_SCANNED) < in zone_reclaimable()
212 zone_reclaimable_pages(zone) * 6; in zone_reclaimable()
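
Taken together, the two helpers above form the "zone is still reclaimable" test: a zone counts its file LRU pages, adds the anon LRU pages only when swap is available, and is treated as reclaimable until six times that total has been scanned. A sketch consistent with the matched lines; the swap-availability guard is an assumption, since the lines between 200 and 203 are not part of the match list:

static unsigned long zone_reclaimable_pages(struct zone *zone)
{
        unsigned long nr;

        /* File-backed pages are always candidates for reclaim. */
        nr = zone_page_state(zone, NR_ACTIVE_FILE) +
             zone_page_state(zone, NR_INACTIVE_FILE);

        /* Anon pages only count when there is swap to move them to
         * (assumed guard; not among the matched lines). */
        if (get_nr_swap_pages() > 0)
                nr += zone_page_state(zone, NR_ACTIVE_ANON) +
                      zone_page_state(zone, NR_INACTIVE_ANON);

        return nr;
}

/* A zone stays "reclaimable" until six times its reclaimable pages
 * have been scanned; NR_PAGES_SCANNED is reset when pages are freed. */
bool zone_reclaimable(struct zone *zone)
{
        return zone_page_state(zone, NR_PAGES_SCANNED) <
                zone_reclaimable_pages(zone) * 6;
}
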
881 struct zone *zone, in shrink_page_list() argument
919 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
998 test_bit(ZONE_WRITEBACK, &zone->flags)) { in shrink_page_list()
1084 !test_bit(ZONE_DIRTY, &zone->flags))) { in shrink_page_list()
1234 unsigned long reclaim_clean_pages_from_list(struct zone *zone, in reclaim_clean_pages_from_list() argument
1254 ret = shrink_page_list(&clean_pages, zone, &sc, in reclaim_clean_pages_from_list()
1258 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); in reclaim_clean_pages_from_list()
1431 struct zone *zone = page_zone(page); in isolate_lru_page() local
1434 spin_lock_irq(&zone->lru_lock); in isolate_lru_page()
1435 lruvec = mem_cgroup_page_lruvec(page, zone); in isolate_lru_page()
1443 spin_unlock_irq(&zone->lru_lock); in isolate_lru_page()
1455 static int too_many_isolated(struct zone *zone, int file, in too_many_isolated() argument
1467 inactive = zone_page_state(zone, NR_INACTIVE_FILE); in too_many_isolated()
1468 isolated = zone_page_state(zone, NR_ISOLATED_FILE); in too_many_isolated()
1470 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in too_many_isolated()
1471 isolated = zone_page_state(zone, NR_ISOLATED_ANON); in too_many_isolated()
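
too_many_isolated() is the throttle that keeps direct reclaimers from piling up: once more pages are isolated from an LRU than remain on the matching inactive list, callers wait in shrink_inactive_list() (see line 1575 above). A sketch; the kswapd and GFP guards are assumptions, only the file/anon counter reads come from the match list:

static int too_many_isolated(struct zone *zone, int file,
                             struct scan_control *sc)
{
        unsigned long inactive, isolated;

        /* kswapd itself is never throttled here (assumed guard). */
        if (current_is_kswapd())
                return 0;

        if (file) {
                inactive = zone_page_state(zone, NR_INACTIVE_FILE);
                isolated = zone_page_state(zone, NR_ISOLATED_FILE);
        } else {
                inactive = zone_page_state(zone, NR_INACTIVE_ANON);
                isolated = zone_page_state(zone, NR_ISOLATED_ANON);
        }

        /* GFP_NOIO/GFP_NOFS callers may isolate more before being
         * throttled, to avoid deadlocking against writeback (assumed). */
        if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                inactive >>= 3;

        return isolated > inactive;
}
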
1489 struct zone *zone = lruvec_zone(lruvec); in putback_inactive_pages() local
1502 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1504 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1508 lruvec = mem_cgroup_page_lruvec(page, zone); in putback_inactive_pages()
1525 spin_unlock_irq(&zone->lru_lock); in putback_inactive_pages()
1528 spin_lock_irq(&zone->lru_lock); in putback_inactive_pages()
1572 struct zone *zone = lruvec_zone(lruvec); in shrink_inactive_list() local
1575 while (unlikely(too_many_isolated(zone, file, sc))) { in shrink_inactive_list()
1590 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1595 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_inactive_list()
1596 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_inactive_list()
1599 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_inactive_list()
1601 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned); in shrink_inactive_list()
1603 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned); in shrink_inactive_list()
1605 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1610 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, in shrink_inactive_list()
1615 spin_lock_irq(&zone->lru_lock); in shrink_inactive_list()
1621 __count_zone_vm_events(PGSTEAL_KSWAPD, zone, in shrink_inactive_list()
1624 __count_zone_vm_events(PGSTEAL_DIRECT, zone, in shrink_inactive_list()
1630 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
1632 spin_unlock_irq(&zone->lru_lock); in shrink_inactive_list()
1652 set_bit(ZONE_WRITEBACK, &zone->flags); in shrink_inactive_list()
1664 set_bit(ZONE_CONGESTED, &zone->flags); in shrink_inactive_list()
1673 set_bit(ZONE_DIRTY, &zone->flags); in shrink_inactive_list()
1692 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); in shrink_inactive_list()
1694 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, in shrink_inactive_list()
1695 zone_idx(zone), in shrink_inactive_list()
1725 struct zone *zone = lruvec_zone(lruvec); in move_active_pages_to_lru() local
1732 lruvec = mem_cgroup_page_lruvec(page, zone); in move_active_pages_to_lru()
1748 spin_unlock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1751 spin_lock_irq(&zone->lru_lock); in move_active_pages_to_lru()
1756 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); in move_active_pages_to_lru()
1777 struct zone *zone = lruvec_zone(lruvec); in shrink_active_list() local
1786 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1791 __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned); in shrink_active_list()
1795 __count_zone_vm_events(PGREFILL, zone, nr_scanned); in shrink_active_list()
1796 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken); in shrink_active_list()
1797 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); in shrink_active_list()
1798 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1843 spin_lock_irq(&zone->lru_lock); in shrink_active_list()
1854 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
1855 spin_unlock_irq(&zone->lru_lock); in shrink_active_list()
1862 static bool inactive_anon_is_low_global(struct zone *zone) in inactive_anon_is_low_global() argument
1866 active = zone_page_state(zone, NR_ACTIVE_ANON); in inactive_anon_is_low_global()
1867 inactive = zone_page_state(zone, NR_INACTIVE_ANON); in inactive_anon_is_low_global()
1869 return inactive * zone->inactive_ratio < active; in inactive_anon_is_low_global()
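
The comparison at line 1869 is the whole global test: the inactive anon list is considered low once the active list exceeds it scaled by the per-zone inactive_ratio (a factor derived from zone size, so larger zones tolerate proportionally smaller inactive lists). Filled out as a sketch:

static bool inactive_anon_is_low_global(struct zone *zone)
{
        unsigned long active, inactive;

        active = zone_page_state(zone, NR_ACTIVE_ANON);
        inactive = zone_page_state(zone, NR_INACTIVE_ANON);

        /* Deactivate more anon pages once the inactive list falls below
         * roughly active / inactive_ratio. */
        return inactive * zone->inactive_ratio < active;
}
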
1968 struct zone *zone = lruvec_zone(lruvec); in get_scan_count() local
1989 if (!zone_reclaimable(zone)) in get_scan_count()
2038 zonefree = zone_page_state(zone, NR_FREE_PAGES); in get_scan_count()
2039 zonefile = zone_page_state(zone, NR_ACTIVE_FILE) + in get_scan_count()
2040 zone_page_state(zone, NR_INACTIVE_FILE); in get_scan_count()
2042 if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) { in get_scan_count()
2083 spin_lock_irq(&zone->lru_lock); in get_scan_count()
2104 spin_unlock_irq(&zone->lru_lock); in get_scan_count()
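
Inside get_scan_count(), the zonefile/zonefree lines (2038-2042) implement the "don't thrash the last of the page cache" rule: when free pages plus file LRU pages no longer cover the high watermark, scanning is forced over to anon. A fragment-style sketch of that branch; the global_reclaim() gate and the scan_balance/goto bookkeeping are assumptions:

        if (global_reclaim(sc)) {
                unsigned long zonefree, zonefile;

                zonefree = zone_page_state(zone, NR_FREE_PAGES);
                zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
                           zone_page_state(zone, NR_INACTIVE_FILE);

                /* If file plus free pages cannot even cover the high
                 * watermark, swapping anon is the only way forward. */
                if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
                        scan_balance = SCAN_ANON;
                        goto out;
                }
        }
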
2317 static inline bool should_continue_reclaim(struct zone *zone, in should_continue_reclaim() argument
2357 inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE); in should_continue_reclaim()
2359 inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON); in should_continue_reclaim()
2365 switch (compaction_suitable(zone, sc->order, 0, 0)) { in should_continue_reclaim()
2374 static bool shrink_zone(struct zone *zone, struct scan_control *sc, in shrink_zone() argument
2384 .zone = zone, in shrink_zone()
2406 lruvec = mem_cgroup_zone_lruvec(zone, memcg); in shrink_zone()
2414 shrink_slab(sc->gfp_mask, zone_to_nid(zone), in shrink_zone()
2440 shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, in shrink_zone()
2456 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, in shrink_zone()
2466 static inline bool compaction_ready(struct zone *zone, int order) in compaction_ready() argument
2477 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in compaction_ready()
2478 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in compaction_ready()
2479 watermark = high_wmark_pages(zone) + balance_gap + (2UL << order); in compaction_ready()
2480 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0); in compaction_ready()
2486 if (compaction_deferred(zone, order)) in compaction_ready()
2493 if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED) in compaction_ready()
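
compaction_ready() decides when direct reclaim for a high-order allocation can stop and let compaction take over: it wants free pages above the high watermark plus a balance gap (capped at the low watermark or managed_pages / KSWAPD_ZONE_BALANCE_GAP_RATIO) plus 2^(order+1) pages, unless compaction is deferred or reports COMPACT_SKIPPED. The matched lines cover nearly the whole helper; reassembled as a sketch:

static inline bool compaction_ready(struct zone *zone, int order)
{
        unsigned long balance_gap, watermark;
        bool watermark_ok;

        /* Keep reclaiming until there is a buffer of free pages large
         * enough to give compaction a reasonable chance of succeeding. */
        balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
                        zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
        watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
        watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0);

        /* If compaction is deferred, reclaim up to the point where it
         * would have a chance once re-enabled. */
        if (compaction_deferred(zone, order))
                return watermark_ok;

        /* If compaction is not even ready to start, keep reclaiming. */
        if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED)
                return false;

        return watermark_ok;
}
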
2520 struct zone *zone; in shrink_zones() local
2536 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
2540 if (!populated_zone(zone)) in shrink_zones()
2544 while (!populated_zone(zone->zone_pgdat->node_zones + in shrink_zones()
2553 if (!cpuset_zone_allowed(zone, in shrink_zones()
2558 !zone_reclaimable(zone)) in shrink_zones()
2573 compaction_ready(zone, sc->order)) { in shrink_zones()
2585 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in shrink_zones()
2595 if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx)) in shrink_zones()
2599 !reclaimable && zone_reclaimable(zone)) in shrink_zones()
2701 struct zone *zone; in pfmemalloc_watermark_ok() local
2708 zone = &pgdat->node_zones[i]; in pfmemalloc_watermark_ok()
2709 if (!populated_zone(zone) || in pfmemalloc_watermark_ok()
2710 zone_reclaimable_pages(zone) == 0) in pfmemalloc_watermark_ok()
2713 pfmemalloc_reserve += min_wmark_pages(zone); in pfmemalloc_watermark_ok()
2714 free_pages += zone_page_state(zone, NR_FREE_PAGES); in pfmemalloc_watermark_ok()
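
pfmemalloc_watermark_ok() is the throttling test for direct reclaimers: it sums the min watermarks and free pages of the populated, still-reclaimable zones up to ZONE_NORMAL and is satisfied while free pages exceed half of that reserve. A sketch; the half-reserve comparison is an assumption, only the per-zone accounting comes from the match list, and the real helper also wakes kswapd when it is about to throttle:

static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
{
        unsigned long pfmemalloc_reserve = 0;
        unsigned long free_pages = 0;
        struct zone *zone;
        int i;

        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
                if (!populated_zone(zone) ||
                    zone_reclaimable_pages(zone) == 0)
                        continue;

                pfmemalloc_reserve += min_wmark_pages(zone);
                free_pages += zone_page_state(zone, NR_FREE_PAGES);
        }

        /* No reserves at all: do not throttle (assumed special case). */
        if (!pfmemalloc_reserve)
                return true;

        /* OK while free pages stay above half of the summed reserves. */
        return free_pages > pfmemalloc_reserve / 2;
}
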
2746 struct zone *zone; in throttle_direct_reclaim() local
2780 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
2782 if (zone_idx(zone) > ZONE_NORMAL) in throttle_direct_reclaim()
2786 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
2815 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
2864 struct zone *zone, in mem_cgroup_shrink_node_zone() argument
2874 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in mem_cgroup_shrink_node_zone()
2940 static void age_active_anon(struct zone *zone, struct scan_control *sc) in age_active_anon() argument
2949 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); in age_active_anon()
2959 static bool zone_balanced(struct zone *zone, int order, in zone_balanced() argument
2962 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + in zone_balanced()
2966 if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, in zone_balanced()
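
zone_balanced() is kswapd's per-zone success test: the zone must clear the high watermark (plus any balance gap the caller passes in) for the requested order, and for order > 0 with compaction built in it must also not be reported COMPACT_SKIPPED. A sketch; the tail of the signature and the return statements are filled in by assumption:

static bool zone_balanced(struct zone *zone, int order,
                          unsigned long balance_gap, int classzone_idx)
{
        if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
                                    balance_gap, classzone_idx))
                return false;

        /* For high-order requests, a zone where compaction cannot even
         * start is not considered balanced. */
        if (IS_ENABLED(CONFIG_COMPACTION) && order &&
            compaction_suitable(zone, order, 0, classzone_idx) ==
                                                        COMPACT_SKIPPED)
                return false;

        return true;
}
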
3001 struct zone *zone = pgdat->node_zones + i; in pgdat_balanced() local
3003 if (!populated_zone(zone)) in pgdat_balanced()
3006 managed_pages += zone->managed_pages; in pgdat_balanced()
3015 if (!zone_reclaimable(zone)) { in pgdat_balanced()
3016 balanced_pages += zone->managed_pages; in pgdat_balanced()
3020 if (zone_balanced(zone, order, 0, i)) in pgdat_balanced()
3021 balanced_pages += zone->managed_pages; in pgdat_balanced()
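
pgdat_balanced() walks the zones up to classzone_idx and adds up managed pages: zones that are no longer reclaimable count as balanced outright (otherwise kswapd could never finish), the rest are checked with zone_balanced(). A sketch of the accounting; the final 25% rule for order > 0 is an assumption, it is not among the matched lines:

static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
{
        unsigned long managed_pages = 0;
        unsigned long balanced_pages = 0;
        int i;

        for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;

                if (!populated_zone(zone))
                        continue;

                managed_pages += zone->managed_pages;

                /* Unreclaimable zones are treated as balanced so kswapd
                 * does not spin on them forever. */
                if (!zone_reclaimable(zone)) {
                        balanced_pages += zone->managed_pages;
                        continue;
                }

                if (zone_balanced(zone, order, 0, i))
                        balanced_pages += zone->managed_pages;
                else if (!order)
                        return false;
        }

        /* For high-order reclaim it is enough that a quarter of the
         * node's managed pages sit in balanced zones (assumed rule). */
        if (order)
                return balanced_pages >= (managed_pages >> 2);
        return true;
}
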
3072 static bool kswapd_shrink_zone(struct zone *zone, in kswapd_shrink_zone() argument
3082 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); in kswapd_shrink_zone()
3091 compaction_suitable(zone, sc->order, 0, classzone_idx) in kswapd_shrink_zone()
3101 balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP( in kswapd_shrink_zone()
3102 zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO)); in kswapd_shrink_zone()
3108 lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone)); in kswapd_shrink_zone()
3109 if (!lowmem_pressure && zone_balanced(zone, testorder, in kswapd_shrink_zone()
3113 shrink_zone(zone, sc, zone_idx(zone) == classzone_idx); in kswapd_shrink_zone()
3118 clear_bit(ZONE_WRITEBACK, &zone->flags); in kswapd_shrink_zone()
3126 if (zone_reclaimable(zone) && in kswapd_shrink_zone()
3127 zone_balanced(zone, testorder, 0, classzone_idx)) { in kswapd_shrink_zone()
3128 clear_bit(ZONE_CONGESTED, &zone->flags); in kswapd_shrink_zone()
3129 clear_bit(ZONE_DIRTY, &zone->flags); in kswapd_shrink_zone()
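
kswapd_shrink_zone() brings those pieces together for one zone: aim for the high watermark, fall back to order-0 reclaim when compaction can finish the job, allow a small balance gap before declaring the zone done, and clear the zone flags once the zone looks healthy again. A condensed in-function sketch; the testorder/lowmem_pressure plumbing and the final return condition are partly assumptions:

        /* Reclaim at least up to the high watermark. */
        sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));

        /* If compaction could satisfy the request, reclaim order-0 only. */
        if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
            compaction_suitable(zone, sc->order, 0, classzone_idx) !=
                                                        COMPACT_SKIPPED)
                testorder = 0;

        /* Tolerate a gap above the high watermark: the smaller of the low
         * watermark and managed_pages / KSWAPD_ZONE_BALANCE_GAP_RATIO. */
        balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
                        zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));

        /* Already balanced and no highmem pressure: nothing to do. */
        lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
        if (!lowmem_pressure && zone_balanced(zone, testorder,
                                              balance_gap, classzone_idx))
                return true;

        shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);

        /* Reclaim has run, so writeback-based stalling is no longer needed. */
        clear_bit(ZONE_WRITEBACK, &zone->flags);

        /* A balanced, still-reclaimable zone is no longer treated as
         * congested or dirty-heavy. */
        if (zone_reclaimable(zone) &&
            zone_balanced(zone, testorder, 0, classzone_idx)) {
                clear_bit(ZONE_CONGESTED, &zone->flags);
                clear_bit(ZONE_DIRTY, &zone->flags);
        }

        return sc->nr_scanned >= sc->nr_to_reclaim;
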
3185 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3187 if (!populated_zone(zone)) in balance_pgdat()
3191 !zone_reclaimable(zone)) in balance_pgdat()
3198 age_active_anon(zone, &sc); in balance_pgdat()
3211 if (!zone_balanced(zone, order, 0, 0)) { in balance_pgdat()
3219 clear_bit(ZONE_CONGESTED, &zone->flags); in balance_pgdat()
3220 clear_bit(ZONE_DIRTY, &zone->flags); in balance_pgdat()
3228 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3230 if (!populated_zone(zone)) in balance_pgdat()
3239 zone_watermark_ok(zone, order, in balance_pgdat()
3240 low_wmark_pages(zone), in balance_pgdat()
3262 struct zone *zone = pgdat->node_zones + i; in balance_pgdat() local
3264 if (!populated_zone(zone)) in balance_pgdat()
3268 !zone_reclaimable(zone)) in balance_pgdat()
3277 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, in balance_pgdat()
3288 if (kswapd_shrink_zone(zone, end_zone, in balance_pgdat()
3511 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) in wakeup_kswapd() argument
3515 if (!populated_zone(zone)) in wakeup_kswapd()
3518 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) in wakeup_kswapd()
3520 pgdat = zone->zone_pgdat; in wakeup_kswapd()
3527 if (zone_balanced(zone, order, 0, 0)) in wakeup_kswapd()
3530 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order); in wakeup_kswapd()
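
wakeup_kswapd() only pokes kswapd when it would have work to do: the zone must be populated, allowed by the caller's cpuset, and not already balanced for the requested order. A sketch; the kswapd_max_order/classzone_idx bookkeeping and the waitqueue check are assumptions around the matched lines:

void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
        pg_data_t *pgdat;

        if (!populated_zone(zone))
                return;

        if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
                return;

        pgdat = zone->zone_pgdat;

        /* Remember the largest pending request (assumed bookkeeping). */
        if (pgdat->kswapd_max_order < order) {
                pgdat->kswapd_max_order = order;
                pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
        }

        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;

        /* Nothing to do if the zone already meets its watermarks. */
        if (zone_balanced(zone, order, 0, 0))
                return;

        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
        wake_up_interruptible(&pgdat->kswapd_wait);
}
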
3681 static inline unsigned long zone_unmapped_file_pages(struct zone *zone) in zone_unmapped_file_pages() argument
3683 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); in zone_unmapped_file_pages()
3684 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + in zone_unmapped_file_pages()
3685 zone_page_state(zone, NR_ACTIVE_FILE); in zone_unmapped_file_pages()
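
zone_unmapped_file_pages() estimates how much of the page cache is unmapped by subtracting NR_FILE_MAPPED from the size of the file LRUs, clamping at zero because tmpfs pages can be FILE_MAPPED while sitting on the anon LRUs. The clamp is the only part not visible above; as a sketch:

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
        unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
        unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
                                 zone_page_state(zone, NR_ACTIVE_FILE);

        /* file_mapped can exceed file_lru (e.g. mapped tmpfs pages live
         * on the anon LRUs), so clamp instead of underflowing. */
        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
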
3696 static unsigned long zone_pagecache_reclaimable(struct zone *zone) in zone_pagecache_reclaimable() argument
3708 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES); in zone_pagecache_reclaimable()
3710 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone); in zone_pagecache_reclaimable()
3714 delta += zone_page_state(zone, NR_FILE_DIRTY); in zone_pagecache_reclaimable()
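
zone_pagecache_reclaimable() builds the estimate that gates zone reclaim: start from all file pages when zone_reclaim_mode allows unmapping, otherwise only the unmapped ones, then discount dirty pages when writing is not allowed. A sketch; the RECLAIM_UNMAP/RECLAIM_WRITE mode-flag names and the underflow clamp are assumptions based on the zone_reclaim_mode sysctl of that era:

static unsigned long zone_pagecache_reclaimable(struct zone *zone)
{
        unsigned long nr_pagecache_reclaimable;
        unsigned long delta = 0;

        /* With unmapping allowed, every file page is fair game;
         * otherwise only unmapped page cache is counted. */
        if (zone_reclaim_mode & RECLAIM_UNMAP)
                nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
        else
                nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

        /* Without write-out, dirty pages cannot be reclaimed here. */
        if (!(zone_reclaim_mode & RECLAIM_WRITE))
                delta += zone_page_state(zone, NR_FILE_DIRTY);

        /* Guard against underflow. */
        if (unlikely(delta > nr_pagecache_reclaimable))
                delta = nr_pagecache_reclaimable;

        return nr_pagecache_reclaimable - delta;
}
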
3726 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in __zone_reclaim() argument
3753 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) { in __zone_reclaim()
3759 shrink_zone(zone, &sc, true); in __zone_reclaim()
3769 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) in zone_reclaim() argument
3784 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages && in zone_reclaim()
3785 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) in zone_reclaim()
3788 if (!zone_reclaimable(zone)) in zone_reclaim()
3803 node_id = zone_to_nid(zone); in zone_reclaim()
3807 if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) in zone_reclaim()
3810 ret = __zone_reclaim(zone, gfp_mask, order); in zone_reclaim()
3811 clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); in zone_reclaim()
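
zone_reclaim() wraps __zone_reclaim() in a set of cheap bail-outs: nothing over the min_unmapped_pages/min_slab_pages limits, zone already unreclaimable, wrong node, or another task already holding ZONE_RECLAIM_LOCKED. A condensed sketch; the node check and the return codes are assumptions, and the real function has additional GFP/PF_MEMALLOC guards that are omitted here:

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        int ret;

        /* Nothing worth reclaiming locally: report the zone as full. */
        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                return ZONE_RECLAIM_FULL;

        if (!zone_reclaimable(zone))
                return ZONE_RECLAIM_FULL;

        /* Prefer the local node; remote zones with CPUs are left to
         * their own kswapd (assumed check). */
        if (node_state(zone_to_nid(zone), N_CPU) &&
            zone_to_nid(zone) != numa_node_id())
                return ZONE_RECLAIM_NOSCAN;

        /* One reclaimer per zone at a time. */
        if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
                return ZONE_RECLAIM_NOSCAN;

        ret = __zone_reclaim(zone, gfp_mask, order);
        clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);

        return ret;
}
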
3850 struct zone *zone = NULL; in check_move_unevictable_pages() local
3857 struct zone *pagezone; in check_move_unevictable_pages()
3861 if (pagezone != zone) { in check_move_unevictable_pages()
3862 if (zone) in check_move_unevictable_pages()
3863 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3864 zone = pagezone; in check_move_unevictable_pages()
3865 spin_lock_irq(&zone->lru_lock); in check_move_unevictable_pages()
3867 lruvec = mem_cgroup_page_lruvec(page, zone); in check_move_unevictable_pages()
3883 if (zone) { in check_move_unevictable_pages()
3886 spin_unlock_irq(&zone->lru_lock); in check_move_unevictable_pages()
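
The last group shows the lock-batching pattern in check_move_unevictable_pages(): hold zone->lru_lock across consecutive pages from the same zone and only switch locks at a zone boundary, with a single unlock at the end. The skeleton, with the per-page LRU handling elided:

        struct zone *zone = NULL;
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pages[i];
                struct zone *pagezone = page_zone(page);

                /* Only change locks when crossing into a different zone. */
                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irq(&zone->lru_lock);
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }

                /* ... move the page to the correct LRU via
                 *     mem_cgroup_page_lruvec(page, zone) ... */
        }

        if (zone)
                spin_unlock_irq(&zone->lru_lock);
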