Lines Matching refs:sc
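
Every hit below references `sc`, which in mm/vmscan.c is a pointer to `struct scan_control`, the per-invocation descriptor that carries the reclaim parameters and accounting through the whole reclaim call chain (the shrink_slab() hits are the one exception: there `sc` is a local `struct shrink_control`). As orientation, here is a rough sketch of the structure, reconstructed from the fields dereferenced in this listing for a kernel of roughly this vintage (~v4.0/v4.1); field order and comments are approximate, not the verbatim kernel definition:

struct scan_control {
        /* How many pages shrink_list() should try to reclaim */
        unsigned long nr_to_reclaim;

        /* GFP mask of the allocation that triggered reclaim */
        gfp_t gfp_mask;

        /* Allocation order being reclaimed for */
        int order;

        /* Nodes allowed by the caller; NULL means all nodes */
        nodemask_t *nodemask;

        /* Target cgroup; NULL for global reclaim (see global_reclaim()) */
        struct mem_cgroup *target_mem_cgroup;

        /* Scan (lru size >> priority) pages per pass; see get_scan_count() */
        int priority;

        unsigned int may_writepage:1;   /* may queue dirty pages for writeback */
        unsigned int may_unmap:1;       /* may reclaim mapped pages */
        unsigned int may_swap:1;        /* may swap anonymous pages */
        unsigned int may_thrash:1;      /* may reclaim cgroups below their low limit */
        unsigned int hibernation_mode:1;
        unsigned int compaction_ready:1;/* a zone is ready for compaction */

        /* Accounting filled in as reclaim proceeds */
        unsigned long nr_scanned;
        unsigned long nr_reclaimed;
};
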
153 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
155 return !sc->target_mem_cgroup; in global_reclaim()
158 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
401 struct shrink_control sc = { in shrink_slab() local
411 sc.nid = 0; in shrink_slab()
413 freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); in shrink_slab()
456 struct scan_control *sc) in may_write_to_queue() argument
505 struct scan_control *sc) in pageout() argument
541 if (!may_write_to_queue(inode_to_bdi(mapping->host), sc)) in pageout()
757 struct scan_control *sc) in page_check_references() argument
762 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, in page_check_references()
845 struct scan_control *sc, in shrink_page_list() argument
884 sc->nr_scanned++; in shrink_page_list()
889 if (!sc->may_unmap && page_mapped(page)) in shrink_page_list()
894 sc->nr_scanned++; in shrink_page_list()
896 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
897 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
966 } else if (global_reclaim(sc) || in shrink_page_list()
991 references = page_check_references(page, sc); in shrink_page_list()
1008 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1060 if (!sc->may_writepage) in shrink_page_list()
1064 switch (pageout(page, mapping, sc)) { in shrink_page_list()
1111 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1190 struct scan_control sc = { in reclaim_clean_pages_from_list() local
1207 ret = shrink_page_list(&clean_pages, zone, &sc, in reclaim_clean_pages_from_list()
1311 unsigned long *nr_scanned, struct scan_control *sc, in isolate_lru_pages() argument
1346 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, in isolate_lru_pages()
1408 struct scan_control *sc) in too_many_isolated() argument
1415 if (!global_reclaim(sc)) in too_many_isolated()
1431 if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS) in too_many_isolated()
1511 struct scan_control *sc, enum lru_list lru) in shrink_inactive_list() argument
1527 while (unlikely(too_many_isolated(zone, file, sc))) { in shrink_inactive_list()
1537 if (!sc->may_unmap) in shrink_inactive_list()
1539 if (!sc->may_writepage) in shrink_inactive_list()
1545 &nr_scanned, sc, isolate_mode, lru); in shrink_inactive_list()
1550 if (global_reclaim(sc)) { in shrink_inactive_list()
1562 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, in shrink_inactive_list()
1571 if (global_reclaim(sc)) { in shrink_inactive_list()
1610 if (global_reclaim(sc)) { in shrink_inactive_list()
1642 if (!sc->hibernation_mode && !current_is_kswapd() && in shrink_inactive_list()
1649 sc->priority, in shrink_inactive_list()
1715 struct scan_control *sc, in shrink_active_list() argument
1733 if (!sc->may_unmap) in shrink_active_list()
1735 if (!sc->may_writepage) in shrink_active_list()
1741 &nr_scanned, sc, isolate_mode, lru); in shrink_active_list()
1742 if (global_reclaim(sc)) in shrink_active_list()
1770 if (page_referenced(page, 0, sc->target_mem_cgroup, in shrink_active_list()
1889 struct lruvec *lruvec, struct scan_control *sc) in shrink_list() argument
1893 shrink_active_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
1897 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
1917 struct scan_control *sc, unsigned long *nr, in get_scan_count() argument
1949 if (!global_reclaim(sc)) in get_scan_count()
1953 if (!sc->may_swap || (get_nr_swap_pages() <= 0)) { in get_scan_count()
1965 if (!global_reclaim(sc) && !swappiness) { in get_scan_count()
1975 if (!sc->priority && swappiness) { in get_scan_count()
1989 if (global_reclaim(sc)) { in get_scan_count()
2075 scan = size >> sc->priority; in get_scan_count()
2121 struct scan_control *sc, unsigned long *lru_pages) in shrink_lruvec() argument
2128 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
2132 get_scan_count(lruvec, swappiness, sc, nr, lru_pages); in shrink_lruvec()
2148 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && in shrink_lruvec()
2149 sc->priority == DEF_PRIORITY); in shrink_lruvec()
2163 lruvec, sc); in shrink_lruvec()
2222 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
2230 sc, LRU_ACTIVE_ANON); in shrink_lruvec()
2232 throttle_vm_writeout(sc->gfp_mask); in shrink_lruvec()
2236 static bool in_reclaim_compaction(struct scan_control *sc) in in_reclaim_compaction() argument
2238 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2239 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2240 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
2256 struct scan_control *sc) in should_continue_reclaim() argument
2262 if (!in_reclaim_compaction(sc)) in should_continue_reclaim()
2266 if (sc->gfp_mask & __GFP_REPEAT) { in should_continue_reclaim()
2292 pages_for_compaction = (2UL << sc->order); in should_continue_reclaim()
2296 if (sc->nr_reclaimed < pages_for_compaction && in should_continue_reclaim()
2301 switch (compaction_suitable(zone, sc->order, 0, 0)) { in should_continue_reclaim()
2310 static bool shrink_zone(struct zone *zone, struct scan_control *sc, in shrink_zone() argument
2318 struct mem_cgroup *root = sc->target_mem_cgroup; in shrink_zone()
2321 .priority = sc->priority, in shrink_zone()
2326 nr_reclaimed = sc->nr_reclaimed; in shrink_zone()
2327 nr_scanned = sc->nr_scanned; in shrink_zone()
2337 if (!sc->may_thrash) in shrink_zone()
2344 scanned = sc->nr_scanned; in shrink_zone()
2346 shrink_lruvec(lruvec, swappiness, sc, &lru_pages); in shrink_zone()
2350 shrink_slab(sc->gfp_mask, zone_to_nid(zone), in shrink_zone()
2351 memcg, sc->nr_scanned - scanned, in shrink_zone()
2364 if (!global_reclaim(sc) && in shrink_zone()
2365 sc->nr_reclaimed >= sc->nr_to_reclaim) { in shrink_zone()
2375 if (global_reclaim(sc) && is_classzone) in shrink_zone()
2376 shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, in shrink_zone()
2377 sc->nr_scanned - nr_scanned, in shrink_zone()
2381 sc->nr_reclaimed += reclaim_state->reclaimed_slab; in shrink_zone()
2385 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, in shrink_zone()
2386 sc->nr_scanned - nr_scanned, in shrink_zone()
2387 sc->nr_reclaimed - nr_reclaimed); in shrink_zone()
2389 if (sc->nr_reclaimed - nr_reclaimed) in shrink_zone()
2392 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, in shrink_zone()
2393 sc->nr_scanned - nr_scanned, sc)); in shrink_zone()
2453 static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) in shrink_zones() argument
2460 enum zone_type requested_highidx = gfp_zone(sc->gfp_mask); in shrink_zones()
2468 orig_mask = sc->gfp_mask; in shrink_zones()
2470 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
2473 gfp_zone(sc->gfp_mask), sc->nodemask) { in shrink_zones()
2488 if (global_reclaim(sc)) { in shrink_zones()
2493 if (sc->priority != DEF_PRIORITY && in shrink_zones()
2507 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
2509 compaction_ready(zone, sc->order)) { in shrink_zones()
2510 sc->compaction_ready = true; in shrink_zones()
2522 sc->order, sc->gfp_mask, in shrink_zones()
2524 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
2525 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
2531 if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx)) in shrink_zones()
2534 if (global_reclaim(sc) && in shrink_zones()
2543 sc->gfp_mask = orig_mask; in shrink_zones()
2565 struct scan_control *sc) in do_try_to_free_pages() argument
2567 int initial_priority = sc->priority; in do_try_to_free_pages()
2574 if (global_reclaim(sc)) in do_try_to_free_pages()
2578 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
2579 sc->priority); in do_try_to_free_pages()
2580 sc->nr_scanned = 0; in do_try_to_free_pages()
2581 zones_reclaimable = shrink_zones(zonelist, sc); in do_try_to_free_pages()
2583 total_scanned += sc->nr_scanned; in do_try_to_free_pages()
2584 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
2587 if (sc->compaction_ready) in do_try_to_free_pages()
2594 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
2595 sc->may_writepage = 1; in do_try_to_free_pages()
2604 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; in do_try_to_free_pages()
2608 sc->may_writepage = 1; in do_try_to_free_pages()
2610 } while (--sc->priority >= 0); in do_try_to_free_pages()
2614 if (sc->nr_reclaimed) in do_try_to_free_pages()
2615 return sc->nr_reclaimed; in do_try_to_free_pages()
2618 if (sc->compaction_ready) in do_try_to_free_pages()
2622 if (!sc->may_thrash) { in do_try_to_free_pages()
2623 sc->priority = initial_priority; in do_try_to_free_pages()
2624 sc->may_thrash = 1; in do_try_to_free_pages()
2765 struct scan_control sc = { in try_to_free_pages() local
2785 sc.may_writepage, in try_to_free_pages()
2788 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_pages()
2802 struct scan_control sc = { in mem_cgroup_shrink_node_zone() local
2813 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | in mem_cgroup_shrink_node_zone()
2816 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, in mem_cgroup_shrink_node_zone()
2817 sc.may_writepage, in mem_cgroup_shrink_node_zone()
2818 sc.gfp_mask); in mem_cgroup_shrink_node_zone()
2827 shrink_lruvec(lruvec, swappiness, &sc, &lru_pages); in mem_cgroup_shrink_node_zone()
2829 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); in mem_cgroup_shrink_node_zone()
2831 *nr_scanned = sc.nr_scanned; in mem_cgroup_shrink_node_zone()
2832 return sc.nr_reclaimed; in mem_cgroup_shrink_node_zone()
2843 struct scan_control sc = { in try_to_free_mem_cgroup_pages() local
2864 sc.may_writepage, in try_to_free_mem_cgroup_pages()
2865 sc.gfp_mask); in try_to_free_mem_cgroup_pages()
2867 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_mem_cgroup_pages()
2875 static void age_active_anon(struct zone *zone, struct scan_control *sc) in age_active_anon() argument
2888 sc, LRU_ACTIVE_ANON); in age_active_anon()
3009 struct scan_control *sc, in kswapd_shrink_zone() argument
3012 int testorder = sc->order; in kswapd_shrink_zone()
3017 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); in kswapd_shrink_zone()
3025 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in kswapd_shrink_zone()
3026 compaction_suitable(zone, sc->order, 0, classzone_idx) in kswapd_shrink_zone()
3048 shrink_zone(zone, sc, zone_idx(zone) == classzone_idx); in kswapd_shrink_zone()
3051 *nr_attempted += sc->nr_to_reclaim; in kswapd_shrink_zone()
3067 return sc->nr_scanned >= sc->nr_to_reclaim; in kswapd_shrink_zone()
3098 struct scan_control sc = { in balance_pgdat() local
3113 sc.nr_reclaimed = 0; in balance_pgdat()
3125 if (sc.priority != DEF_PRIORITY && in balance_pgdat()
3133 age_active_anon(zone, &sc); in balance_pgdat()
3184 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
3185 sc.may_writepage = 1; in balance_pgdat()
3202 if (sc.priority != DEF_PRIORITY && in balance_pgdat()
3206 sc.nr_scanned = 0; in balance_pgdat()
3213 order, sc.gfp_mask, in balance_pgdat()
3215 sc.nr_reclaimed += nr_soft_reclaimed; in balance_pgdat()
3224 &sc, &nr_attempted)) in balance_pgdat()
3245 if (order && sc.nr_reclaimed >= 2UL << order) in balance_pgdat()
3246 order = sc.order = 0; in balance_pgdat()
3256 if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted) in balance_pgdat()
3263 if (raise_priority || !sc.nr_reclaimed) in balance_pgdat()
3264 sc.priority--; in balance_pgdat()
3265 } while (sc.priority >= 1 && in balance_pgdat()
3481 struct scan_control sc = { in shrink_all_memory() local
3490 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in shrink_all_memory()
3495 lockdep_set_current_reclaim_state(sc.gfp_mask); in shrink_all_memory()
3499 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in shrink_all_memory()
3667 struct scan_control sc = { in __zone_reclaim() local
3694 shrink_zone(zone, &sc, true); in __zone_reclaim()
3695 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __zone_reclaim()
3701 return sc.nr_reclaimed >= nr_pages; in __zone_reclaim()
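
The entry points near the end of the listing (try_to_free_pages(), mem_cgroup_shrink_node_zone(), try_to_free_mem_cgroup_pages(), balance_pgdat(), shrink_all_memory(), __zone_reclaim()) all follow the same pattern: fill in a scan_control on the stack, then drive do_try_to_free_pages() or shrink_zone() with it until enough pages are reclaimed or priority runs out. A condensed, approximate sketch of the direct-reclaim entry point, abridged from the try_to_free_pages() hits around line 2765 (tracing, direct-reclaim throttling and the exact initializer values are elided):

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
{
        struct scan_control sc = {
                .nr_to_reclaim  = SWAP_CLUSTER_MAX,
                .gfp_mask       = gfp_mask,
                .order          = order,
                .nodemask       = nodemask,
                .priority       = DEF_PRIORITY,
                .may_writepage  = !laptop_mode,
                .may_unmap      = 1,
                .may_swap       = 1,
        };

        /*
         * do_try_to_free_pages() loops over shrink_zones(), dropping
         * sc.priority each round (lines 2565-2624 above), until
         * sc.nr_to_reclaim pages have been freed, compaction is ready,
         * or priority reaches zero.
         */
        return do_try_to_free_pages(zonelist, &sc);
}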