Lines Matching refs:sc
Cross-reference hits for the identifier sc in the kernel's mm/vmscan.c (zone-based reclaim era, roughly v4.2-4.7). Each entry gives the vmscan.c line number, the source line, and the enclosing function, with sc classified as an argument or a local. In every function below except shrink_slab(), sc is a struct scan_control.
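
Every hit below flows through one struct scan_control. A minimal sketch of the fields these references imply; field set and comments are inferred from the lines below, and ordering and exact types are an approximation, not the verbatim kernel definition:

struct scan_control {
        unsigned long nr_to_reclaim;            /* reclaim target, line 2190 */
        gfp_t gfp_mask;                         /* caller's allocation context, lines 933, 2414 */
        int order;                              /* allocation order, lines 1394, 2356 */
        nodemask_t *nodemask;                   /* allowed nodes, line 2537 */
        struct mem_cgroup *target_mem_cgroup;   /* NULL for global reclaim, line 155 */
        int priority;                           /* scan aggressiveness, line 2120 */
        unsigned int may_writepage:1;           /* lines 1101, 2659 */
        unsigned int may_unmap:1;               /* line 926 */
        unsigned int may_swap:1;                /* line 1998 */
        unsigned int may_thrash:1;              /* second-pass retry flag, lines 2401, 2688 */
        unsigned int hibernation_mode:1;        /* line 1690 */
        unsigned int compaction_ready:1;        /* line 2574 */
        unsigned long nr_scanned;               /* running counters, lines 921, 2644 */
        unsigned long nr_reclaimed;             /* line 2286 */
};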
153 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
155 return !sc->target_mem_cgroup; in global_reclaim()
171 static bool sane_reclaim(struct scan_control *sc) in sane_reclaim() argument
173 struct mem_cgroup *memcg = sc->target_mem_cgroup; in sane_reclaim()
184 static bool global_reclaim(struct scan_control *sc) in global_reclaim() argument
189 static bool sane_reclaim(struct scan_control *sc) in sane_reclaim() argument
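
Lines 153-189 define the two reclaim-type predicates, with !CONFIG_MEMCG stubs at 184/189 that simply return true. A sketch reconstructed from the hits above; global_reclaim() follows directly from line 155, while the sane_reclaim() body is approximated and the cgroup-writeback branch is only described:

static bool global_reclaim(struct scan_control *sc)
{
        return !sc->target_mem_cgroup;  /* no target memcg => global reclaim */
}

/* Approximate body: reclaim is "sane" when it may safely stall on
 * dirty/writeback pages. That holds for global reclaim and, with
 * CONFIG_CGROUP_WRITEBACK, for memcg reclaim on the unified hierarchy;
 * legacy memcg reclaim is not sane because it cannot force writeback
 * progress on its own. */
static bool sane_reclaim(struct scan_control *sc)
{
        struct mem_cgroup *memcg = sc->target_mem_cgroup;

        if (!memcg)
                return true;
        /* CONFIG_CGROUP_WRITEBACK check on the unified hierarchy omitted */
        return false;
}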
432 struct shrink_control sc = { in shrink_slab() local
442 sc.nid = 0; in shrink_slab()
444 freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); in shrink_slab()
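
In shrink_slab() (lines 432-444), sc is a struct shrink_control local rather than a scan_control. A sketch of the per-shrinker pattern the hits show, assuming the standard SHRINKER_NUMA_AWARE flag; the initializer fields are approximated:

struct shrink_control sc = {
        .gfp_mask = gfp_mask,
        .nid = nid,
        .memcg = memcg,
};

/* shrinkers that are not NUMA aware keep all their objects on node 0 */
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
        sc.nid = 0;

freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);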
486 static int may_write_to_inode(struct inode *inode, struct scan_control *sc) in may_write_to_inode() argument
535 struct scan_control *sc) in pageout() argument
571 if (!may_write_to_inode(mapping->host, sc)) in pageout()
794 struct scan_control *sc) in page_check_references() argument
799 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, in page_check_references()
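
page_check_references() (lines 794-799) asks the rmap, via page_referenced(), whether the page was recently used, and hands shrink_page_list() one of four verdicts (consumed at line 1031). The enum, with each value's meaning commented; the heuristics that choose between them are more involved than shown here:

enum page_references {
        PAGEREF_RECLAIM,        /* reclaim; write back first if dirty */
        PAGEREF_RECLAIM_CLEAN,  /* reclaim only if it costs no I/O */
        PAGEREF_KEEP,           /* leave on the inactive list for now */
        PAGEREF_ACTIVATE,       /* promote back to the active list */
};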
882 struct scan_control *sc, in shrink_page_list() argument
921 sc->nr_scanned++; in shrink_page_list()
926 if (!sc->may_unmap && page_mapped(page)) in shrink_page_list()
931 sc->nr_scanned++; in shrink_page_list()
933 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
934 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1003 } else if (sane_reclaim(sc) || in shrink_page_list()
1031 references = page_check_references(page, sc); in shrink_page_list()
1048 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
1101 if (!sc->may_writepage) in shrink_page_list()
1110 switch (pageout(page, mapping, sc)) { in shrink_page_list()
1157 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1237 struct scan_control sc = { in reclaim_clean_pages_from_list() local
1254 ret = shrink_page_list(&clean_pages, zone, &sc, in reclaim_clean_pages_from_list()
1358 unsigned long *nr_scanned, struct scan_control *sc, in isolate_lru_pages() argument
1394 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, in isolate_lru_pages()
1456 struct scan_control *sc) in too_many_isolated() argument
1463 if (!sane_reclaim(sc)) in too_many_isolated()
1479 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
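
too_many_isolated() (lines 1456-1479) throttles direct reclaimers when too many pages already sit off-LRU in other reclaimers' hands. A sketch reconstructed around the hits above; kswapd and legacy-memcg (!sane_reclaim) callers are exempt because stalling them risks deadlock:

static int too_many_isolated(struct zone *zone, int file,
                             struct scan_control *sc)
{
        unsigned long inactive, isolated;

        if (current_is_kswapd())
                return 0;

        if (!sane_reclaim(sc))
                return 0;

        if (file) {
                inactive = zone_page_state(zone, NR_INACTIVE_FILE);
                isolated = zone_page_state(zone, NR_ISOLATED_FILE);
        } else {
                inactive = zone_page_state(zone, NR_INACTIVE_ANON);
                isolated = zone_page_state(zone, NR_ISOLATED_ANON);
        }

        /*
         * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages
         * before throttling, so they do not get blocked behind normal
         * direct reclaimers and form a circular deadlock (line 1479).
         */
        if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                inactive >>= 3;

        return isolated > inactive;
}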
1559 struct scan_control *sc, enum lru_list lru) in shrink_inactive_list() argument
1575 while (unlikely(too_many_isolated(zone, file, sc))) { in shrink_inactive_list()
1585 if (!sc->may_unmap) in shrink_inactive_list()
1587 if (!sc->may_writepage) in shrink_inactive_list()
1593 &nr_scanned, sc, isolate_mode, lru); in shrink_inactive_list()
1598 if (global_reclaim(sc)) { in shrink_inactive_list()
1610 nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP, in shrink_inactive_list()
1619 if (global_reclaim(sc)) { in shrink_inactive_list()
1658 if (sane_reclaim(sc)) { in shrink_inactive_list()
1690 if (!sc->hibernation_mode && !current_is_kswapd() && in shrink_inactive_list()
1697 sc->priority, in shrink_inactive_list()
1763 struct scan_control *sc, in shrink_active_list() argument
1781 if (!sc->may_unmap) in shrink_active_list()
1783 if (!sc->may_writepage) in shrink_active_list()
1789 &nr_scanned, sc, isolate_mode, lru); in shrink_active_list()
1790 if (global_reclaim(sc)) in shrink_active_list()
1818 if (page_referenced(page, 0, sc->target_mem_cgroup, in shrink_active_list()
1934 struct lruvec *lruvec, struct scan_control *sc) in shrink_list() argument
1938 shrink_active_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
1942 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); in shrink_list()
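
shrink_list() (lines 1934-1942) dispatches between the two paths above: active lists are only aged, and only when their inactive counterpart runs low, while actual reclaim happens exclusively from inactive lists. A sketch; the name of the low-watermark helper is approximated:

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                 struct lruvec *lruvec, struct scan_control *sc)
{
        if (is_active_lru(lru)) {
                /* aging the active list never yields reclaimed pages */
                if (inactive_list_is_low(lruvec, lru))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }

        return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}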
1962 struct scan_control *sc, unsigned long *nr, in get_scan_count() argument
1994 if (!global_reclaim(sc)) in get_scan_count()
1998 if (!sc->may_swap || (get_nr_swap_pages() <= 0)) { in get_scan_count()
2010 if (!global_reclaim(sc) && !swappiness) { in get_scan_count()
2020 if (!sc->priority && swappiness) { in get_scan_count()
2034 if (global_reclaim(sc)) { in get_scan_count()
2120 scan = size >> sc->priority; in get_scan_count()
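
The shift at line 2120 is the heart of the priority scheme: each LRU's scan target is its size divided by 2^priority. A worked example as a hypothetical helper (scan_window is not a vmscan.c function); DEF_PRIORITY is 12:

/* With an 8 GiB LRU of 4 KiB pages (2097152 pages):
 *
 *   priority 12 (first, gentlest pass): 2097152 >> 12 =  512 pages
 *   priority 11:                        2097152 >> 11 = 1024 pages
 *   ...
 *   priority  0 (most desperate):       the whole list
 */
static unsigned long scan_window(unsigned long lru_size, int priority)
{
        return lru_size >> priority;
}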
2183 struct scan_control *sc, unsigned long *lru_pages) in shrink_lruvec() argument
2190 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
2194 get_scan_count(lruvec, swappiness, sc, nr, lru_pages); in shrink_lruvec()
2210 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && in shrink_lruvec()
2211 sc->priority == DEF_PRIORITY); in shrink_lruvec()
2227 lruvec, sc); in shrink_lruvec()
2286 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
2294 sc, LRU_ACTIVE_ANON); in shrink_lruvec()
2296 throttle_vm_writeout(sc->gfp_mask); in shrink_lruvec()
2300 static bool in_reclaim_compaction(struct scan_control *sc) in in_reclaim_compaction() argument
2302 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction()
2303 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
2304 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
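
Lines 2300-2304 give this predicate nearly whole; reconstructed with its returns, it says reclaim is running on behalf of compaction when a high-order allocation is the goal and either the order is costly (above PAGE_ALLOC_COSTLY_ORDER, i.e. above 3) or pressure has already escalated two priority levels:

static bool in_reclaim_compaction(struct scan_control *sc)
{
        if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
            (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
             sc->priority < DEF_PRIORITY - 2))
                return true;

        return false;
}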
2320 struct scan_control *sc) in should_continue_reclaim() argument
2326 if (!in_reclaim_compaction(sc)) in should_continue_reclaim()
2330 if (sc->gfp_mask & __GFP_REPEAT) { in should_continue_reclaim()
2356 pages_for_compaction = (2UL << sc->order); in should_continue_reclaim()
2360 if (sc->nr_reclaimed < pages_for_compaction && in should_continue_reclaim()
2365 switch (compaction_suitable(zone, sc->order, 0, 0)) { in should_continue_reclaim()
2374 static bool shrink_zone(struct zone *zone, struct scan_control *sc, in shrink_zone() argument
2382 struct mem_cgroup *root = sc->target_mem_cgroup; in shrink_zone()
2385 .priority = sc->priority, in shrink_zone()
2390 nr_reclaimed = sc->nr_reclaimed; in shrink_zone()
2391 nr_scanned = sc->nr_scanned; in shrink_zone()
2401 if (!sc->may_thrash) in shrink_zone()
2408 scanned = sc->nr_scanned; in shrink_zone()
2410 shrink_lruvec(lruvec, swappiness, sc, &lru_pages); in shrink_zone()
2414 shrink_slab(sc->gfp_mask, zone_to_nid(zone), in shrink_zone()
2415 memcg, sc->nr_scanned - scanned, in shrink_zone()
2428 if (!global_reclaim(sc) && in shrink_zone()
2429 sc->nr_reclaimed >= sc->nr_to_reclaim) { in shrink_zone()
2439 if (global_reclaim(sc) && is_classzone) in shrink_zone()
2440 shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, in shrink_zone()
2441 sc->nr_scanned - nr_scanned, in shrink_zone()
2445 sc->nr_reclaimed += reclaim_state->reclaimed_slab; in shrink_zone()
2449 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, in shrink_zone()
2450 sc->nr_scanned - nr_scanned, in shrink_zone()
2451 sc->nr_reclaimed - nr_reclaimed); in shrink_zone()
2453 if (sc->nr_reclaimed - nr_reclaimed) in shrink_zone()
2456 } while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed, in shrink_zone()
2457 sc->nr_scanned - nr_scanned, sc)); in shrink_zone()
2517 static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) in shrink_zones() argument
2524 enum zone_type requested_highidx = gfp_zone(sc->gfp_mask); in shrink_zones()
2532 orig_mask = sc->gfp_mask; in shrink_zones()
2534 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
2537 gfp_zone(sc->gfp_mask), sc->nodemask) { in shrink_zones()
2552 if (global_reclaim(sc)) { in shrink_zones()
2557 if (sc->priority != DEF_PRIORITY && in shrink_zones()
2571 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
2573 compaction_ready(zone, sc->order)) { in shrink_zones()
2574 sc->compaction_ready = true; in shrink_zones()
2586 sc->order, sc->gfp_mask, in shrink_zones()
2588 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
2589 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
2595 if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx)) in shrink_zones()
2598 if (global_reclaim(sc) && in shrink_zones()
2607 sc->gfp_mask = orig_mask; in shrink_zones()
2629 struct scan_control *sc) in do_try_to_free_pages() argument
2631 int initial_priority = sc->priority; in do_try_to_free_pages()
2638 if (global_reclaim(sc)) in do_try_to_free_pages()
2642 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
2643 sc->priority); in do_try_to_free_pages()
2644 sc->nr_scanned = 0; in do_try_to_free_pages()
2645 zones_reclaimable = shrink_zones(zonelist, sc); in do_try_to_free_pages()
2647 total_scanned += sc->nr_scanned; in do_try_to_free_pages()
2648 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
2651 if (sc->compaction_ready) in do_try_to_free_pages()
2658 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
2659 sc->may_writepage = 1; in do_try_to_free_pages()
2668 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; in do_try_to_free_pages()
2672 sc->may_writepage = 1; in do_try_to_free_pages()
2674 } while (--sc->priority >= 0); in do_try_to_free_pages()
2678 if (sc->nr_reclaimed) in do_try_to_free_pages()
2679 return sc->nr_reclaimed; in do_try_to_free_pages()
2682 if (sc->compaction_ready) in do_try_to_free_pages()
2686 if (!sc->may_thrash) { in do_try_to_free_pages()
2687 sc->priority = initial_priority; in do_try_to_free_pages()
2688 sc->may_thrash = 1; in do_try_to_free_pages()
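
Lines 2629-2688 trace the main direct-reclaim loop. A standalone sketch of its control flow using simplified, hypothetical types (sc_sketch and do_try_sketch are illustrations, not kernel code); the real function additionally throttles writeback via total_scanned and reports vmpressure:

#include <stdbool.h>

struct sc_sketch {
        int priority;                   /* counts down from DEF_PRIORITY */
        bool may_thrash, compaction_ready;
        unsigned long nr_reclaimed, nr_to_reclaim;
};

static unsigned long do_try_sketch(struct sc_sketch *sc,
                                   void (*shrink_zones)(struct sc_sketch *))
{
        int initial_priority = sc->priority;

retry:
        do {
                shrink_zones(sc);
                if (sc->nr_reclaimed >= sc->nr_to_reclaim)
                        break;                  /* enough progress */
                if (sc->compaction_ready)
                        break;                  /* let compaction take over */
        } while (--sc->priority >= 0);

        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
        if (sc->compaction_ready)
                return 1;                       /* claim success for compaction */

        /* Nothing reclaimed: retry once from the initial priority, now
         * allowed to scan cgroups otherwise protected from thrashing
         * (lines 2686-2688). */
        if (!sc->may_thrash) {
                sc->priority = initial_priority;
                sc->may_thrash = true;
                goto retry;
        }
        return 0;
}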
2830 struct scan_control sc = { in try_to_free_pages() local
2850 sc.may_writepage, in try_to_free_pages()
2853 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_pages()
2867 struct scan_control sc = { in mem_cgroup_shrink_node_zone() local
2878 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | in mem_cgroup_shrink_node_zone()
2881 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, in mem_cgroup_shrink_node_zone()
2882 sc.may_writepage, in mem_cgroup_shrink_node_zone()
2883 sc.gfp_mask); in mem_cgroup_shrink_node_zone()
2892 shrink_lruvec(lruvec, swappiness, &sc, &lru_pages); in mem_cgroup_shrink_node_zone()
2894 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); in mem_cgroup_shrink_node_zone()
2896 *nr_scanned = sc.nr_scanned; in mem_cgroup_shrink_node_zone()
2897 return sc.nr_reclaimed; in mem_cgroup_shrink_node_zone()
2908 struct scan_control sc = { in try_to_free_mem_cgroup_pages() local
2929 sc.may_writepage, in try_to_free_mem_cgroup_pages()
2930 sc.gfp_mask); in try_to_free_mem_cgroup_pages()
2932 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in try_to_free_mem_cgroup_pages()
2940 static void age_active_anon(struct zone *zone, struct scan_control *sc) in age_active_anon() argument
2953 sc, LRU_ACTIVE_ANON); in age_active_anon()
3074 struct scan_control *sc, in kswapd_shrink_zone() argument
3077 int testorder = sc->order; in kswapd_shrink_zone()
3082 sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone)); in kswapd_shrink_zone()
3090 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in kswapd_shrink_zone()
3091 compaction_suitable(zone, sc->order, 0, classzone_idx) in kswapd_shrink_zone()
3113 shrink_zone(zone, sc, zone_idx(zone) == classzone_idx); in kswapd_shrink_zone()
3116 *nr_attempted += sc->nr_to_reclaim; in kswapd_shrink_zone()
3132 return sc->nr_scanned >= sc->nr_to_reclaim; in kswapd_shrink_zone()
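
kswapd_shrink_zone() (lines 3074-3132) adjusts its target before calling shrink_zone(). One detail from lines 3090-3091, sketched below; and note the return at line 3132 compares pages scanned against the reclaim target, so consuming the whole scan window tells balance_pgdat() to raise priority:

int testorder = sc->order;

/* If compaction could already satisfy this order, reclaim at order 0
 * and leave the high-order assembly to compaction. */
if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
    compaction_suitable(zone, sc->order, 0, classzone_idx) != COMPACT_SKIPPED)
        testorder = 0;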
3163 struct scan_control sc = { in balance_pgdat() local
3178 sc.nr_reclaimed = 0; in balance_pgdat()
3190 if (sc.priority != DEF_PRIORITY && in balance_pgdat()
3198 age_active_anon(zone, &sc); in balance_pgdat()
3249 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
3250 sc.may_writepage = 1; in balance_pgdat()
3267 if (sc.priority != DEF_PRIORITY && in balance_pgdat()
3271 sc.nr_scanned = 0; in balance_pgdat()
3278 order, sc.gfp_mask, in balance_pgdat()
3280 sc.nr_reclaimed += nr_soft_reclaimed; in balance_pgdat()
3289 &sc, &nr_attempted)) in balance_pgdat()
3310 if (order && sc.nr_reclaimed >= 2UL << order) in balance_pgdat()
3311 order = sc.order = 0; in balance_pgdat()
3321 if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted) in balance_pgdat()
3328 if (raise_priority || !sc.nr_reclaimed) in balance_pgdat()
3329 sc.priority--; in balance_pgdat()
3330 } while (sc.priority >= 1 && in balance_pgdat()
3546 struct scan_control sc = { in shrink_all_memory() local
3555 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in shrink_all_memory()
3560 lockdep_set_current_reclaim_state(sc.gfp_mask); in shrink_all_memory()
3564 nr_reclaimed = do_try_to_free_pages(zonelist, &sc); in shrink_all_memory()
3732 struct scan_control sc = { in __zone_reclaim() local
3759 shrink_zone(zone, &sc, true); in __zone_reclaim()
3760 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __zone_reclaim()
3766 return sc.nr_reclaimed >= nr_pages; in __zone_reclaim()
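
Finally, __zone_reclaim() (lines 3732-3766) drives the same shrink_zone() machinery against a single zone. The convergence loop that the last three hits form, reassembled:

do {
        shrink_zone(zone, &sc, true);   /* true: this zone is the classzone */
} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);

/* report success only if the full request was met */
return sc.nr_reclaimed >= nr_pages;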