1/*
2 *  linux/mm/vmscan.c
3 *
4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5 *
6 *  Swap reorganised 29.12.95, Stephen Tweedie.
7 *  kswapd added: 7.1.96  sct
8 *  Removed kswapd_ctl limits, and swap out as many pages as needed
9 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 *  Multiqueue VM started 5.8.00, Rik van Riel.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/gfp.h>
19#include <linux/kernel_stat.h>
20#include <linux/swap.h>
21#include <linux/pagemap.h>
22#include <linux/init.h>
23#include <linux/highmem.h>
24#include <linux/vmpressure.h>
25#include <linux/vmstat.h>
26#include <linux/file.h>
27#include <linux/writeback.h>
28#include <linux/blkdev.h>
29#include <linux/buffer_head.h>	/* for try_to_release_page(),
30					buffer_heads_over_limit */
31#include <linux/mm_inline.h>
32#include <linux/backing-dev.h>
33#include <linux/rmap.h>
34#include <linux/topology.h>
35#include <linux/cpu.h>
36#include <linux/cpuset.h>
37#include <linux/compaction.h>
38#include <linux/notifier.h>
39#include <linux/rwsem.h>
40#include <linux/delay.h>
41#include <linux/kthread.h>
42#include <linux/freezer.h>
43#include <linux/memcontrol.h>
44#include <linux/delayacct.h>
45#include <linux/sysctl.h>
46#include <linux/oom.h>
47#include <linux/prefetch.h>
48#include <linux/printk.h>
49
50#include <asm/tlbflush.h>
51#include <asm/div64.h>
52
53#include <linux/swapops.h>
54#include <linux/balloon_compaction.h>
55
56#include "internal.h"
57
58#define CREATE_TRACE_POINTS
59#include <trace/events/vmscan.h>
60
61struct scan_control {
62	/* How many pages shrink_list() should reclaim */
63	unsigned long nr_to_reclaim;
64
65	/* This context's GFP mask */
66	gfp_t gfp_mask;
67
68	/* Allocation order */
69	int order;
70
71	/*
72	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
73	 * are scanned.
74	 */
75	nodemask_t	*nodemask;
76
77	/*
78	 * The memory cgroup that hit its limit and as a result is the
79	 * primary target of this reclaim invocation.
80	 */
81	struct mem_cgroup *target_mem_cgroup;
82
83	/* Scan (total_size >> priority) pages at once */
84	int priority;
85
86	unsigned int may_writepage:1;
87
88	/* Can mapped pages be reclaimed? */
89	unsigned int may_unmap:1;
90
91	/* Can pages be swapped as part of reclaim? */
92	unsigned int may_swap:1;
93
94	/* Can cgroups be reclaimed below their normal consumption range? */
95	unsigned int may_thrash:1;
96
97	unsigned int hibernation_mode:1;
98
99	/* One of the zones is ready for compaction */
100	unsigned int compaction_ready:1;
101
102	/* Incremented by the number of inactive pages that were scanned */
103	unsigned long nr_scanned;
104
105	/* Number of pages freed so far during a call to shrink_zones() */
106	unsigned long nr_reclaimed;
107};
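/*
 * Illustrative sketch only (not copied verbatim from any one call site):
 * a direct-reclaim caller further down in this file typically fills in a
 * scan_control along these lines, with the exact fields varying per caller:
 *
 *	struct scan_control sc = {
 *		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
 *		.gfp_mask	= gfp_mask,
 *		.order		= order,
 *		.priority	= DEF_PRIORITY,
 *		.may_writepage	= !laptop_mode,
 *		.may_unmap	= 1,
 *		.may_swap	= 1,
 *	};
 *
 * priority starts at DEF_PRIORITY and is decremented on each pass, which
 * doubles the number of pages scanned per pass (total_size >> priority).
 */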
108
109#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
110
111#ifdef ARCH_HAS_PREFETCH
112#define prefetch_prev_lru_page(_page, _base, _field)			\
113	do {								\
114		if ((_page)->lru.prev != _base) {			\
115			struct page *prev;				\
116									\
117			prev = lru_to_page(&(_page->lru));		\
118			prefetch(&prev->_field);			\
119		}							\
120	} while (0)
121#else
122#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
123#endif
124
125#ifdef ARCH_HAS_PREFETCHW
126#define prefetchw_prev_lru_page(_page, _base, _field)			\
127	do {								\
128		if ((_page)->lru.prev != _base) {			\
129			struct page *prev;				\
130									\
131			prev = lru_to_page(&(_page->lru));		\
132			prefetchw(&prev->_field);			\
133		}							\
134	} while (0)
135#else
136#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
137#endif
138
139/*
140 * From 0 .. 100.  Higher means more swappy.
141 */
142int vm_swappiness = 60;
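/*
 * This knob is exported to userspace as /proc/sys/vm/swappiness; larger
 * values bias the scan balancing below towards reclaiming anonymous pages,
 * smaller values towards page cache.
 */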
143/*
144 * The total number of pages which are beyond the high watermark within all
145 * zones.
146 */
147unsigned long vm_total_pages;
148
149static LIST_HEAD(shrinker_list);
150static DECLARE_RWSEM(shrinker_rwsem);
151
152#ifdef CONFIG_MEMCG
153static bool global_reclaim(struct scan_control *sc)
154{
155	return !sc->target_mem_cgroup;
156}
157#else
158static bool global_reclaim(struct scan_control *sc)
159{
160	return true;
161}
162#endif
163
164static unsigned long zone_reclaimable_pages(struct zone *zone)
165{
166	int nr;
167
168	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
169	     zone_page_state(zone, NR_INACTIVE_FILE);
170
171	if (get_nr_swap_pages() > 0)
172		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
173		      zone_page_state(zone, NR_INACTIVE_ANON);
174
175	return nr;
176}
177
178bool zone_reclaimable(struct zone *zone)
179{
180	return zone_page_state(zone, NR_PAGES_SCANNED) <
181		zone_reclaimable_pages(zone) * 6;
182}
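
/*
 * The factor of six above is a heuristic: a zone still counts as
 * reclaimable while fewer than six times its reclaimable pages have been
 * scanned without progress (NR_PAGES_SCANNED is reset by the page
 * allocator when pages are freed back to it). Past that point callers
 * treat the zone as effectively dead for reclaim purposes.
 */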
183
184static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
185{
186	if (!mem_cgroup_disabled())
187		return mem_cgroup_get_lru_size(lruvec, lru);
188
189	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
190}
191
192/*
193 * Add a shrinker callback to be called from the vm.
194 */
195int register_shrinker(struct shrinker *shrinker)
196{
197	size_t size = sizeof(*shrinker->nr_deferred);
198
199	/*
	 * If we only have one possible node in the system anyway, save
	 * ourselves the trouble and disable NUMA-aware behavior. This way we
	 * will save memory and a little loop time later.
203	 */
204	if (nr_node_ids == 1)
205		shrinker->flags &= ~SHRINKER_NUMA_AWARE;
206
207	if (shrinker->flags & SHRINKER_NUMA_AWARE)
208		size *= nr_node_ids;
209
210	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
211	if (!shrinker->nr_deferred)
212		return -ENOMEM;
213
214	down_write(&shrinker_rwsem);
215	list_add_tail(&shrinker->list, &shrinker_list);
216	up_write(&shrinker_rwsem);
217	return 0;
218}
219EXPORT_SYMBOL(register_shrinker);
220
221/*
222 * Remove one
223 */
224void unregister_shrinker(struct shrinker *shrinker)
225{
226	down_write(&shrinker_rwsem);
227	list_del(&shrinker->list);
228	up_write(&shrinker_rwsem);
229	kfree(shrinker->nr_deferred);
230}
231EXPORT_SYMBOL(unregister_shrinker);
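
/*
 * Minimal usage sketch (hypothetical cache, not part of this file): a
 * subsystem with a shrinkable cache registers callbacks roughly like
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return my_cache_count();	// 0 means "nothing to do"
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		return my_cache_trim(sc->nr_to_scan);	// or SHRINK_STOP
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.count_objects	= my_count,
 *		.scan_objects	= my_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *	register_shrinker(&my_shrinker);
 *
 * my_cache_count()/my_cache_trim() are made-up names standing in for the
 * subsystem's own accounting; only the struct shrinker hooks are real.
 */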
232
233#define SHRINK_BATCH 128
234
235static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
236				    struct shrinker *shrinker,
237				    unsigned long nr_scanned,
238				    unsigned long nr_eligible)
239{
240	unsigned long freed = 0;
241	unsigned long long delta;
242	long total_scan;
243	long freeable;
244	long nr;
245	long new_nr;
246	int nid = shrinkctl->nid;
247	long batch_size = shrinker->batch ? shrinker->batch
248					  : SHRINK_BATCH;
249
250	freeable = shrinker->count_objects(shrinker, shrinkctl);
251	if (freeable == 0)
252		return 0;
253
254	/*
255	 * copy the current shrinker scan count into a local variable
256	 * and zero it so that other concurrent shrinker invocations
257	 * don't also do this scanning work.
258	 */
259	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
260
261	total_scan = nr;
262	delta = (4 * nr_scanned) / shrinker->seeks;
263	delta *= freeable;
264	do_div(delta, nr_eligible + 1);
265	total_scan += delta;
266	if (total_scan < 0) {
267		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
268		       shrinker->scan_objects, total_scan);
269		total_scan = freeable;
270	}
271
272	/*
273	 * We need to avoid excessive windup on filesystem shrinkers
274	 * due to large numbers of GFP_NOFS allocations causing the
275	 * shrinkers to return -1 all the time. This results in a large
276	 * nr being built up so when a shrink that can do some work
277	 * comes along it empties the entire cache due to nr >>>
278	 * freeable. This is bad for sustaining a working set in
279	 * memory.
280	 *
281	 * Hence only allow the shrinker to scan the entire cache when
282	 * a large delta change is calculated directly.
283	 */
284	if (delta < freeable / 4)
285		total_scan = min(total_scan, freeable / 2);
286
287	/*
288	 * Avoid risking looping forever due to too large nr value:
	 * never try to free more than twice the estimated number of
290	 * freeable entries.
291	 */
292	if (total_scan > freeable * 2)
293		total_scan = freeable * 2;
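
	/*
	 * To make the proportionality concrete: with the default
	 * shrinker->seeks of DEFAULT_SEEKS (2), the arithmetic above boils
	 * down to roughly
	 *
	 *	delta ~= 2 * freeable * nr_scanned / nr_eligible
	 *
	 * (the "+ 1" only guards against division by zero), so if page
	 * reclaim scanned a quarter of the eligible LRU pages, the shrinker
	 * is asked to scan about half of its freeable objects, subject to
	 * the deferral and clamping above.
	 */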
294
295	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
296				   nr_scanned, nr_eligible,
297				   freeable, delta, total_scan);
298
299	/*
300	 * Normally, we should not scan less than batch_size objects in one
301	 * pass to avoid too frequent shrinker calls, but if the slab has less
302	 * than batch_size objects in total and we are really tight on memory,
	 * we will try to reclaim all available objects; otherwise we can end
	 * up failing allocations even though there are plenty of reclaimable
	 * objects spread over several slabs with usage less than the
306	 * batch_size.
307	 *
308	 * We detect the "tight on memory" situations by looking at the total
309	 * number of objects we want to scan (total_scan). If it is greater
310	 * than the total number of objects on slab (freeable), we must be
311	 * scanning at high prio and therefore should try to reclaim as much as
312	 * possible.
313	 */
314	while (total_scan >= batch_size ||
315	       total_scan >= freeable) {
316		unsigned long ret;
317		unsigned long nr_to_scan = min(batch_size, total_scan);
318
319		shrinkctl->nr_to_scan = nr_to_scan;
320		ret = shrinker->scan_objects(shrinker, shrinkctl);
321		if (ret == SHRINK_STOP)
322			break;
323		freed += ret;
324
325		count_vm_events(SLABS_SCANNED, nr_to_scan);
326		total_scan -= nr_to_scan;
327
328		cond_resched();
329	}
330
331	/*
332	 * move the unused scan count back into the shrinker in a
333	 * manner that handles concurrent updates. If we exhausted the
334	 * scan, there is no need to do an update.
335	 */
336	if (total_scan > 0)
337		new_nr = atomic_long_add_return(total_scan,
338						&shrinker->nr_deferred[nid]);
339	else
340		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
341
342	trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
343	return freed;
344}
345
346/**
347 * shrink_slab - shrink slab caches
348 * @gfp_mask: allocation context
349 * @nid: node whose slab caches to target
350 * @memcg: memory cgroup whose slab caches to target
351 * @nr_scanned: pressure numerator
352 * @nr_eligible: pressure denominator
353 *
354 * Call the shrink functions to age shrinkable caches.
355 *
 * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set;
 * unaware shrinkers will receive a node id of 0 instead.
358 *
359 * @memcg specifies the memory cgroup to target. If it is not NULL,
360 * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan
361 * objects from the memory cgroup specified. Otherwise all shrinkers
362 * are called, and memcg aware shrinkers are supposed to scan the
363 * global list then.
364 *
 * @nr_scanned and @nr_eligible form a ratio that indicates how much of
366 * the available objects should be scanned.  Page reclaim for example
367 * passes the number of pages scanned and the number of pages on the
368 * LRU lists that it considered on @nid, plus a bias in @nr_scanned
369 * when it encountered mapped pages.  The ratio is further biased by
370 * the ->seeks setting of the shrink function, which indicates the
371 * cost to recreate an object relative to that of an LRU page.
372 *
373 * Returns the number of reclaimed slab objects.
374 */
375static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
376				 struct mem_cgroup *memcg,
377				 unsigned long nr_scanned,
378				 unsigned long nr_eligible)
379{
380	struct shrinker *shrinker;
381	unsigned long freed = 0;
382
383	if (memcg && !memcg_kmem_is_active(memcg))
384		return 0;
385
386	if (nr_scanned == 0)
387		nr_scanned = SWAP_CLUSTER_MAX;
388
389	if (!down_read_trylock(&shrinker_rwsem)) {
390		/*
391		 * If we would return 0, our callers would understand that we
392		 * have nothing else to shrink and give up trying. By returning
393		 * 1 we keep it going and assume we'll be able to shrink next
394		 * time.
395		 */
396		freed = 1;
397		goto out;
398	}
399
400	list_for_each_entry(shrinker, &shrinker_list, list) {
401		struct shrink_control sc = {
402			.gfp_mask = gfp_mask,
403			.nid = nid,
404			.memcg = memcg,
405		};
406
407		if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
408			continue;
409
410		if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
411			sc.nid = 0;
412
413		freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
414	}
415
416	up_read(&shrinker_rwsem);
417out:
418	cond_resched();
419	return freed;
420}
421
422void drop_slab_node(int nid)
423{
424	unsigned long freed;
425
426	do {
427		struct mem_cgroup *memcg = NULL;
428
429		freed = 0;
430		do {
431			freed += shrink_slab(GFP_KERNEL, nid, memcg,
432					     1000, 1000);
433		} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
434	} while (freed > 10);
435}
436
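/*
 * drop_slab() below backs the slab half of /proc/sys/vm/drop_caches
 * (values 2 and 3): the 1000/1000 pressure ratio passed by
 * drop_slab_node() asks every shrinker to scan essentially all of its
 * objects, and the loop repeats until a pass frees ten objects or fewer.
 */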
437void drop_slab(void)
438{
439	int nid;
440
441	for_each_online_node(nid)
442		drop_slab_node(nid);
443}
444
445static inline int is_page_cache_freeable(struct page *page)
446{
447	/*
448	 * A freeable page cache page is referenced only by the caller
449	 * that isolated the page, the page cache radix tree and
450	 * optional buffer heads at page->private.
451	 */
452	return page_count(page) - page_has_private(page) == 2;
453}
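
/*
 * Worked example of the test above: an isolated, otherwise idle pagecache
 * page holds one reference for the isolating caller and one for the radix
 * tree, so page_count() == 2 with page_has_private() == 0; if buffer heads
 * are attached, both sides of the equation grow by one and it still holds.
 */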
454
455static int may_write_to_queue(struct backing_dev_info *bdi,
456			      struct scan_control *sc)
457{
458	if (current->flags & PF_SWAPWRITE)
459		return 1;
460	if (!bdi_write_congested(bdi))
461		return 1;
462	if (bdi == current->backing_dev_info)
463		return 1;
464	return 0;
465}
466
467/*
468 * We detected a synchronous write error writing a page out.  Probably
469 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
470 * fsync(), msync() or close().
471 *
472 * The tricky part is that after writepage we cannot touch the mapping: nothing
473 * prevents it from being freed up.  But we have a ref on the page and once
474 * that page is locked, the mapping is pinned.
475 *
476 * We're allowed to run sleeping lock_page() here because we know the caller has
477 * __GFP_FS.
478 */
479static void handle_write_error(struct address_space *mapping,
480				struct page *page, int error)
481{
482	lock_page(page);
483	if (page_mapping(page) == mapping)
484		mapping_set_error(mapping, error);
485	unlock_page(page);
486}
487
488/* possible outcome of pageout() */
489typedef enum {
490	/* failed to write page out, page is locked */
491	PAGE_KEEP,
492	/* move page to the active list, page is locked */
493	PAGE_ACTIVATE,
494	/* page has been sent to the disk successfully, page is unlocked */
495	PAGE_SUCCESS,
496	/* page is clean and locked */
497	PAGE_CLEAN,
498} pageout_t;
499
500/*
501 * pageout is called by shrink_page_list() for each dirty page.
502 * Calls ->writepage().
503 */
504static pageout_t pageout(struct page *page, struct address_space *mapping,
505			 struct scan_control *sc)
506{
507	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
513	 *
514	 * If this process is currently in __generic_file_write_iter() against
515	 * this page's queue, we can perform writeback even if that
516	 * will block.
517	 *
518	 * If the page is swapcache, write it back even if that would
519	 * block, for some throttling. This happens by accident, because
520	 * swap_backing_dev_info is bust: it doesn't reflect the
521	 * congestion state of the swapdevs.  Easy to fix, if needed.
522	 */
523	if (!is_page_cache_freeable(page))
524		return PAGE_KEEP;
525	if (!mapping) {
526		/*
527		 * Some data journaling orphaned pages can have
528		 * page->mapping == NULL while being dirty with clean buffers.
529		 */
530		if (page_has_private(page)) {
531			if (try_to_free_buffers(page)) {
532				ClearPageDirty(page);
533				pr_info("%s: orphaned page\n", __func__);
534				return PAGE_CLEAN;
535			}
536		}
537		return PAGE_KEEP;
538	}
539	if (mapping->a_ops->writepage == NULL)
540		return PAGE_ACTIVATE;
541	if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
542		return PAGE_KEEP;
543
544	if (clear_page_dirty_for_io(page)) {
545		int res;
546		struct writeback_control wbc = {
547			.sync_mode = WB_SYNC_NONE,
548			.nr_to_write = SWAP_CLUSTER_MAX,
549			.range_start = 0,
550			.range_end = LLONG_MAX,
551			.for_reclaim = 1,
552		};
553
554		SetPageReclaim(page);
555		res = mapping->a_ops->writepage(page, &wbc);
556		if (res < 0)
557			handle_write_error(mapping, page, res);
558		if (res == AOP_WRITEPAGE_ACTIVATE) {
559			ClearPageReclaim(page);
560			return PAGE_ACTIVATE;
561		}
562
563		if (!PageWriteback(page)) {
564			/* synchronous write or broken a_ops? */
565			ClearPageReclaim(page);
566		}
567		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
568		inc_zone_page_state(page, NR_VMSCAN_WRITE);
569		return PAGE_SUCCESS;
570	}
571
572	return PAGE_CLEAN;
573}
574
575/*
576 * Same as remove_mapping, but if the page is removed from the mapping, it
577 * gets returned with a refcount of 0.
578 */
579static int __remove_mapping(struct address_space *mapping, struct page *page,
580			    bool reclaimed)
581{
582	BUG_ON(!PageLocked(page));
583	BUG_ON(mapping != page_mapping(page));
584
585	spin_lock_irq(&mapping->tree_lock);
586	/*
	 * The non-racy check for a busy page.
588	 *
589	 * Must be careful with the order of the tests. When someone has
590	 * a ref to the page, it may be possible that they dirty it then
591	 * drop the reference. So if PageDirty is tested before page_count
592	 * here, then the following race may occur:
593	 *
594	 * get_user_pages(&page);
595	 * [user mapping goes away]
596	 * write_to(page);
597	 *				!PageDirty(page)    [good]
598	 * SetPageDirty(page);
599	 * put_page(page);
600	 *				!page_count(page)   [good, discard it]
601	 *
602	 * [oops, our write_to data is lost]
603	 *
604	 * Reversing the order of the tests ensures such a situation cannot
605	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
606	 * load is not satisfied before that of page->_count.
607	 *
608	 * Note that if SetPageDirty is always performed via set_page_dirty,
609	 * and thus under tree_lock, then this ordering is not required.
610	 */
611	if (!page_freeze_refs(page, 2))
612		goto cannot_free;
613	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
614	if (unlikely(PageDirty(page))) {
615		page_unfreeze_refs(page, 2);
616		goto cannot_free;
617	}
618
619	if (PageSwapCache(page)) {
620		swp_entry_t swap = { .val = page_private(page) };
621		mem_cgroup_swapout(page, swap);
622		__delete_from_swap_cache(page);
623		spin_unlock_irq(&mapping->tree_lock);
624		swapcache_free(swap);
625	} else {
626		void (*freepage)(struct page *);
627		void *shadow = NULL;
628
629		freepage = mapping->a_ops->freepage;
630		/*
631		 * Remember a shadow entry for reclaimed file cache in
632		 * order to detect refaults, thus thrashing, later on.
633		 *
634		 * But don't store shadows in an address space that is
		 * already exiting.  This is not just an optimization,
636		 * inode reclaim needs to empty out the radix tree or
637		 * the nodes are lost.  Don't plant shadows behind its
638		 * back.
639		 */
640		if (reclaimed && page_is_file_cache(page) &&
641		    !mapping_exiting(mapping))
642			shadow = workingset_eviction(mapping, page);
643		__delete_from_page_cache(page, shadow);
644		spin_unlock_irq(&mapping->tree_lock);
645
646		if (freepage != NULL)
647			freepage(page);
648	}
649
650	return 1;
651
652cannot_free:
653	spin_unlock_irq(&mapping->tree_lock);
654	return 0;
655}
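
/*
 * The refcount handshake above: page_freeze_refs(page, 2) succeeds only if
 * exactly two references remain (the isolating caller plus the page cache)
 * and atomically drops the count to zero, locking out concurrent
 * speculative lookups. shrink_page_list() then performs the final free
 * itself, while remove_mapping() below unfreezes to 1 so that its caller
 * keeps a single reference.
 */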
656
657/*
658 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
659 * someone else has a ref on the page, abort and return 0.  If it was
660 * successfully detached, return 1.  Assumes the caller has a single ref on
661 * this page.
662 */
663int remove_mapping(struct address_space *mapping, struct page *page)
664{
665	if (__remove_mapping(mapping, page, false)) {
666		/*
667		 * Unfreezing the refcount with 1 rather than 2 effectively
668		 * drops the pagecache ref for us without requiring another
669		 * atomic operation.
670		 */
671		page_unfreeze_refs(page, 1);
672		return 1;
673	}
674	return 0;
675}
676
677/**
678 * putback_lru_page - put previously isolated page onto appropriate LRU list
679 * @page: page to be put back to appropriate lru list
680 *
681 * Add previously isolated @page to appropriate LRU list.
682 * Page may still be unevictable for other reasons.
683 *
684 * lru_lock must not be held, interrupts must be enabled.
685 */
686void putback_lru_page(struct page *page)
687{
688	bool is_unevictable;
689	int was_unevictable = PageUnevictable(page);
690
691	VM_BUG_ON_PAGE(PageLRU(page), page);
692
693redo:
694	ClearPageUnevictable(page);
695
696	if (page_evictable(page)) {
697		/*
698		 * For evictable pages, we can use the cache.
		 * In the event of a race, the worst case is we end up with an
		 * unevictable page on the [in]active list.
701		 * We know how to handle that.
702		 */
703		is_unevictable = false;
704		lru_cache_add(page);
705	} else {
706		/*
707		 * Put unevictable pages directly on zone's unevictable
708		 * list.
709		 */
710		is_unevictable = true;
711		add_page_to_unevictable_list(page);
712		/*
713		 * When racing with an mlock or AS_UNEVICTABLE clearing
714		 * (page is unlocked) make sure that if the other thread
715		 * does not observe our setting of PG_lru and fails
716		 * isolation/check_move_unevictable_pages,
717		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
718		 * the page back to the evictable list.
719		 *
720		 * The other side is TestClearPageMlocked() or shmem_lock().
721		 */
722		smp_mb();
723	}
724
	/*
	 * The page's status can change while we move it among LRU lists. If an
	 * evictable page ends up on the unevictable list, it will never be
	 * freed. To avoid that, check again after adding it to the list.
	 */
730	if (is_unevictable && page_evictable(page)) {
731		if (!isolate_lru_page(page)) {
732			put_page(page);
733			goto redo;
734		}
		/*
		 * This means someone else dropped this page from the LRU,
		 * so it will be freed or put back on the LRU again. There
		 * is nothing to do here.
		 */
739	}
740
741	if (was_unevictable && !is_unevictable)
742		count_vm_event(UNEVICTABLE_PGRESCUED);
743	else if (!was_unevictable && is_unevictable)
744		count_vm_event(UNEVICTABLE_PGCULLED);
745
746	put_page(page);		/* drop ref from isolate */
747}
748
749enum page_references {
750	PAGEREF_RECLAIM,
751	PAGEREF_RECLAIM_CLEAN,
752	PAGEREF_KEEP,
753	PAGEREF_ACTIVATE,
754};
755
756static enum page_references page_check_references(struct page *page,
757						  struct scan_control *sc)
758{
759	int referenced_ptes, referenced_page;
760	unsigned long vm_flags;
761
762	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
763					  &vm_flags);
764	referenced_page = TestClearPageReferenced(page);
765
766	/*
767	 * Mlock lost the isolation race with us.  Let try_to_unmap()
768	 * move the page to the unevictable list.
769	 */
770	if (vm_flags & VM_LOCKED)
771		return PAGEREF_RECLAIM;
772
773	if (referenced_ptes) {
774		if (PageSwapBacked(page))
775			return PAGEREF_ACTIVATE;
776		/*
777		 * All mapped pages start out with page table
778		 * references from the instantiating fault, so we need
779		 * to look twice if a mapped file page is used more
780		 * than once.
781		 *
782		 * Mark it and spare it for another trip around the
783		 * inactive list.  Another page table reference will
784		 * lead to its activation.
785		 *
786		 * Note: the mark is set for activated pages as well
787		 * so that recently deactivated but used pages are
788		 * quickly recovered.
789		 */
790		SetPageReferenced(page);
791
792		if (referenced_page || referenced_ptes > 1)
793			return PAGEREF_ACTIVATE;
794
795		/*
796		 * Activate file-backed executable pages after first usage.
797		 */
798		if (vm_flags & VM_EXEC)
799			return PAGEREF_ACTIVATE;
800
801		return PAGEREF_KEEP;
802	}
803
804	/* Reclaim if clean, defer dirty pages to writeback */
805	if (referenced_page && !PageSwapBacked(page))
806		return PAGEREF_RECLAIM_CLEAN;
807
808	return PAGEREF_RECLAIM;
809}
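
/*
 * Rough decision table for the above, assuming the page is not VM_LOCKED
 * (VM_LOCKED pages always return PAGEREF_RECLAIM so try_to_unmap() can
 * move them to the unevictable list):
 *
 *	pte refs   PG_referenced   swap backed     result
 *	   0            no              -           RECLAIM
 *	   0            yes            yes          RECLAIM
 *	   0            yes            no           RECLAIM_CLEAN
 *	  >=1            -             yes          ACTIVATE
 *	  >=2            -             no           ACTIVATE
 *	   1            yes            no           ACTIVATE
 *	   1            no             no           KEEP (PG_referenced set)
 *
 * VM_EXEC file pages are additionally activated on their first reference.
 */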
810
811/* Check if a page is dirty or under writeback */
812static void page_check_dirty_writeback(struct page *page,
813				       bool *dirty, bool *writeback)
814{
815	struct address_space *mapping;
816
817	/*
818	 * Anonymous pages are not handled by flushers and must be written
	 * from reclaim context. Do not stall reclaim based on them.
820	 */
821	if (!page_is_file_cache(page)) {
822		*dirty = false;
823		*writeback = false;
824		return;
825	}
826
827	/* By default assume that the page flags are accurate */
828	*dirty = PageDirty(page);
829	*writeback = PageWriteback(page);
830
831	/* Verify dirty/writeback state if the filesystem supports it */
832	if (!page_has_private(page))
833		return;
834
835	mapping = page_mapping(page);
836	if (mapping && mapping->a_ops->is_dirty_writeback)
837		mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
838}
839
840/*
841 * shrink_page_list() returns the number of reclaimed pages
842 */
843static unsigned long shrink_page_list(struct list_head *page_list,
844				      struct zone *zone,
845				      struct scan_control *sc,
846				      enum ttu_flags ttu_flags,
847				      unsigned long *ret_nr_dirty,
848				      unsigned long *ret_nr_unqueued_dirty,
849				      unsigned long *ret_nr_congested,
850				      unsigned long *ret_nr_writeback,
851				      unsigned long *ret_nr_immediate,
852				      bool force_reclaim)
853{
854	LIST_HEAD(ret_pages);
855	LIST_HEAD(free_pages);
856	int pgactivate = 0;
857	unsigned long nr_unqueued_dirty = 0;
858	unsigned long nr_dirty = 0;
859	unsigned long nr_congested = 0;
860	unsigned long nr_reclaimed = 0;
861	unsigned long nr_writeback = 0;
862	unsigned long nr_immediate = 0;
863
864	cond_resched();
865
866	while (!list_empty(page_list)) {
867		struct address_space *mapping;
868		struct page *page;
869		int may_enter_fs;
870		enum page_references references = PAGEREF_RECLAIM_CLEAN;
871		bool dirty, writeback;
872
873		cond_resched();
874
875		page = lru_to_page(page_list);
876		list_del(&page->lru);
877
878		if (!trylock_page(page))
879			goto keep;
880
881		VM_BUG_ON_PAGE(PageActive(page), page);
882		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
883
884		sc->nr_scanned++;
885
886		if (unlikely(!page_evictable(page)))
887			goto cull_mlocked;
888
889		if (!sc->may_unmap && page_mapped(page))
890			goto keep_locked;
891
892		/* Double the slab pressure for mapped and swapcache pages */
893		if (page_mapped(page) || PageSwapCache(page))
894			sc->nr_scanned++;
895
896		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
897			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
898
899		/*
900		 * The number of dirty pages determines if a zone is marked
901		 * reclaim_congested which affects wait_iff_congested. kswapd
902		 * will stall and start writing pages if the tail of the LRU
903		 * is all dirty unqueued pages.
904		 */
905		page_check_dirty_writeback(page, &dirty, &writeback);
906		if (dirty || writeback)
907			nr_dirty++;
908
909		if (dirty && !writeback)
910			nr_unqueued_dirty++;
911
912		/*
913		 * Treat this page as congested if the underlying BDI is or if
914		 * pages are cycling through the LRU so quickly that the
915		 * pages marked for immediate reclaim are making it to the
916		 * end of the LRU a second time.
917		 */
918		mapping = page_mapping(page);
919		if (((dirty || writeback) && mapping &&
920		     bdi_write_congested(inode_to_bdi(mapping->host))) ||
921		    (writeback && PageReclaim(page)))
922			nr_congested++;
923
924		/*
925		 * If a page at the tail of the LRU is under writeback, there
926		 * are three cases to consider.
927		 *
928		 * 1) If reclaim is encountering an excessive number of pages
929		 *    under writeback and this page is both under writeback and
930		 *    PageReclaim then it indicates that pages are being queued
931		 *    for IO but are being recycled through the LRU before the
932		 *    IO can complete. Waiting on the page itself risks an
933		 *    indefinite stall if it is impossible to writeback the
934		 *    page due to IO error or disconnected storage so instead
935		 *    note that the LRU is being scanned too quickly and the
936		 *    caller can stall after page list has been processed.
937		 *
		 * 2) Global reclaim encounters a page, or memcg reclaim
		 *    encounters a page that is not marked for immediate
		 *    reclaim, or the caller does not have __GFP_FS (or
		 *    __GFP_IO if it's simply going to swap, not to fs). In
		 *    this case mark the page for immediate reclaim and
		 *    continue scanning.
943		 *
944		 *    Require may_enter_fs because we would wait on fs, which
945		 *    may not have submitted IO yet. And the loop driver might
946		 *    enter reclaim, and deadlock if it waits on a page for
947		 *    which it is needed to do the write (loop masks off
948		 *    __GFP_IO|__GFP_FS for this reason); but more thought
949		 *    would probably show more reasons.
950		 *
951		 * 3) memcg encounters a page that is not already marked
952		 *    PageReclaim. memcg does not have any dirty pages
953		 *    throttling so we could easily OOM just because too many
954		 *    pages are in writeback and there is nothing else to
955		 *    reclaim. Wait for the writeback to complete.
956		 */
957		if (PageWriteback(page)) {
958			/* Case 1 above */
959			if (current_is_kswapd() &&
960			    PageReclaim(page) &&
961			    test_bit(ZONE_WRITEBACK, &zone->flags)) {
962				nr_immediate++;
963				goto keep_locked;
964
965			/* Case 2 above */
966			} else if (global_reclaim(sc) ||
967			    !PageReclaim(page) || !may_enter_fs) {
968				/*
969				 * This is slightly racy - end_page_writeback()
970				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here ends up interpreted
972				 * as PageReadahead - but that does not matter
973				 * enough to care.  What we do want is for this
974				 * page to have PageReclaim set next time memcg
975				 * reclaim reaches the tests above, so it will
976				 * then wait_on_page_writeback() to avoid OOM;
977				 * and it's also appropriate in global reclaim.
978				 */
979				SetPageReclaim(page);
980				nr_writeback++;
981
982				goto keep_locked;
983
984			/* Case 3 above */
985			} else {
986				wait_on_page_writeback(page);
987			}
988		}
989
990		if (!force_reclaim)
991			references = page_check_references(page, sc);
992
993		switch (references) {
994		case PAGEREF_ACTIVATE:
995			goto activate_locked;
996		case PAGEREF_KEEP:
997			goto keep_locked;
998		case PAGEREF_RECLAIM:
999		case PAGEREF_RECLAIM_CLEAN:
1000			; /* try to reclaim the page below */
1001		}
1002
1003		/*
1004		 * Anonymous process memory has backing store?
1005		 * Try to allocate it some swap space here.
1006		 */
1007		if (PageAnon(page) && !PageSwapCache(page)) {
1008			if (!(sc->gfp_mask & __GFP_IO))
1009				goto keep_locked;
1010			if (!add_to_swap(page, page_list))
1011				goto activate_locked;
1012			may_enter_fs = 1;
1013
1014			/* Adding to swap updated mapping */
1015			mapping = page_mapping(page);
1016		}
1017
1018		/*
1019		 * The page is mapped into the page tables of one or more
1020		 * processes. Try to unmap it here.
1021		 */
1022		if (page_mapped(page) && mapping) {
1023			switch (try_to_unmap(page, ttu_flags)) {
1024			case SWAP_FAIL:
1025				goto activate_locked;
1026			case SWAP_AGAIN:
1027				goto keep_locked;
1028			case SWAP_MLOCK:
1029				goto cull_mlocked;
1030			case SWAP_SUCCESS:
1031				; /* try to free the page below */
1032			}
1033		}
1034
1035		if (PageDirty(page)) {
1036			/*
			 * Only kswapd can write back filesystem pages, to
			 * avoid the risk of stack overflow, and even then
			 * only if many dirty pages have been encountered.
1040			 */
1041			if (page_is_file_cache(page) &&
1042					(!current_is_kswapd() ||
1043					 !test_bit(ZONE_DIRTY, &zone->flags))) {
1044				/*
1045				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty.
1049				 */
1050				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
1051				SetPageReclaim(page);
1052
1053				goto keep_locked;
1054			}
1055
1056			if (references == PAGEREF_RECLAIM_CLEAN)
1057				goto keep_locked;
1058			if (!may_enter_fs)
1059				goto keep_locked;
1060			if (!sc->may_writepage)
1061				goto keep_locked;
1062
1063			/* Page is dirty, try to write it out here */
1064			switch (pageout(page, mapping, sc)) {
1065			case PAGE_KEEP:
1066				goto keep_locked;
1067			case PAGE_ACTIVATE:
1068				goto activate_locked;
1069			case PAGE_SUCCESS:
1070				if (PageWriteback(page))
1071					goto keep;
1072				if (PageDirty(page))
1073					goto keep;
1074
1075				/*
1076				 * A synchronous write - probably a ramdisk.  Go
1077				 * ahead and try to reclaim the page.
1078				 */
1079				if (!trylock_page(page))
1080					goto keep;
1081				if (PageDirty(page) || PageWriteback(page))
1082					goto keep_locked;
1083				mapping = page_mapping(page);
1084			case PAGE_CLEAN:
1085				; /* try to free the page below */
1086			}
1087		}
1088
1089		/*
1090		 * If the page has buffers, try to free the buffer mappings
1091		 * associated with this page. If we succeed we try to free
1092		 * the page as well.
1093		 *
1094		 * We do this even if the page is PageDirty().
1095		 * try_to_release_page() does not perform I/O, but it is
1096		 * possible for a page to have PageDirty set, but it is actually
1097		 * clean (all its buffers are clean).  This happens if the
1098		 * buffers were written out directly, with submit_bh(). ext3
1099		 * will do this, as well as the blockdev mapping.
1100		 * try_to_release_page() will discover that cleanness and will
1101		 * drop the buffers and mark the page clean - it can be freed.
1102		 *
1103		 * Rarely, pages can have buffers and no ->mapping.  These are
1104		 * the pages which were not successfully invalidated in
1105		 * truncate_complete_page().  We try to drop those buffers here
1106		 * and if that worked, and the page is no longer mapped into
1107		 * process address space (page_count == 1) it can be freed.
1108		 * Otherwise, leave the page on the LRU so it is swappable.
1109		 */
1110		if (page_has_private(page)) {
1111			if (!try_to_release_page(page, sc->gfp_mask))
1112				goto activate_locked;
1113			if (!mapping && page_count(page) == 1) {
1114				unlock_page(page);
1115				if (put_page_testzero(page))
1116					goto free_it;
1117				else {
1118					/*
1119					 * rare race with speculative reference.
1120					 * the speculative reference will free
1121					 * this page shortly, so we may
1122					 * increment nr_reclaimed here (and
1123					 * leave it off the LRU).
1124					 */
1125					nr_reclaimed++;
1126					continue;
1127				}
1128			}
1129		}
1130
1131		if (!mapping || !__remove_mapping(mapping, page, true))
1132			goto keep_locked;
1133
1134		/*
1135		 * At this point, we have no other references and there is
1136		 * no way to pick any more up (removed from LRU, removed
1137		 * from pagecache). Can use non-atomic bitops now (and
1138		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references).
1140		 */
1141		__clear_page_locked(page);
1142free_it:
1143		nr_reclaimed++;
1144
1145		/*
1146		 * Is there need to periodically free_page_list? It would
1147		 * appear not as the counts should be low
1148		 */
1149		list_add(&page->lru, &free_pages);
1150		continue;
1151
1152cull_mlocked:
1153		if (PageSwapCache(page))
1154			try_to_free_swap(page);
1155		unlock_page(page);
1156		list_add(&page->lru, &ret_pages);
1157		continue;
1158
1159activate_locked:
1160		/* Not a candidate for swapping, so reclaim swap space. */
1161		if (PageSwapCache(page) && vm_swap_full())
1162			try_to_free_swap(page);
1163		VM_BUG_ON_PAGE(PageActive(page), page);
1164		SetPageActive(page);
1165		pgactivate++;
1166keep_locked:
1167		unlock_page(page);
1168keep:
1169		list_add(&page->lru, &ret_pages);
1170		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1171	}
1172
1173	mem_cgroup_uncharge_list(&free_pages);
1174	free_hot_cold_page_list(&free_pages, true);
1175
1176	list_splice(&ret_pages, page_list);
1177	count_vm_events(PGACTIVATE, pgactivate);
1178
1179	*ret_nr_dirty += nr_dirty;
1180	*ret_nr_congested += nr_congested;
1181	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
1182	*ret_nr_writeback += nr_writeback;
1183	*ret_nr_immediate += nr_immediate;
1184	return nr_reclaimed;
1185}
1186
1187unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1188					    struct list_head *page_list)
1189{
1190	struct scan_control sc = {
1191		.gfp_mask = GFP_KERNEL,
1192		.priority = DEF_PRIORITY,
1193		.may_unmap = 1,
1194	};
1195	unsigned long ret, dummy1, dummy2, dummy3, dummy4, dummy5;
1196	struct page *page, *next;
1197	LIST_HEAD(clean_pages);
1198
1199	list_for_each_entry_safe(page, next, page_list, lru) {
1200		if (page_is_file_cache(page) && !PageDirty(page) &&
1201		    !isolated_balloon_page(page)) {
1202			ClearPageActive(page);
1203			list_move(&page->lru, &clean_pages);
1204		}
1205	}
1206
1207	ret = shrink_page_list(&clean_pages, zone, &sc,
1208			TTU_UNMAP|TTU_IGNORE_ACCESS,
1209			&dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1210	list_splice(&clean_pages, page_list);
1211	mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
1212	return ret;
1213}
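
/*
 * reclaim_clean_pages_from_list() is used by alloc_contig_range() (e.g. by
 * CMA) to drop clean file pages from a target range without a full LRU
 * scan; only pages that can be freed without any writeback are passed to
 * shrink_page_list() here, with force_reclaim so reference checks are
 * skipped.
 */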
1214
1215/*
1216 * Attempt to remove the specified page from its LRU.  Only take this page
1217 * if it is of the appropriate PageActive status.  Pages which are being
1218 * freed elsewhere are also ignored.
1219 *
1220 * page:	page to consider
1221 * mode:	one of the LRU isolation modes defined above
1222 *
1223 * returns 0 on success, -ve errno on failure.
1224 */
1225int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1226{
1227	int ret = -EINVAL;
1228
1229	/* Only take pages on the LRU. */
1230	if (!PageLRU(page))
1231		return ret;
1232
1233	/* Compaction should not handle unevictable pages but CMA can do so */
1234	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
1235		return ret;
1236
1237	ret = -EBUSY;
1238
1239	/*
1240	 * To minimise LRU disruption, the caller can indicate that it only
1241	 * wants to isolate pages it will be able to operate on without
1242	 * blocking - clean pages for the most part.
1243	 *
1244	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that the caller only wants
	 * pages that can be migrated without blocking.
1249	 */
1250	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
1251		/* All the caller can do on PageWriteback is block */
1252		if (PageWriteback(page))
1253			return ret;
1254
1255		if (PageDirty(page)) {
1256			struct address_space *mapping;
1257
1258			/* ISOLATE_CLEAN means only clean pages */
1259			if (mode & ISOLATE_CLEAN)
1260				return ret;
1261
1262			/*
1263			 * Only pages without mappings or that have a
1264			 * ->migratepage callback are possible to migrate
1265			 * without blocking
1266			 */
1267			mapping = page_mapping(page);
1268			if (mapping && !mapping->a_ops->migratepage)
1269				return ret;
1270		}
1271	}
1272
1273	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
1274		return ret;
1275
1276	if (likely(get_page_unless_zero(page))) {
1277		/*
1278		 * Be careful not to clear PageLRU until after we're
1279		 * sure the page is not being freed elsewhere -- the
1280		 * page release code relies on it.
1281		 */
1282		ClearPageLRU(page);
1283		ret = 0;
1284	}
1285
1286	return ret;
1287}
1288
1289/*
1290 * zone->lru_lock is heavily contended.  Some of the functions that
1291 * shrink the lists perform better by taking out a batch of pages
1292 * and working on them outside the LRU lock.
1293 *
1294 * For pagecache intensive workloads, this function is the hottest
1295 * spot in the kernel (apart from copy_*_user functions).
1296 *
1297 * Appropriate locks must be held before calling this function.
1298 *
1299 * @nr_to_scan:	The number of pages to look through on the list.
1300 * @lruvec:	The LRU vector to pull pages from.
1301 * @dst:	The temp list to put pages on to.
1302 * @nr_scanned:	The number of pages that were scanned.
1303 * @sc:		The scan_control struct for this reclaim session
1304 * @mode:	One of the LRU isolation modes
1305 * @lru:	LRU list id for isolating
1306 *
1307 * returns how many pages were moved onto *@dst.
1308 */
1309static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1310		struct lruvec *lruvec, struct list_head *dst,
1311		unsigned long *nr_scanned, struct scan_control *sc,
1312		isolate_mode_t mode, enum lru_list lru)
1313{
1314	struct list_head *src = &lruvec->lists[lru];
1315	unsigned long nr_taken = 0;
1316	unsigned long scan;
1317
1318	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1319		struct page *page;
1320		int nr_pages;
1321
1322		page = lru_to_page(src);
1323		prefetchw_prev_lru_page(page, src, flags);
1324
1325		VM_BUG_ON_PAGE(!PageLRU(page), page);
1326
1327		switch (__isolate_lru_page(page, mode)) {
1328		case 0:
1329			nr_pages = hpage_nr_pages(page);
1330			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1331			list_move(&page->lru, dst);
1332			nr_taken += nr_pages;
1333			break;
1334
1335		case -EBUSY:
1336			/* else it is being freed elsewhere */
1337			list_move(&page->lru, src);
1338			continue;
1339
1340		default:
1341			BUG();
1342		}
1343	}
1344
1345	*nr_scanned = scan;
1346	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1347				    nr_taken, mode, is_file_lru(lru));
1348	return nr_taken;
1349}
1350
1351/**
1352 * isolate_lru_page - tries to isolate a page from its LRU list
1353 * @page: page to isolate from its LRU list
1354 *
1355 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
1356 * vmstat statistic corresponding to whatever LRU list the page was on.
1357 *
1358 * Returns 0 if the page was removed from an LRU list.
1359 * Returns -EBUSY if the page was not on an LRU list.
1360 *
1361 * The returned page will have PageLRU() cleared.  If it was found on
1362 * the active list, it will have PageActive set.  If it was found on
1363 * the unevictable list, it will have the PageUnevictable bit set. That flag
1364 * may need to be cleared by the caller before letting the page go.
1365 *
1366 * The vmstat statistic corresponding to the list on which the page was
1367 * found will be decremented.
1368 *
1369 * Restrictions:
1370 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
1372 *     without a stable reference).
1373 * (2) the lru_lock must not be held.
1374 * (3) interrupts must be enabled.
1375 */
1376int isolate_lru_page(struct page *page)
1377{
1378	int ret = -EBUSY;
1379
1380	VM_BUG_ON_PAGE(!page_count(page), page);
1381
1382	if (PageLRU(page)) {
1383		struct zone *zone = page_zone(page);
1384		struct lruvec *lruvec;
1385
1386		spin_lock_irq(&zone->lru_lock);
1387		lruvec = mem_cgroup_page_lruvec(page, zone);
1388		if (PageLRU(page)) {
1389			int lru = page_lru(page);
1390			get_page(page);
1391			ClearPageLRU(page);
1392			del_page_from_lru_list(page, lruvec, lru);
1393			ret = 0;
1394		}
1395		spin_unlock_irq(&zone->lru_lock);
1396	}
1397	return ret;
1398}
1399
1400/*
1401 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there is a massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU;
 * the LRU list will shrink and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
1406 */
1407static int too_many_isolated(struct zone *zone, int file,
1408		struct scan_control *sc)
1409{
1410	unsigned long inactive, isolated;
1411
1412	if (current_is_kswapd())
1413		return 0;
1414
1415	if (!global_reclaim(sc))
1416		return 0;
1417
1418	if (file) {
1419		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1420		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1421	} else {
1422		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1423		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1424	}
1425
1426	/*
1427	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
1428	 * won't get blocked by normal direct-reclaimers, forming a circular
1429	 * deadlock.
1430	 */
1431	if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
1432		inactive >>= 3;
1433
1434	return isolated > inactive;
1435}
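
/*
 * Concrete example of the check above: with 16000 inactive file pages, a
 * GFP_KERNEL direct reclaimer (which carries both __GFP_IO and __GFP_FS)
 * is throttled once more than 2000 pages (inactive >> 3) sit isolated,
 * whereas a GFP_NOFS/GFP_NOIO caller is only throttled past 16000. The
 * laxer limit keeps IO/FS-restricted allocations from queueing up behind
 * the very reclaimers they may be blocking.
 */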
1436
1437static noinline_for_stack void
1438putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1439{
1440	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1441	struct zone *zone = lruvec_zone(lruvec);
1442	LIST_HEAD(pages_to_free);
1443
1444	/*
1445	 * Put back any unfreeable pages.
1446	 */
1447	while (!list_empty(page_list)) {
1448		struct page *page = lru_to_page(page_list);
1449		int lru;
1450
1451		VM_BUG_ON_PAGE(PageLRU(page), page);
1452		list_del(&page->lru);
1453		if (unlikely(!page_evictable(page))) {
1454			spin_unlock_irq(&zone->lru_lock);
1455			putback_lru_page(page);
1456			spin_lock_irq(&zone->lru_lock);
1457			continue;
1458		}
1459
1460		lruvec = mem_cgroup_page_lruvec(page, zone);
1461
1462		SetPageLRU(page);
1463		lru = page_lru(page);
1464		add_page_to_lru_list(page, lruvec, lru);
1465
1466		if (is_active_lru(lru)) {
1467			int file = is_file_lru(lru);
1468			int numpages = hpage_nr_pages(page);
1469			reclaim_stat->recent_rotated[file] += numpages;
1470		}
1471		if (put_page_testzero(page)) {
1472			__ClearPageLRU(page);
1473			__ClearPageActive(page);
1474			del_page_from_lru_list(page, lruvec, lru);
1475
1476			if (unlikely(PageCompound(page))) {
1477				spin_unlock_irq(&zone->lru_lock);
1478				mem_cgroup_uncharge(page);
1479				(*get_compound_page_dtor(page))(page);
1480				spin_lock_irq(&zone->lru_lock);
1481			} else
1482				list_add(&page->lru, &pages_to_free);
1483		}
1484	}
1485
1486	/*
1487	 * To save our caller's stack, now use input list for pages to free.
1488	 */
1489	list_splice(&pages_to_free, page_list);
1490}
1491
1492/*
1493 * If a kernel thread (such as nfsd for loop-back mounts) services
1494 * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
1495 * In that case we should only throttle if the backing device it is
1496 * writing to is congested.  In other cases it is safe to throttle.
1497 */
1498static int current_may_throttle(void)
1499{
1500	return !(current->flags & PF_LESS_THROTTLE) ||
1501		current->backing_dev_info == NULL ||
1502		bdi_write_congested(current->backing_dev_info);
1503}
1504
1505/*
1506 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages.
1508 */
1509static noinline_for_stack unsigned long
1510shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1511		     struct scan_control *sc, enum lru_list lru)
1512{
1513	LIST_HEAD(page_list);
1514	unsigned long nr_scanned;
1515	unsigned long nr_reclaimed = 0;
1516	unsigned long nr_taken;
1517	unsigned long nr_dirty = 0;
1518	unsigned long nr_congested = 0;
1519	unsigned long nr_unqueued_dirty = 0;
1520	unsigned long nr_writeback = 0;
1521	unsigned long nr_immediate = 0;
1522	isolate_mode_t isolate_mode = 0;
1523	int file = is_file_lru(lru);
1524	struct zone *zone = lruvec_zone(lruvec);
1525	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1526
1527	while (unlikely(too_many_isolated(zone, file, sc))) {
1528		congestion_wait(BLK_RW_ASYNC, HZ/10);
1529
1530		/* We are about to die and free our memory. Return now. */
1531		if (fatal_signal_pending(current))
1532			return SWAP_CLUSTER_MAX;
1533	}
1534
1535	lru_add_drain();
1536
1537	if (!sc->may_unmap)
1538		isolate_mode |= ISOLATE_UNMAPPED;
1539	if (!sc->may_writepage)
1540		isolate_mode |= ISOLATE_CLEAN;
1541
1542	spin_lock_irq(&zone->lru_lock);
1543
1544	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1545				     &nr_scanned, sc, isolate_mode, lru);
1546
1547	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1548	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1549
1550	if (global_reclaim(sc)) {
1551		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1552		if (current_is_kswapd())
1553			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1554		else
1555			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1556	}
1557	spin_unlock_irq(&zone->lru_lock);
1558
1559	if (nr_taken == 0)
1560		return 0;
1561
1562	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
1563				&nr_dirty, &nr_unqueued_dirty, &nr_congested,
1564				&nr_writeback, &nr_immediate,
1565				false);
1566
1567	spin_lock_irq(&zone->lru_lock);
1568
1569	reclaim_stat->recent_scanned[file] += nr_taken;
1570
1571	if (global_reclaim(sc)) {
1572		if (current_is_kswapd())
1573			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1574					       nr_reclaimed);
1575		else
1576			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
1577					       nr_reclaimed);
1578	}
1579
1580	putback_inactive_pages(lruvec, &page_list);
1581
1582	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1583
1584	spin_unlock_irq(&zone->lru_lock);
1585
1586	mem_cgroup_uncharge_list(&page_list);
1587	free_hot_cold_page_list(&page_list, true);
1588
1589	/*
1590	 * If reclaim is isolating dirty pages under writeback, it implies
1591	 * that the long-lived page allocation rate is exceeding the page
1592	 * laundering rate. Either the global limits are not being effective
1593	 * at throttling processes due to the page distribution throughout
1594	 * zones or there is heavy usage of a slow backing device. The
1595	 * only option is to throttle from reclaim context which is not ideal
1596	 * as there is no guarantee the dirtying process is throttled in the
1597	 * same way balance_dirty_pages() manages.
1598	 *
1599	 * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
	 * of pages under writeback flagged for immediate reclaim and stall if any
1601	 * are encountered in the nr_immediate check below.
1602	 */
1603	if (nr_writeback && nr_writeback == nr_taken)
1604		set_bit(ZONE_WRITEBACK, &zone->flags);
1605
1606	/*
1607	 * memcg will stall in page writeback so only consider forcibly
1608	 * stalling for global reclaim
1609	 */
1610	if (global_reclaim(sc)) {
1611		/*
1612		 * Tag a zone as congested if all the dirty pages scanned were
1613		 * backed by a congested BDI and wait_iff_congested will stall.
1614		 */
1615		if (nr_dirty && nr_dirty == nr_congested)
1616			set_bit(ZONE_CONGESTED, &zone->flags);
1617
1618		/*
1619		 * If dirty pages are scanned that are not queued for IO, it
1620		 * implies that flushers are not keeping up. In this case, flag
1621		 * the zone ZONE_DIRTY and kswapd will start writing pages from
1622		 * reclaim context.
1623		 */
1624		if (nr_unqueued_dirty == nr_taken)
1625			set_bit(ZONE_DIRTY, &zone->flags);
1626
1627		/*
		 * If kswapd scans pages marked for immediate
1629		 * reclaim and under writeback (nr_immediate), it implies
1630		 * that pages are cycling through the LRU faster than
1631		 * they are written so also forcibly stall.
1632		 */
1633		if (nr_immediate && current_may_throttle())
1634			congestion_wait(BLK_RW_ASYNC, HZ/10);
1635	}
1636
1637	/*
1638	 * Stall direct reclaim for IO completions if underlying BDIs or zone
1639	 * is congested. Allow kswapd to continue until it starts encountering
1640	 * unqueued dirty pages or cycling through the LRU too quickly.
1641	 */
1642	if (!sc->hibernation_mode && !current_is_kswapd() &&
1643	    current_may_throttle())
1644		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1645
1646	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1647		zone_idx(zone),
1648		nr_scanned, nr_reclaimed,
1649		sc->priority,
1650		trace_shrink_flags(file));
1651	return nr_reclaimed;
1652}
1653
1654/*
1655 * This moves pages from the active list to the inactive list.
1656 *
1657 * We move them the other way if the page is referenced by one or more
1658 * processes, from rmap.
1659 *
1660 * If the pages are mostly unmapped, the processing is fast and it is
1661 * appropriate to hold zone->lru_lock across the whole operation.  But if
1662 * the pages are mapped, the processing is slow (page_referenced()) so we
1663 * should drop zone->lru_lock around each page.  It's impossible to balance
1664 * this, so instead we remove the pages from the LRU while processing them.
1665 * It is safe to rely on PG_active against the non-LRU pages in here because
1666 * nobody will play with that bit on a non-LRU page.
1667 *
1668 * The downside is that we have to touch page->_count against each page.
1669 * But we had to alter page->flags anyway.
1670 */
1671
1672static void move_active_pages_to_lru(struct lruvec *lruvec,
1673				     struct list_head *list,
1674				     struct list_head *pages_to_free,
1675				     enum lru_list lru)
1676{
1677	struct zone *zone = lruvec_zone(lruvec);
1678	unsigned long pgmoved = 0;
1679	struct page *page;
1680	int nr_pages;
1681
1682	while (!list_empty(list)) {
1683		page = lru_to_page(list);
1684		lruvec = mem_cgroup_page_lruvec(page, zone);
1685
1686		VM_BUG_ON_PAGE(PageLRU(page), page);
1687		SetPageLRU(page);
1688
1689		nr_pages = hpage_nr_pages(page);
1690		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1691		list_move(&page->lru, &lruvec->lists[lru]);
1692		pgmoved += nr_pages;
1693
1694		if (put_page_testzero(page)) {
1695			__ClearPageLRU(page);
1696			__ClearPageActive(page);
1697			del_page_from_lru_list(page, lruvec, lru);
1698
1699			if (unlikely(PageCompound(page))) {
1700				spin_unlock_irq(&zone->lru_lock);
1701				mem_cgroup_uncharge(page);
1702				(*get_compound_page_dtor(page))(page);
1703				spin_lock_irq(&zone->lru_lock);
1704			} else
1705				list_add(&page->lru, pages_to_free);
1706		}
1707	}
1708	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1709	if (!is_active_lru(lru))
1710		__count_vm_events(PGDEACTIVATE, pgmoved);
1711}
1712
1713static void shrink_active_list(unsigned long nr_to_scan,
1714			       struct lruvec *lruvec,
1715			       struct scan_control *sc,
1716			       enum lru_list lru)
1717{
1718	unsigned long nr_taken;
1719	unsigned long nr_scanned;
1720	unsigned long vm_flags;
1721	LIST_HEAD(l_hold);	/* The pages which were snipped off */
1722	LIST_HEAD(l_active);
1723	LIST_HEAD(l_inactive);
1724	struct page *page;
1725	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1726	unsigned long nr_rotated = 0;
1727	isolate_mode_t isolate_mode = 0;
1728	int file = is_file_lru(lru);
1729	struct zone *zone = lruvec_zone(lruvec);
1730
1731	lru_add_drain();
1732
1733	if (!sc->may_unmap)
1734		isolate_mode |= ISOLATE_UNMAPPED;
1735	if (!sc->may_writepage)
1736		isolate_mode |= ISOLATE_CLEAN;
1737
1738	spin_lock_irq(&zone->lru_lock);
1739
1740	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1741				     &nr_scanned, sc, isolate_mode, lru);
1742	if (global_reclaim(sc))
1743		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
1744
1745	reclaim_stat->recent_scanned[file] += nr_taken;
1746
1747	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
1748	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1749	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1750	spin_unlock_irq(&zone->lru_lock);
1751
1752	while (!list_empty(&l_hold)) {
1753		cond_resched();
1754		page = lru_to_page(&l_hold);
1755		list_del(&page->lru);
1756
1757		if (unlikely(!page_evictable(page))) {
1758			putback_lru_page(page);
1759			continue;
1760		}
1761
1762		if (unlikely(buffer_heads_over_limit)) {
1763			if (page_has_private(page) && trylock_page(page)) {
1764				if (page_has_private(page))
1765					try_to_release_page(page, 0);
1766				unlock_page(page);
1767			}
1768		}
1769
1770		if (page_referenced(page, 0, sc->target_mem_cgroup,
1771				    &vm_flags)) {
1772			nr_rotated += hpage_nr_pages(page);
1773			/*
1774			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list, so
			 * that executable code gets a better chance to stay in
			 * memory under moderate memory pressure.  Anon pages
1778			 * are not likely to be evicted by use-once streaming
1779			 * IO, plus JVM can create lots of anon VM_EXEC pages,
1780			 * so we ignore them here.
1781			 */
1782			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
1783				list_add(&page->lru, &l_active);
1784				continue;
1785			}
1786		}
1787
1788		ClearPageActive(page);	/* we are de-activating */
1789		list_add(&page->lru, &l_inactive);
1790	}
1791
1792	/*
1793	 * Move pages back to the lru list.
1794	 */
1795	spin_lock_irq(&zone->lru_lock);
1796	/*
1797	 * Count referenced pages from currently used mappings as rotated,
1798	 * even though only some of them are actually re-activated.  This
1799	 * helps balance scan pressure between file and anonymous pages in
1800	 * get_scan_count.
1801	 */
1802	reclaim_stat->recent_rotated[file] += nr_rotated;
1803
1804	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1805	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1806	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1807	spin_unlock_irq(&zone->lru_lock);
1808
1809	mem_cgroup_uncharge_list(&l_hold);
1810	free_hot_cold_page_list(&l_hold, true);
1811}
1812
1813#ifdef CONFIG_SWAP
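/*
 * zone->inactive_ratio scales with the zone size: small zones aim for
 * roughly a 1:1 active:inactive anon balance, while larger zones tolerate
 * a proportionally bigger active list (roughly 3:1 for a 1GB zone) before
 * deactivation starts.
 */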
1814static int inactive_anon_is_low_global(struct zone *zone)
1815{
1816	unsigned long active, inactive;
1817
1818	active = zone_page_state(zone, NR_ACTIVE_ANON);
1819	inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1820
1821	if (inactive * zone->inactive_ratio < active)
1822		return 1;
1823
1824	return 0;
1825}
1826
1827/**
1828 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1829 * @lruvec: LRU vector to check
1830 *
1831 * Returns true if the zone does not have enough inactive anon pages,
1832 * meaning some active anon pages need to be deactivated.
1833 */
1834static int inactive_anon_is_low(struct lruvec *lruvec)
1835{
1836	/*
1837	 * If we don't have swap space, anonymous page deactivation
1838	 * is pointless.
1839	 */
1840	if (!total_swap_pages)
1841		return 0;
1842
1843	if (!mem_cgroup_disabled())
1844		return mem_cgroup_inactive_anon_is_low(lruvec);
1845
1846	return inactive_anon_is_low_global(lruvec_zone(lruvec));
1847}
1848#else
1849static inline int inactive_anon_is_low(struct lruvec *lruvec)
1850{
1851	return 0;
1852}
1853#endif
1854
1855/**
1856 * inactive_file_is_low - check if file pages need to be deactivated
1857 * @lruvec: LRU vector to check
1858 *
1859 * When the system is doing streaming IO, memory pressure here
1860 * ensures that active file pages get deactivated, until more
1861 * than half of the file pages are on the inactive list.
1862 *
1863 * Once we get to that situation, protect the system's working
1864 * set from being evicted by disabling active file page aging.
1865 *
1866 * This uses a different ratio than the anonymous pages, because
1867 * the page cache uses a use-once replacement algorithm.
1868 */
1869static int inactive_file_is_low(struct lruvec *lruvec)
1870{
1871	unsigned long inactive;
1872	unsigned long active;
1873
1874	inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1875	active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
1876
1877	return active > inactive;
1878}
1879
1880static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1881{
1882	if (is_file_lru(lru))
1883		return inactive_file_is_low(lruvec);
1884	else
1885		return inactive_anon_is_low(lruvec);
1886}
1887
1888static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1889				 struct lruvec *lruvec, struct scan_control *sc)
1890{
1891	if (is_active_lru(lru)) {
1892		if (inactive_list_is_low(lruvec, lru))
1893			shrink_active_list(nr_to_scan, lruvec, sc, lru);
1894		return 0;
1895	}
1896
1897	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1898}
1899
1900enum scan_balance {
1901	SCAN_EQUAL,
1902	SCAN_FRACT,
1903	SCAN_ANON,
1904	SCAN_FILE,
1905};
1906
1907/*
1908 * Determine how aggressively the anon and file LRU lists should be
1909 * scanned.  The relative value of each set of LRU lists is determined
1910 * by looking at the fraction of the pages scanned we did rotate back
1911 * onto the active list instead of evict.
1912 *
1913 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1914 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
1915 */
1916static void get_scan_count(struct lruvec *lruvec, int swappiness,
1917			   struct scan_control *sc, unsigned long *nr,
1918			   unsigned long *lru_pages)
1919{
1920	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1921	u64 fraction[2];
1922	u64 denominator = 0;	/* gcc */
1923	struct zone *zone = lruvec_zone(lruvec);
1924	unsigned long anon_prio, file_prio;
1925	enum scan_balance scan_balance;
1926	unsigned long anon, file;
1927	bool force_scan = false;
1928	unsigned long ap, fp;
1929	enum lru_list lru;
1930	bool some_scanned;
1931	int pass;
1932
1933	/*
1934	 * If the zone or memcg is small, nr[l] can be 0.  This
1935	 * results in no scanning on this priority and a potential
1936	 * priority drop.  Global direct reclaim can go to the next
1937	 * zone and tends to have no problems. Global kswapd is for
1938	 * zone balancing and it needs to scan a minimum amount. When
1939	 * reclaiming for a memcg, a priority drop can cause high
1940	 * latencies, so it's better to scan a minimum amount there as
1941	 * well.
1942	 */
1943	if (current_is_kswapd()) {
1944		if (!zone_reclaimable(zone))
1945			force_scan = true;
1946		if (!mem_cgroup_lruvec_online(lruvec))
1947			force_scan = true;
1948	}
1949	if (!global_reclaim(sc))
1950		force_scan = true;
1951
1952	/* If we have no swap space, do not bother scanning anon pages. */
1953	if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
1954		scan_balance = SCAN_FILE;
1955		goto out;
1956	}
1957
1958	/*
1959	 * Global reclaim will swap to prevent OOM even with no
1960	 * swappiness, but memcg users want to use this knob to
1961	 * disable swapping for individual groups completely when
1962	 * using the memory controller's swap limit feature would be
1963	 * too expensive.
1964	 */
1965	if (!global_reclaim(sc) && !swappiness) {
1966		scan_balance = SCAN_FILE;
1967		goto out;
1968	}
1969
1970	/*
1971	 * Do not apply any pressure balancing cleverness when the
1972	 * system is close to OOM, scan both anon and file equally
1973	 * (unless the swappiness setting disagrees with swapping).
1974	 */
1975	if (!sc->priority && swappiness) {
1976		scan_balance = SCAN_EQUAL;
1977		goto out;
1978	}
1979
1980	/*
1981	 * Prevent the reclaimer from falling into the cache trap: as
1982	 * cache pages start out inactive, every cache fault will tip
1983	 * the scan balance towards the file LRU.  And as the file LRU
1984	 * shrinks, so does the window for rotation from references.
1985	 * This means we have a runaway feedback loop where a tiny
1986	 * thrashing file LRU becomes infinitely more attractive than
1987	 * anon pages.  Try to detect this based on file LRU size.
1988	 */
1989	if (global_reclaim(sc)) {
1990		unsigned long zonefile;
1991		unsigned long zonefree;
1992
1993		zonefree = zone_page_state(zone, NR_FREE_PAGES);
1994		zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
1995			   zone_page_state(zone, NR_INACTIVE_FILE);
1996
1997		if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
1998			scan_balance = SCAN_ANON;
1999			goto out;
2000		}
2001	}
2002
2003	/*
2004	 * There is enough inactive page cache, do not reclaim
2005	 * anything from the anonymous working set right now.
2006	 */
2007	if (!inactive_file_is_low(lruvec)) {
2008		scan_balance = SCAN_FILE;
2009		goto out;
2010	}
2011
2012	scan_balance = SCAN_FRACT;
2013
2014	/*
2015	 * With swappiness at 100, anonymous and file have the same priority.
2016	 * This scanning priority is essentially the inverse of IO cost.
2017	 */
2018	anon_prio = swappiness;
2019	file_prio = 200 - anon_prio;
2020
2021	/*
2022	 * OK, so we have swap space and a fair amount of page cache
2023	 * pages.  We use the recently rotated / recently scanned
2024	 * ratios to determine how valuable each cache is.
2025	 *
2026	 * Because workloads change over time (and to avoid overflow)
2027	 * we keep these statistics as a floating average, which ends
2028	 * up weighing recent references more than old ones.
2029	 *
2030	 * anon in [0], file in [1]
2031	 */
2032
2033	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
2034		get_lru_size(lruvec, LRU_INACTIVE_ANON);
2035	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
2036		get_lru_size(lruvec, LRU_INACTIVE_FILE);
2037
2038	spin_lock_irq(&zone->lru_lock);
2039	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
2040		reclaim_stat->recent_scanned[0] /= 2;
2041		reclaim_stat->recent_rotated[0] /= 2;
2042	}
2043
2044	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
2045		reclaim_stat->recent_scanned[1] /= 2;
2046		reclaim_stat->recent_rotated[1] /= 2;
2047	}
2048
2049	/*
2050	 * The amount of pressure on anon vs file pages is inversely
2051	 * proportional to the fraction of recently scanned pages on
2052	 * each list that were recently referenced and in active use.
2053	 */
2054	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
2055	ap /= reclaim_stat->recent_rotated[0] + 1;
2056
2057	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
2058	fp /= reclaim_stat->recent_rotated[1] + 1;
2059	spin_unlock_irq(&zone->lru_lock);
2060
2061	fraction[0] = ap;
2062	fraction[1] = fp;
2063	denominator = ap + fp + 1;
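	/*
	 * For example, with the default swappiness of 60 (anon_prio 60,
	 * file_prio 140), recent_scanned/recent_rotated of 1000/500 for anon
	 * and 4000/100 for file give roughly ap = 60 * 1001 / 501 = 119 and
	 * fp = 140 * 4001 / 101 = 5545, so about 98% of the scan pressure
	 * falls on the file LRUs.
	 */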
2064out:
2065	some_scanned = false;
2066	/* Only use force_scan on second pass. */
2067	for (pass = 0; !some_scanned && pass < 2; pass++) {
2068		*lru_pages = 0;
2069		for_each_evictable_lru(lru) {
2070			int file = is_file_lru(lru);
2071			unsigned long size;
2072			unsigned long scan;
2073
2074			size = get_lru_size(lruvec, lru);
2075			scan = size >> sc->priority;
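			/*
			 * E.g. at DEF_PRIORITY (12) this asks for 1/4096th of
			 * the list per pass; each priority drop doubles it.
			 */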
2076
2077			if (!scan && pass && force_scan)
2078				scan = min(size, SWAP_CLUSTER_MAX);
2079
2080			switch (scan_balance) {
2081			case SCAN_EQUAL:
2082				/* Scan lists relative to size */
2083				break;
2084			case SCAN_FRACT:
2085				/*
2086				 * Scan types proportional to swappiness and
2087				 * their relative recent reclaim efficiency.
2088				 */
2089				scan = div64_u64(scan * fraction[file],
2090							denominator);
2091				break;
2092			case SCAN_FILE:
2093			case SCAN_ANON:
2094				/* Scan one type exclusively */
2095				if ((scan_balance == SCAN_FILE) != file) {
2096					size = 0;
2097					scan = 0;
2098				}
2099				break;
2100			default:
2101				/* Look ma, no brain */
2102				BUG();
2103			}
2104
2105			*lru_pages += size;
2106			nr[lru] = scan;
2107
2108			/*
2109			 * Skip the second pass and don't force_scan,
2110			 * if we found something to scan.
2111			 */
2112			some_scanned |= !!scan;
2113		}
2114	}
2115}
2116
2117/*
2118 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
2119 */
2120static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
2121			  struct scan_control *sc, unsigned long *lru_pages)
2122{
2123	unsigned long nr[NR_LRU_LISTS];
2124	unsigned long targets[NR_LRU_LISTS];
2125	unsigned long nr_to_scan;
2126	enum lru_list lru;
2127	unsigned long nr_reclaimed = 0;
2128	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
2129	struct blk_plug plug;
2130	bool scan_adjusted;
2131
2132	get_scan_count(lruvec, swappiness, sc, nr, lru_pages);
2133
2134	/* Record the original scan target for proportional adjustments later */
2135	memcpy(targets, nr, sizeof(nr));
2136
2137	/*
2138	 * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal
2139	 * event that can occur when there is little memory pressure e.g.
2140	 * multiple streaming readers/writers. Hence, we do not abort scanning
2141	 * when the requested number of pages has been reclaimed while scanning
2142	 * at DEF_PRIORITY, on the assumption that the fact we are direct
2143	 * reclaiming implies that kswapd is not keeping up and it is best to
2144	 * do a batch of work at once. For memcg reclaim one check is made to
2145	 * abort proportional reclaim if either the file or anon lru has already
2146	 * dropped to zero at the first pass.
2147	 */
2148	scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2149			 sc->priority == DEF_PRIORITY);
2150
2151	blk_start_plug(&plug);
2152	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2153					nr[LRU_INACTIVE_FILE]) {
2154		unsigned long nr_anon, nr_file, percentage;
2155		unsigned long nr_scanned;
2156
2157		for_each_evictable_lru(lru) {
2158			if (nr[lru]) {
2159				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
2160				nr[lru] -= nr_to_scan;
2161
2162				nr_reclaimed += shrink_list(lru, nr_to_scan,
2163							    lruvec, sc);
2164			}
2165		}
2166
2167		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2168			continue;
2169
2170		/*
2171		 * For kswapd and memcg, reclaim at least the number of pages
2172		 * requested. Ensure that the anon and file LRUs are scanned
2173		 * proportionally to what was requested by get_scan_count(). We
2174		 * stop reclaiming one LRU and reduce the amount of scanning
2175		 * proportionally to the original scan target.
2176		 */
2177		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
2178		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
2179
2180		/*
2181		 * It's just vindictive to attack the larger once the smaller
2182		 * has gone to zero.  And given the way we stop scanning the
2183		 * smaller below, this makes sure that we only make one nudge
2184		 * towards proportionality once we've got nr_to_reclaim.
2185		 */
2186		if (!nr_file || !nr_anon)
2187			break;
2188
2189		if (nr_file > nr_anon) {
2190			unsigned long scan_target = targets[LRU_INACTIVE_ANON] +
2191						targets[LRU_ACTIVE_ANON] + 1;
2192			lru = LRU_BASE;
2193			percentage = nr_anon * 100 / scan_target;
2194		} else {
2195			unsigned long scan_target = targets[LRU_INACTIVE_FILE] +
2196						targets[LRU_ACTIVE_FILE] + 1;
2197			lru = LRU_FILE;
2198			percentage = nr_file * 100 / scan_target;
2199		}
2200
2201		/* Stop scanning the smaller of the LRU */
2202		nr[lru] = 0;
2203		nr[lru + LRU_ACTIVE] = 0;
2204
2205		/*
2206		 * Recalculate the other LRU scan count based on its original
2207		 * scan target and the percentage scanning already complete
2208		 */
2209		lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
2210		nr_scanned = targets[lru] - nr[lru];
2211		nr[lru] = targets[lru] * (100 - percentage) / 100;
2212		nr[lru] -= min(nr[lru], nr_scanned);
2213
2214		lru += LRU_ACTIVE;
2215		nr_scanned = targets[lru] - nr[lru];
2216		nr[lru] = targets[lru] * (100 - percentage) / 100;
2217		nr[lru] -= min(nr[lru], nr_scanned);
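		/*
		 * For example, with anon targets of 40/60 (inactive/active)
		 * and file targets of 400/600, meeting nr_to_reclaim while 20
		 * anon and 936 file pages remain stops anon scanning
		 * (percentage = 20 * 100 / 101 = 19) and trims the remaining
		 * file targets to roughly 81% of their originals, minus what
		 * was already scanned.
		 */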
2218
2219		scan_adjusted = true;
2220	}
2221	blk_finish_plug(&plug);
2222	sc->nr_reclaimed += nr_reclaimed;
2223
2224	/*
2225	 * Even if we did not try to evict anon pages at all, we want to
2226	 * rebalance the anon lru active/inactive ratio.
2227	 */
2228	if (inactive_anon_is_low(lruvec))
2229		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2230				   sc, LRU_ACTIVE_ANON);
2231
2232	throttle_vm_writeout(sc->gfp_mask);
2233}
2234
2235/* Use reclaim/compaction for costly allocs or under memory pressure */
2236static bool in_reclaim_compaction(struct scan_control *sc)
2237{
2238	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
2239			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
2240			 sc->priority < DEF_PRIORITY - 2))
2241		return true;
2242
2243	return false;
2244}
2245
2246/*
2247 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2248 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2249 * true if more pages should be reclaimed such that when the page allocator
2250 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2251 * It will give up earlier than that if there is difficulty reclaiming pages.
2252 */
2253static inline bool should_continue_reclaim(struct zone *zone,
2254					unsigned long nr_reclaimed,
2255					unsigned long nr_scanned,
2256					struct scan_control *sc)
2257{
2258	unsigned long pages_for_compaction;
2259	unsigned long inactive_lru_pages;
2260
2261	/* If not in reclaim/compaction mode, stop */
2262	if (!in_reclaim_compaction(sc))
2263		return false;
2264
2265	/* Consider stopping depending on scan and reclaim activity */
2266	if (sc->gfp_mask & __GFP_REPEAT) {
2267		/*
2268		 * For __GFP_REPEAT allocations, stop reclaiming if the
2269		 * full LRU list has been scanned and we are still failing
2270		 * to reclaim pages. This full LRU scan is potentially
2271		 * expensive but a __GFP_REPEAT caller really wants to succeed
2272		 */
2273		if (!nr_reclaimed && !nr_scanned)
2274			return false;
2275	} else {
2276		/*
2277		 * For non-__GFP_REPEAT allocations which can presumably
2278		 * fail without consequence, stop if we failed to reclaim
2279		 * any pages from the last SWAP_CLUSTER_MAX number of
2280		 * pages that were scanned. This will return to the
2281		 * caller faster at the risk that reclaim/compaction and
2282		 * the resulting allocation attempt fail.
2283		 */
2284		if (!nr_reclaimed)
2285			return false;
2286	}
2287
2288	/*
2289	 * If we have not reclaimed enough pages for compaction and the
2290	 * inactive lists are large enough, continue reclaiming
2291	 */
2292	pages_for_compaction = (2UL << sc->order);
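	/* e.g. 1024 pages for an order-9 (x86 THP sized) request */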
2293	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
2294	if (get_nr_swap_pages() > 0)
2295		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
2296	if (sc->nr_reclaimed < pages_for_compaction &&
2297			inactive_lru_pages > pages_for_compaction)
2298		return true;
2299
2300	/* If compaction would go ahead or the allocation would succeed, stop */
2301	switch (compaction_suitable(zone, sc->order, 0, 0)) {
2302	case COMPACT_PARTIAL:
2303	case COMPACT_CONTINUE:
2304		return false;
2305	default:
2306		return true;
2307	}
2308}
2309
2310static bool shrink_zone(struct zone *zone, struct scan_control *sc,
2311			bool is_classzone)
2312{
2313	struct reclaim_state *reclaim_state = current->reclaim_state;
2314	unsigned long nr_reclaimed, nr_scanned;
2315	bool reclaimable = false;
2316
2317	do {
2318		struct mem_cgroup *root = sc->target_mem_cgroup;
2319		struct mem_cgroup_reclaim_cookie reclaim = {
2320			.zone = zone,
2321			.priority = sc->priority,
2322		};
2323		unsigned long zone_lru_pages = 0;
2324		struct mem_cgroup *memcg;
2325
2326		nr_reclaimed = sc->nr_reclaimed;
2327		nr_scanned = sc->nr_scanned;
2328
2329		memcg = mem_cgroup_iter(root, NULL, &reclaim);
2330		do {
2331			unsigned long lru_pages;
2332			unsigned long scanned;
2333			struct lruvec *lruvec;
2334			int swappiness;
2335
2336			if (mem_cgroup_low(root, memcg)) {
2337				if (!sc->may_thrash)
2338					continue;
2339				mem_cgroup_events(memcg, MEMCG_LOW, 1);
2340			}
2341
2342			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2343			swappiness = mem_cgroup_swappiness(memcg);
2344			scanned = sc->nr_scanned;
2345
2346			shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
2347			zone_lru_pages += lru_pages;
2348
2349			if (memcg && is_classzone)
2350				shrink_slab(sc->gfp_mask, zone_to_nid(zone),
2351					    memcg, sc->nr_scanned - scanned,
2352					    lru_pages);
2353
2354			/*
2355			 * Direct reclaim and kswapd have to scan all memory
2356			 * cgroups to fulfill the overall scan target for the
2357			 * zone.
2358			 *
2359			 * Limit reclaim, on the other hand, only cares about
2360			 * nr_to_reclaim pages to be reclaimed and it will
2361			 * retry with decreasing priority if one round over the
2362			 * whole hierarchy is not sufficient.
2363			 */
2364			if (!global_reclaim(sc) &&
2365					sc->nr_reclaimed >= sc->nr_to_reclaim) {
2366				mem_cgroup_iter_break(root, memcg);
2367				break;
2368			}
2369		} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
2370
2371		/*
2372		 * Shrink the slab caches in the same proportion that
2373		 * the eligible LRU pages were scanned.
2374		 */
2375		if (global_reclaim(sc) && is_classzone)
2376			shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
2377				    sc->nr_scanned - nr_scanned,
2378				    zone_lru_pages);
2379
2380		if (reclaim_state) {
2381			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
2382			reclaim_state->reclaimed_slab = 0;
2383		}
2384
2385		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
2386			   sc->nr_scanned - nr_scanned,
2387			   sc->nr_reclaimed - nr_reclaimed);
2388
2389		if (sc->nr_reclaimed - nr_reclaimed)
2390			reclaimable = true;
2391
2392	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
2393					 sc->nr_scanned - nr_scanned, sc));
2394
2395	return reclaimable;
2396}
2397
2398/*
2399 * Returns true if compaction should go ahead for a high-order request, or
2400 * the high-order allocation would succeed without compaction.
2401 */
2402static inline bool compaction_ready(struct zone *zone, int order)
2403{
2404	unsigned long balance_gap, watermark;
2405	bool watermark_ok;
2406
2407	/*
2408	 * Compaction takes time to run and there are potentially other
2409	 * callers using the pages just freed. Continue reclaiming until
2410	 * there is a buffer of free pages available to give compaction
2411	 * a reasonable chance of completing and allocating the page
2412	 */
2413	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
2414			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
2415	watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
2416	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
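	/*
	 * As a rough example, for a zone with 1,000,000 managed pages and
	 * the default balance gap ratio of 100, the gap is capped at 10,000
	 * pages (or the low watermark, if smaller), so an order-9 request
	 * keeps reclaiming until free pages exceed high_wmark + gap + 1024.
	 */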
2417
2418	/*
2419	 * If compaction is deferred, reclaim up to a point where
2420	 * compaction will have a chance of success when re-enabled
2421	 */
2422	if (compaction_deferred(zone, order))
2423		return watermark_ok;
2424
2425	/*
2426	 * If compaction is not ready to start and allocation is not likely
2427	 * to succeed without it, then keep reclaiming.
2428	 */
2429	if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED)
2430		return false;
2431
2432	return watermark_ok;
2433}
2434
2435/*
2436 * This is the direct reclaim path, for page-allocating processes.  We only
2437 * try to reclaim pages from zones which will satisfy the caller's allocation
2438 * request.
2439 *
2440 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
2441 * Because:
2442 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
2443 *    allocation or
2444 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
2445 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
2446 *    zone defense algorithm.
2447 *
2448 * If a zone is deemed to be full of pinned pages then just give it a light
2449 * scan then give up on it.
2450 *
2451 * Returns true if a zone was reclaimable.
2452 */
2453static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2454{
2455	struct zoneref *z;
2456	struct zone *zone;
2457	unsigned long nr_soft_reclaimed;
2458	unsigned long nr_soft_scanned;
2459	gfp_t orig_mask;
2460	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
2461	bool reclaimable = false;
2462
2463	/*
2464	 * If the number of buffer_heads in the machine exceeds the maximum
2465	 * allowed level, force direct reclaim to scan the highmem zone as
2466	 * highmem pages could be pinning lowmem pages storing buffer_heads
2467	 */
2468	orig_mask = sc->gfp_mask;
2469	if (buffer_heads_over_limit)
2470		sc->gfp_mask |= __GFP_HIGHMEM;
2471
2472	for_each_zone_zonelist_nodemask(zone, z, zonelist,
2473					gfp_zone(sc->gfp_mask), sc->nodemask) {
2474		enum zone_type classzone_idx;
2475
2476		if (!populated_zone(zone))
2477			continue;
2478
2479		classzone_idx = requested_highidx;
2480		while (!populated_zone(zone->zone_pgdat->node_zones +
2481							classzone_idx))
2482			classzone_idx--;
2483
2484		/*
2485		 * Take care that memory controller reclaim has only a small
2486		 * influence on the global LRU.
2487		 */
2488		if (global_reclaim(sc)) {
2489			if (!cpuset_zone_allowed(zone,
2490						 GFP_KERNEL | __GFP_HARDWALL))
2491				continue;
2492
2493			if (sc->priority != DEF_PRIORITY &&
2494			    !zone_reclaimable(zone))
2495				continue;	/* Let kswapd poll it */
2496
2497			/*
2498			 * If we already have plenty of memory free for
2499			 * compaction in this zone, don't free any more.
2500			 * Even though compaction is invoked for any
2501			 * non-zero order, only frequent costly order
2502			 * reclamation is disruptive enough to become a
2503			 * noticeable problem, like transparent huge
2504			 * page allocations.
2505			 */
2506			if (IS_ENABLED(CONFIG_COMPACTION) &&
2507			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
2508			    zonelist_zone_idx(z) <= requested_highidx &&
2509			    compaction_ready(zone, sc->order)) {
2510				sc->compaction_ready = true;
2511				continue;
2512			}
2513
2514			/*
2515			 * This steals pages from memory cgroups over softlimit
2516			 * and returns the number of reclaimed pages and
2517			 * scanned pages. This works for global memory pressure
2518			 * and balancing, not for a memcg's limit.
2519			 */
2520			nr_soft_scanned = 0;
2521			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2522						sc->order, sc->gfp_mask,
2523						&nr_soft_scanned);
2524			sc->nr_reclaimed += nr_soft_reclaimed;
2525			sc->nr_scanned += nr_soft_scanned;
2526			if (nr_soft_reclaimed)
2527				reclaimable = true;
2528			/* need some check to avoid calling shrink_zone() again */
2529		}
2530
2531		if (shrink_zone(zone, sc, zone_idx(zone) == classzone_idx))
2532			reclaimable = true;
2533
2534		if (global_reclaim(sc) &&
2535		    !reclaimable && zone_reclaimable(zone))
2536			reclaimable = true;
2537	}
2538
2539	/*
2540	 * Restore to original mask to avoid the impact on the caller if we
2541	 * promoted it to __GFP_HIGHMEM.
2542	 */
2543	sc->gfp_mask = orig_mask;
2544
2545	return reclaimable;
2546}
2547
2548/*
2549 * This is the main entry point to direct page reclaim.
2550 *
2551 * If a full scan of the inactive list fails to free enough memory then we
2552 * are "out of memory" and something needs to be killed.
2553 *
2554 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2555 * high - the zone may be full of dirty or under-writeback pages, which this
2556 * caller can't do much about.  We kick the writeback threads and take explicit
2557 * naps in the hope that some of these pages can be written.  But if the
2558 * allocating task holds filesystem locks which prevent writeout this might not
2559 * work, and the allocation attempt will fail.
2560 *
2561 * returns:	0, if no pages reclaimed
2562 * 		else, the number of pages reclaimed
2563 */
2564static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2565					  struct scan_control *sc)
2566{
2567	int initial_priority = sc->priority;
2568	unsigned long total_scanned = 0;
2569	unsigned long writeback_threshold;
2570	bool zones_reclaimable;
2571retry:
2572	delayacct_freepages_start();
2573
2574	if (global_reclaim(sc))
2575		count_vm_event(ALLOCSTALL);
2576
2577	do {
2578		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
2579				sc->priority);
2580		sc->nr_scanned = 0;
2581		zones_reclaimable = shrink_zones(zonelist, sc);
2582
2583		total_scanned += sc->nr_scanned;
2584		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
2585			break;
2586
2587		if (sc->compaction_ready)
2588			break;
2589
2590		/*
2591		 * If we're having trouble reclaiming, start doing
2592		 * writepage even in laptop mode.
2593		 */
2594		if (sc->priority < DEF_PRIORITY - 2)
2595			sc->may_writepage = 1;
2596
2597		/*
2598		 * Try to write back as many pages as we just scanned.  This
2599		 * tends to cause slow streaming writers to write data to the
2600		 * disk smoothly, at the dirtying rate, which is nice.   But
2601		 * that's undesirable in laptop mode, where we *want* lumpy
2602		 * writeout.  So in laptop mode, write out the whole world.
2603		 */
2604		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
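		/*
		 * E.g. for direct reclaim's usual SWAP_CLUSTER_MAX (32)
		 * target, the flushers are woken once more than 48 pages
		 * have been scanned in total.
		 */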
2605		if (total_scanned > writeback_threshold) {
2606			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2607						WB_REASON_TRY_TO_FREE_PAGES);
2608			sc->may_writepage = 1;
2609		}
2610	} while (--sc->priority >= 0);
2611
2612	delayacct_freepages_end();
2613
2614	if (sc->nr_reclaimed)
2615		return sc->nr_reclaimed;
2616
2617	/* Aborted reclaim to try compaction? don't OOM, then */
2618	if (sc->compaction_ready)
2619		return 1;
2620
2621	/* Untapped cgroup reserves?  Don't OOM, retry. */
2622	if (!sc->may_thrash) {
2623		sc->priority = initial_priority;
2624		sc->may_thrash = 1;
2625		goto retry;
2626	}
2627
2628	/* Any of the zones still reclaimable?  Don't OOM. */
2629	if (zones_reclaimable)
2630		return 1;
2631
2632	return 0;
2633}
2634
2635static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2636{
2637	struct zone *zone;
2638	unsigned long pfmemalloc_reserve = 0;
2639	unsigned long free_pages = 0;
2640	int i;
2641	bool wmark_ok;
2642
2643	for (i = 0; i <= ZONE_NORMAL; i++) {
2644		zone = &pgdat->node_zones[i];
2645		if (!populated_zone(zone))
2646			continue;
2647
2648		pfmemalloc_reserve += min_wmark_pages(zone);
2649		free_pages += zone_page_state(zone, NR_FREE_PAGES);
2650	}
2651
2652	/* If there are no reserves (unexpected config) then do not throttle */
2653	if (!pfmemalloc_reserve)
2654		return true;
2655
2656	wmark_ok = free_pages > pfmemalloc_reserve / 2;
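	/*
	 * E.g. if the min watermarks of the DMA..NORMAL zones add up to
	 * 20,000 pages, direct reclaimers get throttled once free pages in
	 * those zones drop to 10,000 or below.
	 */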
2657
2658	/* kswapd must be awake if processes are being throttled */
2659	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2660		pgdat->classzone_idx = min(pgdat->classzone_idx,
2661						(enum zone_type)ZONE_NORMAL);
2662		wake_up_interruptible(&pgdat->kswapd_wait);
2663	}
2664
2665	return wmark_ok;
2666}
2667
2668/*
2669 * Throttle direct reclaimers if backing storage is backed by the network
2670 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2671 * depleted. kswapd will continue to make progress and wake the processes
2672 * when the low watermark is reached.
2673 *
2674 * Returns true if a fatal signal was delivered during throttling. If this
2675 * happens, the page allocator should not consider triggering the OOM killer.
2676 */
2677static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2678					nodemask_t *nodemask)
2679{
2680	struct zoneref *z;
2681	struct zone *zone;
2682	pg_data_t *pgdat = NULL;
2683
2684	/*
2685	 * Kernel threads should not be throttled as they may be indirectly
2686	 * responsible for cleaning pages necessary for reclaim to make forward
2687	 * progress. kjournald for example may enter direct reclaim while
2688	 * committing a transaction where throttling it could force other
2689	 * processes to block on log_wait_commit().
2690	 */
2691	if (current->flags & PF_KTHREAD)
2692		goto out;
2693
2694	/*
2695	 * If a fatal signal is pending, this process should not throttle.
2696	 * It should return quickly so it can exit and free its memory
2697	 */
2698	if (fatal_signal_pending(current))
2699		goto out;
2700
2701	/*
2702	 * Check if the pfmemalloc reserves are ok by finding the first node
2703	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
2704	 * GFP_KERNEL will be required for allocating network buffers when
2705	 * swapping over the network so ZONE_HIGHMEM is unusable.
2706	 *
2707	 * Throttling is based on the first usable node and throttled processes
2708	 * wait on a queue until kswapd makes progress and wakes them. There
2709	 * is thus an affinity between processes waking up and the node where
2710	 * reclaim progress was made, assuming a process wakes on the node it slept on.
2711	 * More importantly, processes running on remote nodes will not compete
2712	 * for remote pfmemalloc reserves and processes on different nodes
2713	 * should make reasonable progress.
2714	 */
2715	for_each_zone_zonelist_nodemask(zone, z, zonelist,
2716					gfp_zone(gfp_mask), nodemask) {
2717		if (zone_idx(zone) > ZONE_NORMAL)
2718			continue;
2719
2720		/* Throttle based on the first usable node */
2721		pgdat = zone->zone_pgdat;
2722		if (pfmemalloc_watermark_ok(pgdat))
2723			goto out;
2724		break;
2725	}
2726
2727	/* If no zone was usable by the allocation flags then do not throttle */
2728	if (!pgdat)
2729		goto out;
2730
2731	/* Account for the throttling */
2732	count_vm_event(PGSCAN_DIRECT_THROTTLE);
2733
2734	/*
2735	 * If the caller cannot enter the filesystem, it's possible that it
2736	 * is due to the caller holding an FS lock or performing a journal
2737	 * transaction in the case of a filesystem like ext[3|4]. In this case,
2738	 * it is not safe to block on pfmemalloc_wait as kswapd could be
2739	 * blocked waiting on the same lock. Instead, throttle for up to a
2740	 * second before continuing.
2741	 */
2742	if (!(gfp_mask & __GFP_FS)) {
2743		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2744			pfmemalloc_watermark_ok(pgdat), HZ);
2745
2746		goto check_pending;
2747	}
2748
2749	/* Throttle until kswapd wakes the process */
2750	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2751		pfmemalloc_watermark_ok(pgdat));
2752
2753check_pending:
2754	if (fatal_signal_pending(current))
2755		return true;
2756
2757out:
2758	return false;
2759}
2760
2761unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2762				gfp_t gfp_mask, nodemask_t *nodemask)
2763{
2764	unsigned long nr_reclaimed;
2765	struct scan_control sc = {
2766		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2767		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
2768		.order = order,
2769		.nodemask = nodemask,
2770		.priority = DEF_PRIORITY,
2771		.may_writepage = !laptop_mode,
2772		.may_unmap = 1,
2773		.may_swap = 1,
2774	};
2775
2776	/*
2777	 * Do not enter reclaim if fatal signal was delivered while throttled.
2778	 * 1 is returned so that the page allocator does not OOM kill at this
2779	 * point.
2780	 */
2781	if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
2782		return 1;
2783
2784	trace_mm_vmscan_direct_reclaim_begin(order,
2785				sc.may_writepage,
2786				gfp_mask);
2787
2788	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2789
2790	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2791
2792	return nr_reclaimed;
2793}
2794
2795#ifdef CONFIG_MEMCG
2796
2797unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2798						gfp_t gfp_mask, bool noswap,
2799						struct zone *zone,
2800						unsigned long *nr_scanned)
2801{
2802	struct scan_control sc = {
2803		.nr_to_reclaim = SWAP_CLUSTER_MAX,
2804		.target_mem_cgroup = memcg,
2805		.may_writepage = !laptop_mode,
2806		.may_unmap = 1,
2807		.may_swap = !noswap,
2808	};
2809	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2810	int swappiness = mem_cgroup_swappiness(memcg);
2811	unsigned long lru_pages;
2812
2813	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2814			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2815
2816	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2817						      sc.may_writepage,
2818						      sc.gfp_mask);
2819
2820	/*
2821	 * NOTE: Although we can get the priority field, using it
2822	 * here is not a good idea, since it limits the pages we can scan.
2823	 * If we don't reclaim here, the shrink_zone() called from
2824	 * balance_pgdat() will pick up pages from other mem cgroups as
2825	 * well. We hack the priority and make it zero.
2826	 */
2827	shrink_lruvec(lruvec, swappiness, &sc, &lru_pages);
2828
2829	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2830
2831	*nr_scanned = sc.nr_scanned;
2832	return sc.nr_reclaimed;
2833}
2834
2835unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2836					   unsigned long nr_pages,
2837					   gfp_t gfp_mask,
2838					   bool may_swap)
2839{
2840	struct zonelist *zonelist;
2841	unsigned long nr_reclaimed;
2842	int nid;
2843	struct scan_control sc = {
2844		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
2845		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2846				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2847		.target_mem_cgroup = memcg,
2848		.priority = DEF_PRIORITY,
2849		.may_writepage = !laptop_mode,
2850		.may_unmap = 1,
2851		.may_swap = may_swap,
2852	};
2853
2854	/*
2855	 * Unlike direct reclaim via alloc_pages(), memcg reclaim doesn't
2856	 * care which node pages are taken from. So the node where we start the
2857	 * scan does not need to be the current node.
2858	 */
2859	nid = mem_cgroup_select_victim_node(memcg);
2860
2861	zonelist = NODE_DATA(nid)->node_zonelists;
2862
2863	trace_mm_vmscan_memcg_reclaim_begin(0,
2864					    sc.may_writepage,
2865					    sc.gfp_mask);
2866
2867	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
2868
2869	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2870
2871	return nr_reclaimed;
2872}
2873#endif
2874
2875static void age_active_anon(struct zone *zone, struct scan_control *sc)
2876{
2877	struct mem_cgroup *memcg;
2878
2879	if (!total_swap_pages)
2880		return;
2881
2882	memcg = mem_cgroup_iter(NULL, NULL, NULL);
2883	do {
2884		struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2885
2886		if (inactive_anon_is_low(lruvec))
2887			shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2888					   sc, LRU_ACTIVE_ANON);
2889
2890		memcg = mem_cgroup_iter(NULL, memcg, NULL);
2891	} while (memcg);
2892}
2893
2894static bool zone_balanced(struct zone *zone, int order,
2895			  unsigned long balance_gap, int classzone_idx)
2896{
2897	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
2898				    balance_gap, classzone_idx, 0))
2899		return false;
2900
2901	if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
2902				order, 0, classzone_idx) == COMPACT_SKIPPED)
2903		return false;
2904
2905	return true;
2906}
2907
2908/*
2909 * pgdat_balanced() is used when checking if a node is balanced.
2910 *
2911 * For order-0, all zones must be balanced!
2912 *
2913 * For high-order allocations only zones that meet watermarks and are in a
2914 * zone allowed by the caller's classzone_idx are added to balanced_pages. The
2915 * total of balanced pages must be at least 25% of the zones allowed by
2916 * classzone_idx for the node to be considered balanced. Forcing all zones to
2917 * be balanced for high orders can cause excessive reclaim when there are
2918 * imbalanced zones.
2919 * The choice of 25% is due to
2920 *   o a 16M DMA zone that is balanced will not balance a node on any
2921 *     reasonably sized machine
2922 *   o On all other machines, the top zone must be at least a reasonable
2923 *     percentage of the middle zones. For example, on 32-bit x86, highmem
2924 *     would need to be at least 256M for it to balance a whole node.
2925 *     Similarly, on x86-64 the Normal zone would need to be at least 1G
2926 *     to balance a node on its own. These seemed like reasonable ratios.
2927 */
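/*
 * As a rough example, on a 32-bit node with a 1GB Normal zone and a 3GB
 * HighMem zone, a high-order request is considered balanced once zones
 * meeting their watermarks cover at least 1GB of the 4GB total, so a
 * balanced Normal zone alone is just enough even while HighMem is still
 * below its watermark.
 */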
2928static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
2929{
2930	unsigned long managed_pages = 0;
2931	unsigned long balanced_pages = 0;
2932	int i;
2933
2934	/* Check the watermark levels */
2935	for (i = 0; i <= classzone_idx; i++) {
2936		struct zone *zone = pgdat->node_zones + i;
2937
2938		if (!populated_zone(zone))
2939			continue;
2940
2941		managed_pages += zone->managed_pages;
2942
2943		/*
2944		 * A special case here:
2945		 *
2946		 * balance_pgdat() skips over all_unreclaimable after
2947		 * DEF_PRIORITY. Effectively, it considers them balanced so
2948		 * they must be considered balanced here as well!
2949		 */
2950		if (!zone_reclaimable(zone)) {
2951			balanced_pages += zone->managed_pages;
2952			continue;
2953		}
2954
2955		if (zone_balanced(zone, order, 0, i))
2956			balanced_pages += zone->managed_pages;
2957		else if (!order)
2958			return false;
2959	}
2960
2961	if (order)
2962		return balanced_pages >= (managed_pages >> 2);
2963	else
2964		return true;
2965}
2966
2967/*
2968 * Prepare kswapd for sleeping. This verifies that there are no processes
2969 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2970 *
2971 * Returns true if kswapd is ready to sleep
2972 */
2973static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
2974					int classzone_idx)
2975{
2976	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2977	if (remaining)
2978		return false;
2979
2980	/*
2981	 * The throttled processes are normally woken up in balance_pgdat() as
2982	 * soon as pfmemalloc_watermark_ok() is true. But there is a potential
2983	 * race between when kswapd checks the watermarks and a process gets
2984	 * throttled. There is also a potential race if processes get
2985	 * throttled, kswapd wakes, a large process exits thereby balancing the
2986	 * zones, which causes kswapd to exit balance_pgdat() before reaching
2987	 * the wake up checks. If kswapd is going to sleep, no process should
2988	 * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
2989	 * the wake up is premature, processes will wake kswapd and get
2990	 * throttled again. The difference from wake ups in balance_pgdat() is
2991	 * that here we are under prepare_to_wait().
2992	 */
2993	if (waitqueue_active(&pgdat->pfmemalloc_wait))
2994		wake_up_all(&pgdat->pfmemalloc_wait);
2995
2996	return pgdat_balanced(pgdat, order, classzone_idx);
2997}
2998
2999/*
3000 * kswapd shrinks the zone by the number of pages required to reach
3001 * the high watermark.
3002 *
3003 * Returns true if kswapd scanned at least the requested number of pages to
3004 * reclaim or if the lack of progress was due to pages under writeback.
3005 * This is used to determine if the scanning priority needs to be raised.
3006 */
3007static bool kswapd_shrink_zone(struct zone *zone,
3008			       int classzone_idx,
3009			       struct scan_control *sc,
3010			       unsigned long *nr_attempted)
3011{
3012	int testorder = sc->order;
3013	unsigned long balance_gap;
3014	bool lowmem_pressure;
3015
3016	/* Reclaim above the high watermark. */
3017	sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
3018
3019	/*
3020	 * Kswapd reclaims only single pages with compaction enabled. Trying
3021	 * too hard to reclaim until contiguous free pages have become
3022	 * available can hurt performance by evicting too much useful data
3023	 * from memory. Do not reclaim more than needed for compaction.
3024	 */
3025	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
3026			compaction_suitable(zone, sc->order, 0, classzone_idx)
3027							!= COMPACT_SKIPPED)
3028		testorder = 0;
3029
3030	/*
3031	 * We put equal pressure on every zone, unless one zone has way too
3032	 * many pages free already. The "too many pages" is defined as the
3033	 * high wmark plus a "gap" where the gap is either the low
3034	 * watermark or 1% of the zone, whichever is smaller.
3035	 */
3036	balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
3037			zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
3038
3039	/*
3040	 * If there is no low memory pressure or the zone is balanced then no
3041	 * reclaim is necessary
3042	 */
3043	lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
3044	if (!lowmem_pressure && zone_balanced(zone, testorder,
3045						balance_gap, classzone_idx))
3046		return true;
3047
3048	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
3049
3050	/* Account for the number of pages attempted to reclaim */
3051	*nr_attempted += sc->nr_to_reclaim;
3052
3053	clear_bit(ZONE_WRITEBACK, &zone->flags);
3054
3055	/*
3056	 * If a zone reaches its high watermark, consider it to be no longer
3057	 * congested. It's possible there are dirty pages backed by congested
3058	 * BDIs but as pressure is relieved, speculatively avoid congestion
3059	 * waits.
3060	 */
3061	if (zone_reclaimable(zone) &&
3062	    zone_balanced(zone, testorder, 0, classzone_idx)) {
3063		clear_bit(ZONE_CONGESTED, &zone->flags);
3064		clear_bit(ZONE_DIRTY, &zone->flags);
3065	}
3066
3067	return sc->nr_scanned >= sc->nr_to_reclaim;
3068}
3069
3070/*
3071 * For kswapd, balance_pgdat() will work across all this node's zones until
3072 * they are all at high_wmark_pages(zone).
3073 *
3074 * Returns the final order kswapd was reclaiming at
3075 *
3076 * There is special handling here for zones which are full of pinned pages.
3077 * This can happen if the pages are all mlocked, or if they are all used by
3078 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
3079 * What we do is to detect the case where all pages in the zone have been
3080 * scanned twice and there has been zero successful reclaim.  Mark the zone as
3081 * dead and from now on, only perform a short scan.  Basically we're polling
3082 * the zone for when the problem goes away.
3083 *
3084 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
3085 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
3086 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
3087 * lower zones regardless of the number of free pages in the lower zones. This
3088 * interoperates with the page allocator fallback scheme to ensure that aging
3089 * of pages is balanced across the zones.
3090 */
3091static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
3092							int *classzone_idx)
3093{
3094	int i;
3095	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
3096	unsigned long nr_soft_reclaimed;
3097	unsigned long nr_soft_scanned;
3098	struct scan_control sc = {
3099		.gfp_mask = GFP_KERNEL,
3100		.order = order,
3101		.priority = DEF_PRIORITY,
3102		.may_writepage = !laptop_mode,
3103		.may_unmap = 1,
3104		.may_swap = 1,
3105	};
3106	count_vm_event(PAGEOUTRUN);
3107
3108	do {
3109		unsigned long nr_attempted = 0;
3110		bool raise_priority = true;
3111		bool pgdat_needs_compaction = (order > 0);
3112
3113		sc.nr_reclaimed = 0;
3114
3115		/*
3116		 * Scan in the highmem->dma direction for the highest
3117		 * zone which needs scanning
3118		 */
3119		for (i = pgdat->nr_zones - 1; i >= 0; i--) {
3120			struct zone *zone = pgdat->node_zones + i;
3121
3122			if (!populated_zone(zone))
3123				continue;
3124
3125			if (sc.priority != DEF_PRIORITY &&
3126			    !zone_reclaimable(zone))
3127				continue;
3128
3129			/*
3130			 * Do some background aging of the anon list, to give
3131			 * pages a chance to be referenced before reclaiming.
3132			 */
3133			age_active_anon(zone, &sc);
3134
3135			/*
3136			 * If the number of buffer_heads in the machine
3137			 * exceeds the maximum allowed level and this node
3138			 * has a highmem zone, force kswapd to reclaim from
3139			 * it to relieve lowmem pressure.
3140			 */
3141			if (buffer_heads_over_limit && is_highmem_idx(i)) {
3142				end_zone = i;
3143				break;
3144			}
3145
3146			if (!zone_balanced(zone, order, 0, 0)) {
3147				end_zone = i;
3148				break;
3149			} else {
3150				/*
3151				 * If balanced, clear the dirty and congested
3152				 * flags
3153				 */
3154				clear_bit(ZONE_CONGESTED, &zone->flags);
3155				clear_bit(ZONE_DIRTY, &zone->flags);
3156			}
3157		}
3158
3159		if (i < 0)
3160			goto out;
3161
3162		for (i = 0; i <= end_zone; i++) {
3163			struct zone *zone = pgdat->node_zones + i;
3164
3165			if (!populated_zone(zone))
3166				continue;
3167
3168			/*
3169			 * If any zone is currently balanced then kswapd will
3170			 * not call compaction as it is expected that the
3171			 * necessary pages are already available.
3172			 */
3173			if (pgdat_needs_compaction &&
3174					zone_watermark_ok(zone, order,
3175						low_wmark_pages(zone),
3176						*classzone_idx, 0))
3177				pgdat_needs_compaction = false;
3178		}
3179
3180		/*
3181		 * If we're having trouble reclaiming, start doing writepage
3182		 * even in laptop mode.
3183		 */
3184		if (sc.priority < DEF_PRIORITY - 2)
3185			sc.may_writepage = 1;
3186
3187		/*
3188		 * Now scan the zone in the dma->highmem direction, stopping
3189		 * at the last zone which needs scanning.
3190		 *
3191		 * We do this because the page allocator works in the opposite
3192		 * direction.  This prevents the page allocator from allocating
3193		 * pages behind kswapd's direction of progress, which would
3194		 * cause too much scanning of the lower zones.
3195		 */
3196		for (i = 0; i <= end_zone; i++) {
3197			struct zone *zone = pgdat->node_zones + i;
3198
3199			if (!populated_zone(zone))
3200				continue;
3201
3202			if (sc.priority != DEF_PRIORITY &&
3203			    !zone_reclaimable(zone))
3204				continue;
3205
3206			sc.nr_scanned = 0;
3207
3208			nr_soft_scanned = 0;
3209			/*
3210			 * Call soft limit reclaim before calling shrink_zone.
3211			 */
3212			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
3213							order, sc.gfp_mask,
3214							&nr_soft_scanned);
3215			sc.nr_reclaimed += nr_soft_reclaimed;
3216
3217			/*
3218			 * There should be no need to raise the scanning
3219			 * priority if enough pages are already being scanned
3220			 * that the high watermark would be met at 100%
3221			 * efficiency.
3222			 */
3223			if (kswapd_shrink_zone(zone, end_zone,
3224					       &sc, &nr_attempted))
3225				raise_priority = false;
3226		}
3227
3228		/*
3229		 * If the low watermark is met there is no need for processes
3230		 * to be throttled on pfmemalloc_wait as they should now be
3231		 * able to safely make forward progress. Wake them.
3232		 */
3233		if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
3234				pfmemalloc_watermark_ok(pgdat))
3235			wake_up_all(&pgdat->pfmemalloc_wait);
3236
3237		/*
3238		 * Fragmentation may mean that the system cannot be rebalanced
3239		 * for high-order allocations in all zones. If twice the
3240		 * allocation size has been reclaimed and the zones are still
3241		 * not balanced then recheck the watermarks at order-0 to
3242		 * prevent kswapd reclaiming excessively. Assume that a process
3243		 * which requested a high-order allocation can direct reclaim/compact.
3244		 */
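		/* e.g. 32 pages reclaimed for an order-4 request */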
3245		if (order && sc.nr_reclaimed >= 2UL << order)
3246			order = sc.order = 0;
3247
3248		/* Check if kswapd should be suspending */
3249		if (try_to_freeze() || kthread_should_stop())
3250			break;
3251
3252		/*
3253		 * Compact if necessary and kswapd is reclaiming at least the
3254		 * high watermark number of pages as requested
3255		 */
3256		if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
3257			compact_pgdat(pgdat, order);
3258
3259		/*
3260		 * Raise priority if scanning rate is too low or there was no
3261		 * progress in reclaiming pages
3262		 */
3263		if (raise_priority || !sc.nr_reclaimed)
3264			sc.priority--;
3265	} while (sc.priority >= 1 &&
3266		 !pgdat_balanced(pgdat, order, *classzone_idx));
3267
3268out:
3269	/*
3270	 * Return the order we were reclaiming at so prepare_kswapd_sleep()
3271	 * makes a decision on the order we were last reclaiming at. However,
3272	 * if another caller entered the allocator slow path while kswapd
3273	 * was awake, order will remain at the higher level
3274	 */
3275	*classzone_idx = end_zone;
3276	return order;
3277}
3278
3279static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
3280{
3281	long remaining = 0;
3282	DEFINE_WAIT(wait);
3283
3284	if (freezing(current) || kthread_should_stop())
3285		return;
3286
3287	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3288
3289	/* Try to sleep for a short interval */
3290	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3291		remaining = schedule_timeout(HZ/10);
3292		finish_wait(&pgdat->kswapd_wait, &wait);
3293		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
3294	}
3295
3296	/*
3297	 * After a short sleep, check if it was a premature sleep. If not, then
3298	 * go fully to sleep until explicitly woken up.
3299	 */
3300	if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
3301		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
3302
3303		/*
3304		 * vmstat counters are not perfectly accurate and the estimated
3305		 * value for counters such as NR_FREE_PAGES can deviate from the
3306		 * true value by nr_online_cpus * threshold. To avoid the zone
3307		 * watermarks being breached while under pressure, we reduce the
3308		 * per-cpu vmstat threshold while kswapd is awake and restore
3309		 * them before going back to sleep.
3310		 */
3311		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3312
3313		/*
3314		 * Compaction records what page blocks it recently failed to
3315		 * isolate pages from and skips them in the future scanning.
3316		 * When kswapd is going to sleep, it is reasonable to assume
3317		 * that isolation and compaction may now succeed, so reset the cache.
3318		 */
3319		reset_isolation_suitable(pgdat);
3320
3321		if (!kthread_should_stop())
3322			schedule();
3323
3324		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
3325	} else {
3326		if (remaining)
3327			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
3328		else
3329			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
3330	}
3331	finish_wait(&pgdat->kswapd_wait, &wait);
3332}
3333
3334/*
3335 * The background pageout daemon, started as a kernel thread
3336 * from the init process.
3337 *
3338 * This basically trickles out pages so that we have _some_
3339 * free memory available even if there is no other activity
3340 * that frees anything up. This is needed for things like routing
3341 * etc, where we otherwise might have all activity going on in
3342 * asynchronous contexts that cannot page things out.
3343 *
3344 * If there are applications that are active memory-allocators
3345 * (most normal use), this basically shouldn't matter.
3346 */
3347static int kswapd(void *p)
3348{
3349	unsigned long order, new_order;
3350	unsigned balanced_order;
3351	int classzone_idx, new_classzone_idx;
3352	int balanced_classzone_idx;
3353	pg_data_t *pgdat = (pg_data_t*)p;
3354	struct task_struct *tsk = current;
3355
3356	struct reclaim_state reclaim_state = {
3357		.reclaimed_slab = 0,
3358	};
3359	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3360
3361	lockdep_set_current_reclaim_state(GFP_KERNEL);
3362
3363	if (!cpumask_empty(cpumask))
3364		set_cpus_allowed_ptr(tsk, cpumask);
3365	current->reclaim_state = &reclaim_state;
3366
3367	/*
3368	 * Tell the memory management that we're a "memory allocator",
3369	 * and that if we need more memory we should get access to it
3370	 * regardless (see "__alloc_pages()"). "kswapd" should
3371	 * never get caught in the normal page freeing logic.
3372	 *
3373	 * (Kswapd normally doesn't need memory anyway, but sometimes
3374	 * you need a small amount of memory in order to be able to
3375	 * page out something else, and this flag essentially protects
3376	 * us from recursively trying to free more memory as we're
3377	 * trying to free the first piece of memory in the first place).
3378	 */
3379	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
3380	set_freezable();
3381
3382	order = new_order = 0;
3383	balanced_order = 0;
3384	classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
3385	balanced_classzone_idx = classzone_idx;
3386	for ( ; ; ) {
3387		bool ret;
3388
3389		/*
3390		 * If the last balance_pgdat was unsuccessful it's unlikely a
3391		 * new request of a similar or harder type will succeed soon
3392		 * so consider going to sleep on the basis of the order we reclaimed at.
3393		 */
3394		if (balanced_classzone_idx >= new_classzone_idx &&
3395					balanced_order == new_order) {
3396			new_order = pgdat->kswapd_max_order;
3397			new_classzone_idx = pgdat->classzone_idx;
3398			pgdat->kswapd_max_order =  0;
3399			pgdat->classzone_idx = pgdat->nr_zones - 1;
3400		}
3401
3402		if (order < new_order || classzone_idx > new_classzone_idx) {
3403			/*
3404			 * Don't sleep if someone wants a larger 'order'
3405			 * allocation or has tighter zone constraints
3406			 */
3407			order = new_order;
3408			classzone_idx = new_classzone_idx;
3409		} else {
3410			kswapd_try_to_sleep(pgdat, balanced_order,
3411						balanced_classzone_idx);
3412			order = pgdat->kswapd_max_order;
3413			classzone_idx = pgdat->classzone_idx;
3414			new_order = order;
3415			new_classzone_idx = classzone_idx;
3416			pgdat->kswapd_max_order = 0;
3417			pgdat->classzone_idx = pgdat->nr_zones - 1;
3418		}
3419
3420		ret = try_to_freeze();
3421		if (kthread_should_stop())
3422			break;
3423
3424		/*
3425		 * We can speed up thawing tasks if we don't call balance_pgdat
3426		 * after returning from the refrigerator
3427		 */
3428		if (!ret) {
3429			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
3430			balanced_classzone_idx = classzone_idx;
3431			balanced_order = balance_pgdat(pgdat, order,
3432						&balanced_classzone_idx);
3433		}
3434	}
3435
3436	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
3437	current->reclaim_state = NULL;
3438	lockdep_clear_current_reclaim_state();
3439
3440	return 0;
3441}

/*
 * A zone is low on free memory, so wake its kswapd task to service it.
 */
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;

	if (!populated_zone(zone))
		return;

	if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order) {
		pgdat->kswapd_max_order = order;
		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
	}
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	if (zone_balanced(zone, order, 0, 0))
		return;

	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
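
/*
 * Illustrative caller sketch: before falling back to direct reclaim, the page
 * allocator typically wakes kswapd on every zone the failing allocation may
 * use. The sketch below is an assumption about that caller's shape (the real
 * call sites live in mm/page_alloc.c); 'zonelist', 'high_zoneidx' and
 * 'preferred_zone' stand in for the allocation context and are not defined
 * here.
 */
#if 0	/* example only, not compiled */
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
#endif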

#ifdef CONFIG_HIBERNATION
/*
 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
 * freed pages.
 *
 * Rather than trying to age LRUs the aim is to preserve the overall
 * LRU order by reclaiming preferentially
 * inactive > active > active referenced > active mapped
 */
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.nr_to_reclaim = nr_to_reclaim,
		.gfp_mask = GFP_HIGHUSER_MOVABLE,
		.priority = DEF_PRIORITY,
		.may_writepage = 1,
		.may_unmap = 1,
		.may_swap = 1,
		.hibernation_mode = 1,
	};
	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
	struct task_struct *p = current;
	unsigned long nr_reclaimed;

	p->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(sc.gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);

	p->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	p->flags &= ~PF_MEMALLOC;

	return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
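
/*
 * Usage note: the hibernation code in kernel/power/ is the expected caller;
 * when preallocating the snapshot image it asks for however many pages it is
 * still short of, roughly as in this (assumed) sketch:
 *
 *	to_free = pages_needed_for_image - pages_currently_free;
 *	freed = shrink_all_memory(to_free);
 *
 * Both variable names above are placeholders, not the real ones.
 */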

/* It's optimal to keep kswapds on the same CPUs as their memory, but
   not required for correctness.  So if the last cpu in a node goes
   away, we get changed to run anywhere: as the first one comes back,
   restore their cpu bindings. */
static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_MEMORY) {
			pg_data_t *pgdat = NODE_DATA(nid);
			const struct cpumask *mask;

			mask = cpumask_of_node(pgdat->node_id);

			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
}
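
/*
 * Note on the test above: cpumask_any_and() returns a CPU number below
 * nr_cpu_ids only when the two masks intersect, so the condition reads
 * "at least one of this node's CPUs is online again". For example, if
 * CPUs 4-7 of node 1 were all offlined and CPU 5 comes back, the next
 * CPU_ONLINE notification re-pins node 1's kswapd to cpumask_of_node(1).
 */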

/*
 * This kswapd start function will be called by init and by node hot-add.
 * On node hot-add, kswapd will be moved to the proper CPUs if CPUs are
 * hot-added.
 */
int kswapd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kswapd)
		return 0;

	pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
	if (IS_ERR(pgdat->kswapd)) {
		/* failure at boot is fatal */
		BUG_ON(system_state == SYSTEM_BOOTING);
		pr_err("Failed to start kswapd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kswapd);
		pgdat->kswapd = NULL;
	}
	return ret;
}
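
/*
 * Besides kswapd_init() below, the memory hotplug path is expected to call
 * kswapd_run() once a node gains memory, and kswapd_stop() (next) when its
 * last memory goes away; a hot-add caller might look like this (assumed
 * shape):
 *
 *	if (onlined && node_now_has_memory(nid))
 *		kswapd_run(nid);
 *
 * where node_now_has_memory() is a placeholder for the caller's own check.
 */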

/*
 * Called by memory hotplug when all memory in a node is offlined.  Caller must
 * hold mem_hotplug_begin/end().
 */
void kswapd_stop(int nid)
{
	struct task_struct *kswapd = NODE_DATA(nid)->kswapd;

	if (kswapd) {
		kthread_stop(kswapd);
		NODE_DATA(nid)->kswapd = NULL;
	}
}

static int __init kswapd_init(void)
{
	int nid;

	swap_setup();
	for_each_node_state(nid, N_MEMORY)
		kswapd_run(nid);
	hotcpu_notifier(cpu_callback, 0);
	return 0;
}

module_init(kswapd_init)

#ifdef CONFIG_NUMA
/*
 * Zone reclaim mode
 *
 * If non-zero, call zone_reclaim() when the number of free pages falls below
 * the watermarks.
 */
int zone_reclaim_mode __read_mostly;

#define RECLAIM_OFF 0
#define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
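
/*
 * These bits combine into the value seen in the vm.zone_reclaim_mode sysctl.
 * A few example combinations (illustrative, not an exhaustive list):
 */
#if 0	/* example only, not compiled */
	zone_reclaim_mode = RECLAIM_ZONE;			/* 1: reclaim clean page cache only */
	zone_reclaim_mode = RECLAIM_ZONE | RECLAIM_WRITE;	/* 3: may also write back dirty pages */
	zone_reclaim_mode = RECLAIM_ZONE | RECLAIM_WRITE | RECLAIM_SWAP; /* 7: may also swap */
#endif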

/*
 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
 * pages scanned on each zone_reclaim() pass; a value of 4 scans 1/16th
 * of the zone.
 */
#define ZONE_RECLAIM_PRIORITY 4
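
/*
 * Worked example: shrink_zone() considers roughly (lru pages >> priority)
 * pages per pass, so with ZONE_RECLAIM_PRIORITY == 4 a zone with 1,048,576
 * LRU pages (4GB of 4KB pages) has about 65,536 pages (1/16th) looked at on
 * the first pass; __zone_reclaim() below only drops the priority further
 * (towards scanning the whole zone) if that was not enough to free the
 * requested pages.
 */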

/*
 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
 * occur.
 */
int sysctl_min_unmapped_ratio = 1;

/*
 * If the number of slab pages in a zone grows beyond this percentage then
 * slab reclaim needs to occur.
 */
int sysctl_min_slab_ratio = 5;
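
/*
 * Both ratios are converted (outside this file, by their sysctl handlers)
 * into per-zone page counts, zone->min_unmapped_pages and
 * zone->min_slab_pages, which are the values actually tested below. As a
 * rough example, with the default min_unmapped_ratio of 1, a zone managing
 * 1,000,000 pages ends up with min_unmapped_pages of about 10,000, and
 * zone_reclaim() bails out unless zone_pagecache_reclaimable() exceeds that.
 */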

static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
		zone_page_state(zone, NR_ACTIVE_FILE);

	/*
	 * It's possible for there to be more file mapped pages than
	 * accounted for by the pages on the file LRU lists because
	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
	 */
	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}
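
/*
 * Worked example: with NR_INACTIVE_FILE + NR_ACTIVE_FILE = 50,000 and
 * NR_FILE_MAPPED = 12,000 this reports 38,000 unmapped file pages. If mapped
 * pages outnumber the file LRU total (possible because tmpfs pages live on
 * the anon LRUs yet count as FILE_MAPPED), the result is clamped to 0 rather
 * than underflowing.
 */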

/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static long zone_pagecache_reclaimable(struct zone *zone)
{
	long nr_pagecache_reclaimable;
	long delta = 0;

	/*
	 * If RECLAIM_SWAP is set, then all file pages are considered
	 * potentially reclaimable. Otherwise, we have to worry about
	 * pages like swapcache and zone_unmapped_file_pages() provides
	 * a better estimate
	 */
	if (zone_reclaim_mode & RECLAIM_SWAP)
		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
	else
		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

	/* If we can't clean pages, remove dirty pages from consideration */
	if (!(zone_reclaim_mode & RECLAIM_WRITE))
		delta += zone_page_state(zone, NR_FILE_DIRTY);

	/* Watch for any possible underflows due to delta */
	if (unlikely(delta > nr_pagecache_reclaimable))
		delta = nr_pagecache_reclaimable;

	return nr_pagecache_reclaimable - delta;
}
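
/*
 * Continuing the example above with zone_reclaim_mode == RECLAIM_ZONE
 * (neither RECLAIM_WRITE nor RECLAIM_SWAP set): the base is the 38,000
 * unmapped file pages; if 3,000 of them are dirty (NR_FILE_DIRTY), the
 * function reports 35,000 reclaimable pages. With RECLAIM_SWAP set the base
 * would instead be every file page in the zone (NR_FILE_PAGES).
 */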

/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	/* Minimum pages needed in order to stay on node */
	const unsigned long nr_pages = 1 << order;
	struct task_struct *p = current;
	struct reclaim_state reclaim_state;
	struct scan_control sc = {
		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
		.order = order,
		.priority = ZONE_RECLAIM_PRIORITY,
		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
		.may_swap = 1,
	};

	cond_resched();
	/*
	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
	 * and we also need to be able to write out pages for RECLAIM_WRITE
	 * and RECLAIM_SWAP.
	 */
	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
		/*
		 * Free memory by calling shrink_zone() with increasing
		 * reclaim pressure (numerically decreasing sc.priority)
		 * until we have freed enough memory.
		 */
		do {
			shrink_zone(zone, &sc, true);
		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
	}

	p->reclaim_state = NULL;
	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
	lockdep_clear_current_reclaim_state();
	return sc.nr_reclaimed >= nr_pages;
}

int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
	int node_id;
	int ret;

	/*
	 * Zone reclaim reclaims unmapped file backed pages and
	 * slab pages if we are over the defined limits.
	 *
	 * A small portion of unmapped file backed pages is needed for
	 * file I/O, otherwise pages read by file I/O will be immediately
	 * thrown out if the zone is overallocated. So we do not reclaim
	 * if less than a specified percentage of the zone is used by
	 * unmapped file backed pages.
	 */
	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
		return ZONE_RECLAIM_FULL;

	if (!zone_reclaimable(zone))
		return ZONE_RECLAIM_FULL;

	/*
	 * Do not scan if the allocation should not be delayed.
	 */
	if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
		return ZONE_RECLAIM_NOSCAN;

	/*
	 * Only run zone reclaim on the local zone or on zones that do not
	 * have associated processors. This will favor the local processor
	 * over remote processors and spread off-node memory allocations
	 * as widely as possible.
	 */
	node_id = zone_to_nid(zone);
	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
		return ZONE_RECLAIM_NOSCAN;

	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
		return ZONE_RECLAIM_NOSCAN;

	ret = __zone_reclaim(zone, gfp_mask, order);
	clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);

	if (!ret)
		count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

	return ret;
}
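
/*
 * Illustrative caller sketch: the allocator calls zone_reclaim() when a zone
 * fails its watermark check and then distinguishes "did not scan" from
 * "scanned but made no progress". The ZONE_RECLAIM_* values come from
 * mm/internal.h; the shape below is an assumption, not the actual code in
 * mm/page_alloc.c.
 */
#if 0	/* example only, not compiled */
	switch (zone_reclaim(zone, gfp_mask, order)) {
	case ZONE_RECLAIM_NOSCAN:
		/* did not scan (remote node, or !__GFP_WAIT caller) */
	case ZONE_RECLAIM_FULL:
		/* scanned but found nothing reclaimable */
		continue;	/* move on to the next zone in the zonelist */
	default:
		/* made some progress: re-check the watermark before allocating */
		break;
	}
#endif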
#endif

/*
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
int page_evictable(struct page *page)
{
	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
}
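
/*
 * Typical use, as in the LRU putback paths elsewhere in reclaim (sketch
 * only):
 *
 *	if (page_evictable(page))
 *		lru_cache_add(page);
 *	else
 *		add_page_to_unevictable_list(page);
 */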

#ifdef CONFIG_SHMEM
/**
 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
 * @pages:	array of pages to check
 * @nr_pages:	number of pages to check
 *
 * Checks pages for evictability and moves them to the appropriate lru list.
 *
 * This function is only used for SysV IPC SHM_UNLOCK.
 */
void check_move_unevictable_pages(struct page **pages, int nr_pages)
{
	struct lruvec *lruvec;
	struct zone *zone = NULL;
	int pgscanned = 0;
	int pgrescued = 0;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		pgscanned++;
		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		lruvec = mem_cgroup_page_lruvec(page, zone);

		if (!PageLRU(page) || !PageUnevictable(page))
			continue;

		if (page_evictable(page)) {
			enum lru_list lru = page_lru_base_type(page);

			VM_BUG_ON_PAGE(PageActive(page), page);
			ClearPageUnevictable(page);
			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
			add_page_to_lru_list(page, lruvec, lru);
			pgrescued++;
		}
	}

	if (zone) {
		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
		spin_unlock_irq(&zone->lru_lock);
	}
}
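
/*
 * Illustrative caller sketch: SHM_UNLOCK walks the shmem mapping in
 * pagevec-sized batches and feeds each batch to
 * check_move_unevictable_pages() so previously locked pages return to the
 * normal LRUs. The loop below is an assumption about that caller's shape
 * (loosely modelled on shmem_unlock_mapping()); 'mapping' is not defined
 * here.
 */
#if 0	/* example only, not compiled */
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		check_move_unevictable_pages(pvec.pages, pagevec_count(&pvec));
		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
		pagevec_release(&pvec);
		cond_resched();
	}
#endif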
#endif /* CONFIG_SHMEM */
