This source file includes following definitions.
- __page_cache_release
- __put_single_page
- __put_compound_page
- __put_page
- put_pages_list
- get_kernel_pages
- get_kernel_page
- pagevec_lru_move_fn
- pagevec_move_tail_fn
- pagevec_move_tail
- rotate_reclaimable_page
- update_page_reclaim_stat
- __activate_page
- activate_page_drain
- need_activate_page_drain
- activate_page
- activate_page_drain
- activate_page
- __lru_cache_activate_page
- mark_page_accessed
- __lru_cache_add
- lru_cache_add_anon
- lru_cache_add_file
- lru_cache_add
- lru_cache_add_active_or_unevictable
- lru_deactivate_file_fn
- lru_deactivate_fn
- lru_lazyfree_fn
- lru_add_drain_cpu
- deactivate_file_page
- deactivate_page
- mark_page_lazyfree
- lru_add_drain
- lru_add_drain_per_cpu
- lru_add_drain_all
- lru_add_drain_all
- release_pages
- __pagevec_release
- lru_add_page_tail
- __pagevec_lru_add_fn
- __pagevec_lru_add
- pagevec_lookup_entries
- pagevec_remove_exceptionals
- pagevec_lookup_range
- pagevec_lookup_range_tag
- pagevec_lookup_range_nr_tag
- swap_setup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 #include <linux/mm.h>
18 #include <linux/sched.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/swap.h>
21 #include <linux/mman.h>
22 #include <linux/pagemap.h>
23 #include <linux/pagevec.h>
24 #include <linux/init.h>
25 #include <linux/export.h>
26 #include <linux/mm_inline.h>
27 #include <linux/percpu_counter.h>
28 #include <linux/memremap.h>
29 #include <linux/percpu.h>
30 #include <linux/cpu.h>
31 #include <linux/notifier.h>
32 #include <linux/backing-dev.h>
33 #include <linux/memcontrol.h>
34 #include <linux/gfp.h>
35 #include <linux/uio.h>
36 #include <linux/hugetlb.h>
37 #include <linux/page_idle.h>
38
39 #include "internal.h"
40
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/pagemap.h>
43
44
/* Pages to read ahead on swap-in, as a power of two (set in swap_setup()) */
int page_cluster;

/* Per-CPU pagevecs: batch LRU manipulations to amortize lru_lock traffic */
static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);		/* pending LRU additions */
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);	/* rotate to inactive tail */
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);	/* pending activations */
#endif
55
56
57
58
59
/*
 * Detach a page from its LRU list (if it is on one) in preparation for
 * freeing it, then clear any leftover waiters bit.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		/* lru_lock protects the LRU lists and the page's LRU flag */
		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}
76
/* Final put of an order-0 page: pull it off the LRU, uncharge it, free it. */
static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}
83
/* Final put of a compound page: invoke its registered destructor. */
static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * Hugetlb pages skip the LRU release here and go straight to their
	 * compound destructor.  NOTE(review): presumably the hugetlb
	 * destructor performs its own release/accounting -- confirm against
	 * free_huge_page().
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}
99
/*
 * __put_page - free a page whose refcount has dropped to zero.
 *
 * Zone-device pages are not handed back to the page allocator; dropping
 * the pagemap reference is the final action for them.  Other pages are
 * routed to the compound or single-page free path.
 */
void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/* device pages never reach the normal freeing paths below */
		return;
	}

	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);
118
119
120
121
122
123
124
125
126 void put_pages_list(struct list_head *pages)
127 {
128 while (!list_empty(pages)) {
129 struct page *victim;
130
131 victim = lru_to_page(pages);
132 list_del(&victim->lru);
133 put_page(victim);
134 }
135 }
136 EXPORT_SYMBOL(put_pages_list);
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151 int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
152 struct page **pages)
153 {
154 int seg;
155
156 for (seg = 0; seg < nr_segs; seg++) {
157 if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
158 return seg;
159
160 pages[seg] = kmap_to_page(kiov[seg].iov_base);
161 get_page(pages[seg]);
162 }
163
164 return seg;
165 }
166 EXPORT_SYMBOL_GPL(get_kernel_pages);
167
168
169
170
171
172
173
174
175
176
177
178
179 int get_kernel_page(unsigned long start, int write, struct page **pages)
180 {
181 const struct kvec kiov = {
182 .iov_base = (void *)start,
183 .iov_len = PAGE_SIZE
184 };
185
186 return get_kernel_pages(&kiov, 1, write, pages);
187 }
188 EXPORT_SYMBOL_GPL(get_kernel_page);
189
/*
 * Apply @move_fn to every page in @pvec while holding the owning node's
 * lru_lock.  The lock is only dropped and re-taken when consecutive pages
 * belong to different nodes, batching lock acquisitions.  Afterwards the
 * pagevec's page references are released and the pagevec is reset.
 */
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		/* switch locks only when crossing into a different node */
		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	/* drop the pagevec's page references and empty it */
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}
218
/*
 * Move one evictable LRU page to the tail of its (inactive) list,
 * clearing PG_active, and bump the caller's moved-page counter (@arg).
 */
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *nr_moved = arg;

	if (!PageLRU(page) || PageUnevictable(page))
		return;

	del_page_from_lru_list(page, lruvec, page_lru(page));
	ClearPageActive(page);
	add_page_to_lru_list_tail(page, lruvec, page_lru(page));
	(*nr_moved)++;
}
231
232
233
234
235
236 static void pagevec_move_tail(struct pagevec *pvec)
237 {
238 int pgmoved = 0;
239
240 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
241 __count_vm_events(PGROTATED, pgmoved);
242 }
243
244
245
246
247
248
/*
 * Move an unlocked, clean, evictable LRU page to the tail of its list via
 * the per-CPU rotate pagevec.  NOTE(review): callers appear to use this
 * when writeback completes on a page marked for reclaim -- confirm
 * against end_page_writeback()'s use of PG_reclaim.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		/* lru_rotate_pvecs is only touched with IRQs disabled
		 * (see lru_add_drain_cpu), so disable them here too */
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
264
265 static void update_page_reclaim_stat(struct lruvec *lruvec,
266 int file, int rotated)
267 {
268 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
269
270 reclaim_stat->recent_scanned[file]++;
271 if (rotated)
272 reclaim_stat->recent_rotated[file]++;
273 }
274
275 static void __activate_page(struct page *page, struct lruvec *lruvec,
276 void *arg)
277 {
278 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
279 int file = page_is_file_cache(page);
280 int lru = page_lru_base_type(page);
281
282 del_page_from_lru_list(page, lruvec, lru);
283 SetPageActive(page);
284 lru += LRU_ACTIVE;
285 add_page_to_lru_list(page, lruvec, lru);
286 trace_mm_lru_activate(page);
287
288 __count_vm_event(PGACTIVATE);
289 update_page_reclaim_stat(lruvec, file, 1);
290 }
291 }
292
293 #ifdef CONFIG_SMP
294 static void activate_page_drain(int cpu)
295 {
296 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
297
298 if (pagevec_count(pvec))
299 pagevec_lru_move_fn(pvec, __activate_page, NULL);
300 }
301
302 static bool need_activate_page_drain(int cpu)
303 {
304 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
305 }
306
/*
 * Queue @page (or its compound head) for activation via the per-CPU
 * pagevec; the pagevec is flushed when full, or immediately for compound
 * pages, which are not batched.
 */
void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		/* flush when full, or at once for a compound page */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}
319
320 #else
/* !SMP: there is no per-CPU activation pagevec, so nothing to drain. */
static inline void activate_page_drain(int cpu)
{
}
324
/* !SMP: no batching; activate directly under the node's lru_lock. */
void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
334 #endif
335
/*
 * Activate a page that is not (yet) on an LRU list: it may still be
 * sitting in this CPU's lru_add pagevec, queued for insertion.  If it is
 * found there, set PG_active directly so that __pagevec_lru_add_fn()
 * places it on the active list when the pagevec is drained.
 */
static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search from the tail of the pagevec: a page being marked
	 * accessed right after being added is most likely near the end.
	 * If the page is not in this CPU's pagevec (e.g. it sits in
	 * another CPU's), nothing happens here -- NOTE(review): presumably
	 * the page is then simply activated later, once it reaches an LRU.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}
362
363
364
365
366
367
368
369
370
371
372
/*
 * mark_page_accessed - mark a page as having been referenced.
 *
 * Implements a two-step promotion ladder: the first access sets
 * PG_referenced; a subsequent access on an already-referenced, inactive,
 * evictable page activates it (clearing PG_referenced again).  Also
 * clears the page-idle bit used by idle-page tracking.
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on an LRU, activate it via the normal
		 * (batched) path; otherwise it may still be sitting in a
		 * per-CPU lru_add pagevec, so mark it active there.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		/* file-cache activations also feed workingset detection */
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
399
/*
 * Take a reference on @page and queue it on this CPU's lru_add pagevec;
 * the pagevec is flushed to the LRU when full, or immediately for
 * compound pages.
 */
static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}
409
410
411
412
413
/*
 * Queue an anonymous page for LRU insertion; any stale PG_active is
 * cleared first so __pagevec_lru_add_fn() files it as inactive.
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
420
/*
 * Queue a file page for LRU insertion; any stale PG_active is cleared
 * first so __pagevec_lru_add_fn() files it as inactive.
 */
void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);
428
429
430
431
432
433
434
435
436
437
/**
 * lru_cache_add - add a page to an LRU list, batched per CPU
 * @page: the page to be added to the LRU
 *
 * The page must not already be on an LRU list, and must not be both
 * active and unevictable at the same time.
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}
444
445
446
447
448
449
450
451
452
453
454
/**
 * lru_cache_add_active_or_unevictable - add a page, pre-marked active or mlocked
 * @page: the page to be added to the LRU
 * @vma:  the vma in which the page is mapped
 *
 * Pages mapped in a non-mlocked vma are set active before insertion;
 * pages in a VM_LOCKED (and not VM_SPECIAL) vma are accounted as mlocked
 * instead.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		SetPageActive(page);
	else if (!TestSetPageMlocked(page)) {
		/*
		 * Account the page only on the 0->1 transition of
		 * PG_mlocked: TestSetPageMlocked is atomic, so exactly one
		 * concurrent caller performs the accounting.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	lru_cache_add(page);
}
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
/*
 * Move a file page to the inactive list so reclaim encounters it sooner.
 *
 * Non-LRU, unevictable and still-mapped pages are left untouched.  Dirty
 * or under-writeback pages are re-added at the head of the inactive list
 * with PG_reclaim set, so that writeback completion can rotate them to
 * the tail (see rotate_reclaimable_page()); clean pages go directly to
 * the tail.  Called with the node's lru_lock held.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* still mapped into some process -- leave it where it is */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Dirty/writeback: keep at the inactive head, and set
		 * PG_reclaim so the page is rotated to the tail once
		 * writeback finishes.
		 */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * Clean page: move straight to the inactive tail and
		 * count the rotation.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}
541
542 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
543 void *arg)
544 {
545 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
546 int file = page_is_file_cache(page);
547 int lru = page_lru_base_type(page);
548
549 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
550 ClearPageActive(page);
551 ClearPageReferenced(page);
552 add_page_to_lru_list(page, lruvec, lru);
553
554 __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page));
555 update_page_reclaim_stat(lruvec, file, 0);
556 }
557 }
558
/*
 * Turn one anon, swap-backed, not-yet-swapcached, evictable LRU page into
 * a lazily-freeable page on the inactive file list.  Called with the
 * node's lru_lock held.
 */
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Clearing PG_swapbacked and filing the page on the
		 * inactive *file* list lets reclaim discard it without
		 * swap I/O.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		/* accounted against the file (1) reclaim statistics */
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}
583
584
585
586
587
588
/*
 * Drain all of @cpu's LRU pagevecs: pending additions, rotations,
 * file/anon deactivations, lazy-frees and activations.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* lru_rotate_pvecs is accessed with IRQs disabled elsewhere
		 * (rotate_reclaimable_page), so disable IRQs here too */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}
620
621
622
623
624
625
626
627
628
/**
 * deactivate_file_page - move a file page to the inactive list
 * @page: page to deactivate
 *
 * Queues @page on the per-CPU deactivate-file pagevec so reclaim will
 * find it sooner.  NOTE(review): callers appear to use this when a page
 * cannot be invalidated -- confirm against invalidate_mapping_pages().
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * Unevictable pages are never moved to the inactive file list.
	 */
	if (PageUnevictable(page))
		return;

	/* take a reference only if the page is still live */
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}
646
647
648
649
650
651
652
653
654
/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * Queue an active, evictable LRU page on the per-CPU deactivate pagevec
 * to be moved to its inactive list (see lru_deactivate_fn).
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}
666
667
668
669
670
671
672
673
/*
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * Queue an anonymous, swap-backed page that is not yet in the swap cache
 * on the per-CPU lazyfree pagevec; lru_lazyfree_fn() then moves it to the
 * inactive file list with PG_swapbacked cleared.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}
686
/* Drain the calling CPU's pagevecs; get_cpu() pins us there meanwhile. */
void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}
692
693 #ifdef CONFIG_SMP
694
static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

/* Workqueue callback: drain the pagevecs of the CPU the work runs on. */
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}
701
702
703
704
705
706
707
708
/*
 * lru_add_drain_all - drain the per-CPU LRU pagevecs on all online CPUs.
 *
 * Drain work is scheduled only on CPUs that actually have something
 * queued; the caller then waits for all scheduled work to finish.  A
 * static mutex serializes concurrent callers, since has_work is shared
 * static state.
 */
void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * mm_percpu_wq is set up during boot; a caller arriving before
	 * then cannot be serviced.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		/* schedule work only where some pagevec is non-empty */
		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	mutex_unlock(&lock);
}
745 #else
/* !SMP: there is only one CPU, so drain it directly. */
void lru_add_drain_all(void)
{
	lru_add_drain();
}
750 #endif
751
752
753
754
755
756
757
758
759
/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  Pages whose
 * count drops to zero are removed from their LRU list (batching the node
 * lru_lock) and freed in a single batch at the end.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Bound the IRQs-off lock-holding time on a long run of
		 * pages from the same node: drop the lock every
		 * SWAP_CLUSTER_MAX pages and re-take it on demand.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		/* the huge zero page is never freed through this path */
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * Devmap-managed pages have their own final-put
			 * handling; if that path consumed the reference,
			 * skip the normal freeing below.
			 */
			if (put_devmap_managed_page(page))
				continue;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			/* compound pages are freed outside the lru_lock */
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			/* re-take the lock only when the node changes */
			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
							       flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* drop any leftover PG_active/PG_waiters before freeing */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
845
846
847
848
849
850
851
852
853
854
855
/*
 * Release the pages in @pvec and reinitialize it.  The local CPU's LRU
 * pagevecs are drained once per pagevec lifetime (tracked by
 * percpu_pvec_drained) before releasing -- NOTE(review): presumably so
 * pages still queued for LRU insertion reach the LRU first; confirm
 * against callers' expectations.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
866
867 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
868
/*
 * lru_add_page_tail - place a THP tail page on the LRU (or on @list)
 * @page: the head page (must be PageHead)
 * @page_tail: the tail page to add (must not already be compound or on an LRU)
 * @lruvec: lruvec of the head page; its node's lru_lock must be held
 * @list: optional list to collect the tail page on instead of the LRU
 */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	/* reclaim-stat index; NOTE(review): 0 == anon -- presumably tails
	 * split here are anonymous THP; confirm against the caller */
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	/* only pages headed for the LRU get the LRU flag */
	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		/* head is on an LRU: slot the tail right behind it */
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* caller collects the tail; take a reference for the list */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * Head is not on an LRU and no collection list was given:
		 * put the tail at the LRU tail on its own.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
903 #endif
904
/*
 * Insert one page onto the appropriate LRU list of @lruvec, handling the
 * evictable/unevictable decision and the related accounting.  Called with
 * the node's lru_lock held (via pagevec_lru_move_fn).
 */
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	/*
	 * Full barrier between the SetPageLRU() above and the
	 * page_evictable() test below.
	 * NOTE(review): this presumably pairs with the flag updates on the
	 * mlock/munlock side, so that a page made (un)evictable
	 * concurrently is observed by at least one of the two paths --
	 * confirm the pairing against the mlock implementation.
	 */
	smp_mb();

	if (page_evictable(page)) {
		lru = page_lru(page);
		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
					 PageActive(page));
		/* the page escaped the unevictable list: count the rescue */
		if (was_unevictable)
			count_vm_event(UNEVICTABLE_PGRESCUED);
	} else {
		/* park the page on LRU_UNEVICTABLE, never active there */
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			count_vm_event(UNEVICTABLE_PGCULLED);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}
959
960
961
962
963
/*
 * Add the passed pages to the LRU, then drop the pagevec's reference on
 * them (via pagevec_lru_move_fn), and reinitialize the pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990 unsigned pagevec_lookup_entries(struct pagevec *pvec,
991 struct address_space *mapping,
992 pgoff_t start, unsigned nr_entries,
993 pgoff_t *indices)
994 {
995 pvec->nr = find_get_entries(mapping, start, nr_entries,
996 pvec->pages, indices);
997 return pagevec_count(pvec);
998 }
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009 void pagevec_remove_exceptionals(struct pagevec *pvec)
1010 {
1011 int i, j;
1012
1013 for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
1014 struct page *page = pvec->pages[i];
1015 if (!xa_is_value(page))
1016 pvec->pages[j++] = page;
1017 }
1018 pvec->nr = j;
1019 }
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041 unsigned pagevec_lookup_range(struct pagevec *pvec,
1042 struct address_space *mapping, pgoff_t *start, pgoff_t end)
1043 {
1044 pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
1045 pvec->pages);
1046 return pagevec_count(pvec);
1047 }
1048 EXPORT_SYMBOL(pagevec_lookup_range);
1049
1050 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
1051 struct address_space *mapping, pgoff_t *index, pgoff_t end,
1052 xa_mark_t tag)
1053 {
1054 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1055 PAGEVEC_SIZE, pvec->pages);
1056 return pagevec_count(pvec);
1057 }
1058 EXPORT_SYMBOL(pagevec_lookup_range_tag);
1059
1060 unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
1061 struct address_space *mapping, pgoff_t *index, pgoff_t end,
1062 xa_mark_t tag, unsigned max_pages)
1063 {
1064 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
1065 min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
1066 return pagevec_count(pvec);
1067 }
1068 EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
1069
1070
1071
1072 void __init swap_setup(void)
1073 {
1074 unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1075
1076
1077 if (megs < 16)
1078 page_cluster = 2;
1079 else
1080 page_cluster = 3;
1081
1082
1083
1084
1085 }