This source file includes the following definitions:
- hwpoison_filter_dev
- hwpoison_filter_flags
- hwpoison_filter_task
- hwpoison_filter_task
- hwpoison_filter
- hwpoison_filter
- kill_proc
- shake_page
- dev_pagemap_mapping_shift
- add_to_kill
- kill_procs
- find_early_kill_thread
- task_early_kill
- collect_procs_anon
- collect_procs_file
- collect_procs
- delete_from_lru_cache
- truncate_error_page
- me_kernel
- me_unknown
- me_pagecache_clean
- me_pagecache_dirty
- me_swapcache_dirty
- me_swapcache_clean
- me_huge_page
- action_result
- page_action
- get_hwpoison_page
- hwpoison_user_mappings
- identify_page_state
- memory_failure_hugetlb
- memory_failure_dev_pagemap
- memory_failure
- memory_failure_queue
- memory_failure_work_func
- memory_failure_init
- unpoison_memory
- new_page
- __get_any_page
- get_any_page
- soft_offline_huge_page
- __soft_offline_page
- soft_offline_in_use_page
- soft_offline_free_page
- soft_offline_page
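/*
 * Memory failure (HWPoison) handling: recovery paths for pages hit by
 * uncorrectable memory errors, plus the soft-offline path that migrates
 * or invalidates pages and marks the underlying frames as poisoned.
 */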
36 #include <linux/kernel.h>
37 #include <linux/mm.h>
38 #include <linux/page-flags.h>
39 #include <linux/kernel-page-flags.h>
40 #include <linux/sched/signal.h>
41 #include <linux/sched/task.h>
42 #include <linux/ksm.h>
43 #include <linux/rmap.h>
44 #include <linux/export.h>
45 #include <linux/pagemap.h>
46 #include <linux/swap.h>
47 #include <linux/backing-dev.h>
48 #include <linux/migrate.h>
49 #include <linux/suspend.h>
50 #include <linux/slab.h>
51 #include <linux/swapops.h>
52 #include <linux/hugetlb.h>
53 #include <linux/memory_hotplug.h>
54 #include <linux/mm_inline.h>
55 #include <linux/memremap.h>
56 #include <linux/kfifo.h>
57 #include <linux/ratelimit.h>
58 #include <linux/page-isolation.h>
59 #include "internal.h"
60 #include "ras/ras_event.h"
61
62 int sysctl_memory_failure_early_kill __read_mostly = 0;
63
64 int sysctl_memory_failure_recovery __read_mostly = 1;
65
66 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
67
68 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69
70 u32 hwpoison_filter_enable = 0;
71 u32 hwpoison_filter_dev_major = ~0U;
72 u32 hwpoison_filter_dev_minor = ~0U;
73 u64 hwpoison_filter_flags_mask;
74 u64 hwpoison_filter_flags_value;
75 EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
76 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
77 EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
78 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
79 EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
80
81 static int hwpoison_filter_dev(struct page *p)
82 {
83 struct address_space *mapping;
84 dev_t dev;
85
86 if (hwpoison_filter_dev_major == ~0U &&
87 hwpoison_filter_dev_minor == ~0U)
88 return 0;
89
93 if (PageSlab(p))
94 return -EINVAL;
95
96 mapping = page_mapping(p);
97 if (mapping == NULL || mapping->host == NULL)
98 return -EINVAL;
99
100 dev = mapping->host->i_sb->s_dev;
101 if (hwpoison_filter_dev_major != ~0U &&
102 hwpoison_filter_dev_major != MAJOR(dev))
103 return -EINVAL;
104 if (hwpoison_filter_dev_minor != ~0U &&
105 hwpoison_filter_dev_minor != MINOR(dev))
106 return -EINVAL;
107
108 return 0;
109 }
110
111 static int hwpoison_filter_flags(struct page *p)
112 {
113 if (!hwpoison_filter_flags_mask)
114 return 0;
115
116 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
117 hwpoison_filter_flags_value)
118 return 0;
119 else
120 return -EINVAL;
121 }
122
133 #ifdef CONFIG_MEMCG
134 u64 hwpoison_filter_memcg;
135 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
136 static int hwpoison_filter_task(struct page *p)
137 {
138 if (!hwpoison_filter_memcg)
139 return 0;
140
141 if (page_cgroup_ino(p) != hwpoison_filter_memcg)
142 return -EINVAL;
143
144 return 0;
145 }
146 #else
147 static int hwpoison_filter_task(struct page *p) { return 0; }
148 #endif
149
150 int hwpoison_filter(struct page *p)
151 {
152 if (!hwpoison_filter_enable)
153 return 0;
154
155 if (hwpoison_filter_dev(p))
156 return -EINVAL;
157
158 if (hwpoison_filter_flags(p))
159 return -EINVAL;
160
161 if (hwpoison_filter_task(p))
162 return -EINVAL;
163
164 return 0;
165 }
166 #else
167 int hwpoison_filter(struct page *p)
168 {
169 return 0;
170 }
171 #endif
172
173 EXPORT_SYMBOL_GPL(hwpoison_filter);
174
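/*
 * Book-keeping for tasks that must be signalled once the poisoned page
 * has been (or failed to be) unmapped: one entry per task, holding a
 * task reference, the faulting user address and the mapping size (as a
 * shift) used for the siginfo address-granularity field.
 */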
197 struct to_kill {
198 struct list_head nd;
199 struct task_struct *tsk;
200 unsigned long addr;
201 short size_shift;
202 };
203
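/*
 * Send a SIGBUS for the poisoned page: BUS_MCEERR_AR synchronously to
 * the current task for action-required errors in its own address space,
 * otherwise an advisory BUS_MCEERR_AO to the recorded task.
 */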
209 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
210 {
211 struct task_struct *t = tk->tsk;
212 short addr_lsb = tk->size_shift;
213 int ret;
214
215 pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
216 pfn, t->comm, t->pid);
217
218 if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
219 ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
220 addr_lsb);
221 } else {
228 ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
229 addr_lsb, t);
230 }
231 if (ret < 0)
232 pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
233 t->comm, t->pid, ret);
234 return ret;
235 }
236
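/*
 * Try to flush the page out of the per-CPU LRU and buddy caches so its
 * real state becomes visible; if @access is set, also shrink the slab
 * caches on the page's node.
 */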
241 void shake_page(struct page *p, int access)
242 {
243 if (PageHuge(p))
244 return;
245
246 if (!PageSlab(p)) {
247 lru_add_drain_all();
248 if (PageLRU(p))
249 return;
250 drain_all_pages(page_zone(p));
251 if (PageLRU(p) || is_free_buddy_page(p))
252 return;
253 }
254
259 if (access)
260 drop_slab_node(page_to_nid(p));
261 }
262 EXPORT_SYMBOL_GPL(shake_page);
263
264 static unsigned long dev_pagemap_mapping_shift(struct page *page,
265 struct vm_area_struct *vma)
266 {
267 unsigned long address = vma_address(page, vma);
268 pgd_t *pgd;
269 p4d_t *p4d;
270 pud_t *pud;
271 pmd_t *pmd;
272 pte_t *pte;
273
274 pgd = pgd_offset(vma->vm_mm, address);
275 if (!pgd_present(*pgd))
276 return 0;
277 p4d = p4d_offset(pgd, address);
278 if (!p4d_present(*p4d))
279 return 0;
280 pud = pud_offset(p4d, address);
281 if (!pud_present(*pud))
282 return 0;
283 if (pud_devmap(*pud))
284 return PUD_SHIFT;
285 pmd = pmd_offset(pud, address);
286 if (!pmd_present(*pmd))
287 return 0;
288 if (pmd_devmap(*pmd))
289 return PMD_SHIFT;
290 pte = pte_offset_map(pmd, address);
291 if (!pte_present(*pte))
292 return 0;
293 if (pte_devmap(*pte))
294 return PAGE_SHIFT;
295 return 0;
296 }
297
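/*
 * Record one task/vma combination that maps the poisoned page so it can
 * be signalled later.  *tkc holds a pre-allocated entry that is consumed
 * first; any further entries are allocated with GFP_ATOMIC.
 */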
308 static void add_to_kill(struct task_struct *tsk, struct page *p,
309 struct vm_area_struct *vma,
310 struct list_head *to_kill,
311 struct to_kill **tkc)
312 {
313 struct to_kill *tk;
314
315 if (*tkc) {
316 tk = *tkc;
317 *tkc = NULL;
318 } else {
319 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
320 if (!tk) {
321 pr_err("Memory failure: Out of memory while machine check handling\n");
322 return;
323 }
324 }
325 tk->addr = page_address_in_vma(p, vma);
326 if (is_zone_device_page(p))
327 tk->size_shift = dev_pagemap_mapping_shift(p, vma);
328 else
329 tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
330
341 if (tk->addr == -EFAULT) {
342 pr_info("Memory failure: Unable to find user space address %lx in %s\n",
343 page_to_pfn(p), tsk->comm);
344 } else if (tk->size_shift == 0) {
345 kfree(tk);
346 return;
347 }
348 get_task_struct(tsk);
349 tk->tsk = tsk;
350 list_add_tail(&tk->nd, to_kill);
351 }
352
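/*
 * Walk the to_kill list, then drop the task references and free the
 * entries.  With @forcekill set, each task is either SIGKILLed (when the
 * page could not be unmapped or no user address was found) or sent a
 * SIGBUS via kill_proc(); without it, only the cleanup is done.
 */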
361 static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
362 unsigned long pfn, int flags)
363 {
364 struct to_kill *tk, *next;
365
366 list_for_each_entry_safe (tk, next, to_kill, nd) {
367 if (forcekill) {
373 if (fail || tk->addr == -EFAULT) {
374 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
375 pfn, tk->tsk->comm, tk->tsk->pid);
376 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
377 tk->tsk, PIDTYPE_PID);
378 }
386 else if (kill_proc(tk, pfn, flags) < 0)
387 pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
388 pfn, tk->tsk->comm, tk->tsk->pid);
389 }
390 put_task_struct(tk->tsk);
391 kfree(tk);
392 }
393 }
394
403 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
404 {
405 struct task_struct *t;
406
407 for_each_thread(tsk, t)
408 if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
409 return t;
410 return NULL;
411 }
412
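/*
 * Decide whether this task should be signalled early (at error time
 * rather than at fault time): when early kill is forced by the caller,
 * when a thread opted in via PF_MCE_PROCESS/PF_MCE_EARLY, or when the
 * memory_failure_early_kill sysctl is set.
 */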
419 static struct task_struct *task_early_kill(struct task_struct *tsk,
420 int force_early)
421 {
422 struct task_struct *t;
423 if (!tsk->mm)
424 return NULL;
425 if (force_early)
426 return tsk;
427 t = find_early_kill_thread(tsk);
428 if (t)
429 return t;
430 if (sysctl_memory_failure_early_kill)
431 return tsk;
432 return NULL;
433 }
434
438 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
439 struct to_kill **tkc, int force_early)
440 {
441 struct vm_area_struct *vma;
442 struct task_struct *tsk;
443 struct anon_vma *av;
444 pgoff_t pgoff;
445
446 av = page_lock_anon_vma_read(page);
447 if (av == NULL)
448 return;
449
450 pgoff = page_to_pgoff(page);
451 read_lock(&tasklist_lock);
452 for_each_process (tsk) {
453 struct anon_vma_chain *vmac;
454 struct task_struct *t = task_early_kill(tsk, force_early);
455
456 if (!t)
457 continue;
458 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
459 pgoff, pgoff) {
460 vma = vmac->vma;
461 if (!page_mapped_in_vma(page, vma))
462 continue;
463 if (vma->vm_mm == t->mm)
464 add_to_kill(t, page, vma, to_kill, tkc);
465 }
466 }
467 read_unlock(&tasklist_lock);
468 page_unlock_anon_vma_read(av);
469 }
470
474 static void collect_procs_file(struct page *page, struct list_head *to_kill,
475 struct to_kill **tkc, int force_early)
476 {
477 struct vm_area_struct *vma;
478 struct task_struct *tsk;
479 struct address_space *mapping = page->mapping;
480
481 i_mmap_lock_read(mapping);
482 read_lock(&tasklist_lock);
483 for_each_process(tsk) {
484 pgoff_t pgoff = page_to_pgoff(page);
485 struct task_struct *t = task_early_kill(tsk, force_early);
486
487 if (!t)
488 continue;
489 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
490 pgoff) {
498 if (vma->vm_mm == t->mm)
499 add_to_kill(t, page, vma, to_kill, tkc);
500 }
501 }
502 read_unlock(&tasklist_lock);
503 i_mmap_unlock_read(mapping);
504 }
505
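/*
 * Collect every process that maps the corrupted page, dispatching to the
 * anonymous or file-backed walker.  Nothing is collected if the page has
 * no mapping.
 */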
512 static void collect_procs(struct page *page, struct list_head *tokill,
513 int force_early)
514 {
515 struct to_kill *tk;
516
517 if (!page->mapping)
518 return;
519
520 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
521 if (!tk)
522 return;
523 if (PageAnon(page))
524 collect_procs_anon(page, tokill, &tk, force_early);
525 else
526 collect_procs_file(page, tokill, &tk, force_early);
527 kfree(tk);
528 }
529
530 static const char *action_name[] = {
531 [MF_IGNORED] = "Ignored",
532 [MF_FAILED] = "Failed",
533 [MF_DELAYED] = "Delayed",
534 [MF_RECOVERED] = "Recovered",
535 };
536
537 static const char * const action_page_types[] = {
538 [MF_MSG_KERNEL] = "reserved kernel page",
539 [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page",
540 [MF_MSG_SLAB] = "kernel slab page",
541 [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking",
542 [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
543 [MF_MSG_HUGE] = "huge page",
544 [MF_MSG_FREE_HUGE] = "free huge page",
545 [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
546 [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
547 [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
548 [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
549 [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page",
550 [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page",
551 [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page",
552 [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page",
553 [MF_MSG_DIRTY_LRU] = "dirty LRU page",
554 [MF_MSG_CLEAN_LRU] = "clean LRU page",
555 [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page",
556 [MF_MSG_BUDDY] = "free buddy page",
557 [MF_MSG_BUDDY_2ND] = "free buddy page (2nd try)",
558 [MF_MSG_DAX] = "dax page",
559 [MF_MSG_UNKNOWN] = "unknown page",
560 };
561
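/*
 * Take the page off the LRU: clear the Active/Unevictable bits, uncharge
 * it from its memcg and drop the extra reference added by
 * isolate_lru_page().  Returns 0 on success, -EIO if isolation failed.
 */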
568 static int delete_from_lru_cache(struct page *p)
569 {
570 if (!isolate_lru_page(p)) {
575 ClearPageActive(p);
576 ClearPageUnevictable(p);
582 mem_cgroup_uncharge(p);
583
587 put_page(p);
588 return 0;
589 }
590 return -EIO;
591 }
592
593 static int truncate_error_page(struct page *p, unsigned long pfn,
594 struct address_space *mapping)
595 {
596 int ret = MF_FAILED;
597
598 if (mapping->a_ops->error_remove_page) {
599 int err = mapping->a_ops->error_remove_page(mapping, p);
600
601 if (err != 0) {
602 pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
603 pfn, err);
604 } else if (page_has_private(p) &&
605 !try_to_release_page(p, GFP_NOIO)) {
606 pr_info("Memory failure: %#lx: failed to release buffers\n",
607 pfn);
608 } else {
609 ret = MF_RECOVERED;
610 }
611 } else {
616 if (invalidate_inode_page(p))
617 ret = MF_RECOVERED;
618 else
619 pr_info("Memory failure: %#lx: Failed to invalidate\n",
620 pfn);
621 }
622
623 return ret;
624 }
625
631 static int me_kernel(struct page *p, unsigned long pfn)
632 {
633 return MF_IGNORED;
634 }
635
639 static int me_unknown(struct page *p, unsigned long pfn)
640 {
641 pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
642 return MF_FAILED;
643 }
644
648 static int me_pagecache_clean(struct page *p, unsigned long pfn)
649 {
650 struct address_space *mapping;
651
652 delete_from_lru_cache(p);
658 if (PageAnon(p))
659 return MF_RECOVERED;
668 mapping = page_mapping(p);
669 if (!mapping) {
673 return MF_FAILED;
674 }
675
681 return truncate_error_page(p, pfn, mapping);
682 }
683
689 static int me_pagecache_dirty(struct page *p, unsigned long pfn)
690 {
691 struct address_space *mapping = page_mapping(p);
692
693 SetPageError(p);
694
695 if (mapping) {
730 mapping_set_error(mapping, -EIO);
731 }
732
733 return me_pagecache_clean(p, pfn);
734 }
735
755 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
756 {
757 ClearPageDirty(p);
758
759 ClearPageUptodate(p);
760
761 if (!delete_from_lru_cache(p))
762 return MF_DELAYED;
763 else
764 return MF_FAILED;
765 }
766
767 static int me_swapcache_clean(struct page *p, unsigned long pfn)
768 {
769 delete_from_swap_cache(p);
770
771 if (!delete_from_lru_cache(p))
772 return MF_RECOVERED;
773 else
774 return MF_FAILED;
775 }
776
783 static int me_huge_page(struct page *p, unsigned long pfn)
784 {
785 int res = 0;
786 struct page *hpage = compound_head(p);
787 struct address_space *mapping;
788
789 if (!PageHuge(hpage))
790 return MF_DELAYED;
791
792 mapping = page_mapping(hpage);
793 if (mapping) {
794 res = truncate_error_page(hpage, pfn, mapping);
795 } else {
796 unlock_page(hpage);
802 if (PageAnon(hpage))
803 put_page(hpage);
804 dissolve_free_huge_page(p);
805 res = MF_RECOVERED;
806 lock_page(hpage);
807 }
808
809 return res;
810 }
811
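/*
 * Table of recognised page states.  Each entry matches the page flags
 * against a mask/result pair and names the handler used to recover that
 * kind of page; the first matching entry wins, and the final all-zero
 * entry is the catch-all for unknown states and must stay last.
 */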
825 #define dirty (1UL << PG_dirty)
826 #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
827 #define unevict (1UL << PG_unevictable)
828 #define mlock (1UL << PG_mlocked)
829 #define writeback (1UL << PG_writeback)
830 #define lru (1UL << PG_lru)
831 #define head (1UL << PG_head)
832 #define slab (1UL << PG_slab)
833 #define reserved (1UL << PG_reserved)
834
835 static struct page_state {
836 unsigned long mask;
837 unsigned long res;
838 enum mf_action_page_type type;
839 int (*action)(struct page *p, unsigned long pfn);
840 } error_states[] = {
841 { reserved, reserved, MF_MSG_KERNEL, me_kernel },
852 { slab, slab, MF_MSG_SLAB, me_kernel },
853
854 { head, head, MF_MSG_HUGE, me_huge_page },
855
856 { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
857 { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
858
859 { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty },
860 { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean },
861
862 { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty },
863 { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean },
864
865 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty },
866 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean },
867
871 { 0, 0, MF_MSG_UNKNOWN, me_unknown },
872 };
873
874 #undef dirty
875 #undef sc
876 #undef unevict
877 #undef mlock
878 #undef writeback
879 #undef lru
880 #undef head
881 #undef slab
882 #undef reserved
883
888 static void action_result(unsigned long pfn, enum mf_action_page_type type,
889 enum mf_result result)
890 {
891 trace_memory_failure_event(pfn, type, result);
892
893 pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
894 pfn, action_page_types[type], action_name[result]);
895 }
896
897 static int page_action(struct page_state *ps, struct page *p,
898 unsigned long pfn)
899 {
900 int result;
901 int count;
902
903 result = ps->action(p, pfn);
904
905 count = page_count(p) - 1;
906 if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
907 count--;
908 if (count > 0) {
909 pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
910 pfn, action_page_types[ps->type], count);
911 result = MF_FAILED;
912 }
913 action_result(pfn, ps->type, result);
914
920 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
921 }
922
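/*
 * Take a reference on the head page of the compound page containing the
 * error.  Returns 1 when a reference was obtained, 0 when the page is
 * free, the compound page changed under us, or it belongs to a
 * non-anonymous transparent huge page.
 */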
930 int get_hwpoison_page(struct page *page)
931 {
932 struct page *head = compound_head(page);
933
934 if (!PageHuge(head) && PageTransHuge(head)) {
941 if (!PageAnon(head)) {
942 pr_err("Memory failure: %#lx: non anonymous thp\n",
943 page_to_pfn(page));
944 return 0;
945 }
946 }
947
948 if (get_page_unless_zero(head)) {
949 if (head == compound_head(page))
950 return 1;
951
952 pr_info("Memory failure: %#lx cannot catch tail\n",
953 page_to_pfn(page));
954 put_page(head);
955 }
956
957 return 0;
958 }
959 EXPORT_SYMBOL_GPL(get_hwpoison_page);
960
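/*
 * Unmap the poisoned page from all user address spaces.  Processes that
 * map the page are collected first and then signalled by kill_procs();
 * a page-cache page that is still clean after page_mkclean() is simply
 * dropped without killing anyone.  Returns true when the page is no
 * longer mapped (or there was nothing to unmap).
 */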
965 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
966 int flags, struct page **hpagep)
967 {
968 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
969 struct address_space *mapping;
970 LIST_HEAD(tokill);
971 bool unmap_success;
972 int kill = 1, forcekill;
973 struct page *hpage = *hpagep;
974 bool mlocked = PageMlocked(hpage);
975
980 if (PageReserved(p) || PageSlab(p))
981 return true;
982 if (!(PageLRU(hpage) || PageHuge(p)))
983 return true;
984
989 if (!page_mapped(hpage))
990 return true;
991
992 if (PageKsm(p)) {
993 pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
994 return false;
995 }
996
997 if (PageSwapCache(p)) {
998 pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
999 pfn);
1000 ttu |= TTU_IGNORE_HWPOISON;
1001 }
1002
1009 mapping = page_mapping(hpage);
1010 if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
1011 mapping_cap_writeback_dirty(mapping)) {
1012 if (page_mkclean(hpage)) {
1013 SetPageDirty(hpage);
1014 } else {
1015 kill = 0;
1016 ttu |= TTU_IGNORE_HWPOISON;
1017 pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
1018 pfn);
1019 }
1020 }
1021
1030 if (kill)
1031 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1032
1033 unmap_success = try_to_unmap(hpage, ttu);
1034 if (!unmap_success)
1035 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1036 pfn, page_mapcount(hpage));
1037
1042 if (mlocked)
1043 shake_page(hpage, 0);
1044
1055 forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
1056 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1057
1058 return unmap_success;
1059 }
1060
1061 static int identify_page_state(unsigned long pfn, struct page *p,
1062 unsigned long page_flags)
1063 {
1064 struct page_state *ps;
1065
1071 for (ps = error_states;; ps++)
1072 if ((p->flags & ps->mask) == ps->res)
1073 break;
1074
1075 page_flags |= (p->flags & (1UL << PG_dirty));
1076
1077 if (!ps->mask)
1078 for (ps = error_states;; ps++)
1079 if ((page_flags & ps->mask) == ps->res)
1080 break;
1081 return page_action(ps, p, pfn);
1082 }
1083
1084 static int memory_failure_hugetlb(unsigned long pfn, int flags)
1085 {
1086 struct page *p = pfn_to_page(pfn);
1087 struct page *head = compound_head(p);
1088 int res;
1089 unsigned long page_flags;
1090
1091 if (TestSetPageHWPoison(head)) {
1092 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1093 pfn);
1094 return 0;
1095 }
1096
1097 num_poisoned_pages_inc();
1098
1099 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1103 lock_page(head);
1104 if (PageHWPoison(head)) {
1105 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1106 || (p != head && TestSetPageHWPoison(head))) {
1107 num_poisoned_pages_dec();
1108 unlock_page(head);
1109 return 0;
1110 }
1111 }
1112 unlock_page(head);
1113 dissolve_free_huge_page(p);
1114 action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED);
1115 return 0;
1116 }
1117
1118 lock_page(head);
1119 page_flags = head->flags;
1120
1121 if (!PageHWPoison(head)) {
1122 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1123 num_poisoned_pages_dec();
1124 unlock_page(head);
1125 put_hwpoison_page(head);
1126 return 0;
1127 }
1128
1138 if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
1139 action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
1140 res = -EBUSY;
1141 goto out;
1142 }
1143
1144 if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
1145 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1146 res = -EBUSY;
1147 goto out;
1148 }
1149
1150 res = identify_page_state(pfn, p, page_flags);
1151 out:
1152 unlock_page(head);
1153 return res;
1154 }
1155
1156 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
1157 struct dev_pagemap *pgmap)
1158 {
1159 struct page *page = pfn_to_page(pfn);
1160 const bool unmap_success = true;
1161 unsigned long size = 0;
1162 struct to_kill *tk;
1163 LIST_HEAD(tokill);
1164 int rc = -EBUSY;
1165 loff_t start;
1166 dax_entry_t cookie;
1167
1175 cookie = dax_lock_page(page);
1176 if (!cookie)
1177 goto out;
1178
1179 if (hwpoison_filter(page)) {
1180 rc = 0;
1181 goto unlock;
1182 }
1183
1184 if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
1189 goto unlock;
1190 }
1191
1196 SetPageHWPoison(page);
1197
1204 flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
1205 collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
1206
1207 list_for_each_entry(tk, &tokill, nd)
1208 if (tk->size_shift)
1209 size = max(size, 1UL << tk->size_shift);
1210 if (size) {
1217 start = (page->index << PAGE_SHIFT) & ~(size - 1);
1218 unmap_mapping_range(page->mapping, start, start + size, 0);
1219 }
1220 kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
1221 rc = 0;
1222 unlock:
1223 dax_unlock_page(page, cookie);
1224 out:
1225
1226 put_dev_pagemap(pgmap);
1227 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
1228 return rc;
1229 }
1230
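/*
 * Handle an uncorrectable memory error in a single page.  @pfn is the
 * affected page frame and @flags carries MF_* modifiers such as
 * MF_COUNT_INCREASED, MF_ACTION_REQUIRED and MF_MUST_KILL.  Hugetlb and
 * device (pgmap) pages are handed to their dedicated handlers; otherwise
 * the page is marked hwpoisoned, unmapped from user space and recovered
 * according to the error_states table.  Sleeps, so it must be called
 * from process context.
 */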
1248 int memory_failure(unsigned long pfn, int flags)
1249 {
1250 struct page *p;
1251 struct page *hpage;
1252 struct page *orig_head;
1253 struct dev_pagemap *pgmap;
1254 int res;
1255 unsigned long page_flags;
1256
1257 if (!sysctl_memory_failure_recovery)
1258 panic("Memory failure on page %lx", pfn);
1259
1260 p = pfn_to_online_page(pfn);
1261 if (!p) {
1262 if (pfn_valid(pfn)) {
1263 pgmap = get_dev_pagemap(pfn, NULL);
1264 if (pgmap)
1265 return memory_failure_dev_pagemap(pfn, flags,
1266 pgmap);
1267 }
1268 pr_err("Memory failure: %#lx: memory outside kernel control\n",
1269 pfn);
1270 return -ENXIO;
1271 }
1272
1273 if (PageHuge(p))
1274 return memory_failure_hugetlb(pfn, flags);
1275 if (TestSetPageHWPoison(p)) {
1276 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1277 pfn);
1278 return 0;
1279 }
1280
1281 orig_head = hpage = compound_head(p);
1282 num_poisoned_pages_inc();
1283
1295 if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
1296 if (is_free_buddy_page(p)) {
1297 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1298 return 0;
1299 } else {
1300 action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
1301 return -EBUSY;
1302 }
1303 }
1304
1305 if (PageTransHuge(hpage)) {
1306 lock_page(p);
1307 if (!PageAnon(p) || unlikely(split_huge_page(p))) {
1308 unlock_page(p);
1309 if (!PageAnon(p))
1310 pr_err("Memory failure: %#lx: non anonymous thp\n",
1311 pfn);
1312 else
1313 pr_err("Memory failure: %#lx: thp split failed\n",
1314 pfn);
1315 if (TestClearPageHWPoison(p))
1316 num_poisoned_pages_dec();
1317 put_hwpoison_page(p);
1318 return -EBUSY;
1319 }
1320 unlock_page(p);
1321 VM_BUG_ON_PAGE(!page_count(p), p);
1322 hpage = compound_head(p);
1323 }
1324
1333 shake_page(p, 0);
1334
1335 if (!PageLRU(p) && is_free_buddy_page(p)) {
1336 if (flags & MF_COUNT_INCREASED)
1337 action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
1338 else
1339 action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
1340 return 0;
1341 }
1342
1343 lock_page(p);
1344
1349 if (PageCompound(p) && compound_head(p) != orig_head) {
1350 action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
1351 res = -EBUSY;
1352 goto out;
1353 }
1354
1362 if (PageHuge(p))
1363 page_flags = hpage->flags;
1364 else
1365 page_flags = p->flags;
1366
1370 if (!PageHWPoison(p)) {
1371 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1372 num_poisoned_pages_dec();
1373 unlock_page(p);
1374 put_hwpoison_page(p);
1375 return 0;
1376 }
1377 if (hwpoison_filter(p)) {
1378 if (TestClearPageHWPoison(p))
1379 num_poisoned_pages_dec();
1380 unlock_page(p);
1381 put_hwpoison_page(p);
1382 return 0;
1383 }
1384
1385 if (!PageTransTail(p) && !PageLRU(p))
1386 goto identify_page_state;
1387
1392 wait_on_page_writeback(p);
1393
1401 if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
1402 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
1403 res = -EBUSY;
1404 goto out;
1405 }
1406
1410 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
1411 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
1412 res = -EBUSY;
1413 goto out;
1414 }
1415
1416 identify_page_state:
1417 res = identify_page_state(pfn, p, page_flags);
1418 out:
1419 unlock_page(p);
1420 return res;
1421 }
1422 EXPORT_SYMBOL_GPL(memory_failure);
1423
1424 #define MEMORY_FAILURE_FIFO_ORDER 4
1425 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER)
1426
1427 struct memory_failure_entry {
1428 unsigned long pfn;
1429 int flags;
1430 };
1431
1432 struct memory_failure_cpu {
1433 DECLARE_KFIFO(fifo, struct memory_failure_entry,
1434 MEMORY_FAILURE_FIFO_SIZE);
1435 spinlock_t lock;
1436 struct work_struct work;
1437 };
1438
1439 static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
1440
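/*
 * Defer handling of a memory failure: the pfn/flags pair is pushed onto
 * a per-CPU kfifo and memory_failure_work_func() later calls
 * memory_failure() (or soft_offline_page() for MF_SOFT_OFFLINE entries)
 * from workqueue context, for callers that cannot do the recovery work
 * directly.
 */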
1457 void memory_failure_queue(unsigned long pfn, int flags)
1458 {
1459 struct memory_failure_cpu *mf_cpu;
1460 unsigned long proc_flags;
1461 struct memory_failure_entry entry = {
1462 .pfn = pfn,
1463 .flags = flags,
1464 };
1465
1466 mf_cpu = &get_cpu_var(memory_failure_cpu);
1467 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1468 if (kfifo_put(&mf_cpu->fifo, entry))
1469 schedule_work_on(smp_processor_id(), &mf_cpu->work);
1470 else
1471 pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
1472 pfn);
1473 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1474 put_cpu_var(memory_failure_cpu);
1475 }
1476 EXPORT_SYMBOL_GPL(memory_failure_queue);
1477
1478 static void memory_failure_work_func(struct work_struct *work)
1479 {
1480 struct memory_failure_cpu *mf_cpu;
1481 struct memory_failure_entry entry = { 0, };
1482 unsigned long proc_flags;
1483 int gotten;
1484
1485 mf_cpu = this_cpu_ptr(&memory_failure_cpu);
1486 for (;;) {
1487 spin_lock_irqsave(&mf_cpu->lock, proc_flags);
1488 gotten = kfifo_get(&mf_cpu->fifo, &entry);
1489 spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
1490 if (!gotten)
1491 break;
1492 if (entry.flags & MF_SOFT_OFFLINE)
1493 soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
1494 else
1495 memory_failure(entry.pfn, entry.flags);
1496 }
1497 }
1498
1499 static int __init memory_failure_init(void)
1500 {
1501 struct memory_failure_cpu *mf_cpu;
1502 int cpu;
1503
1504 for_each_possible_cpu(cpu) {
1505 mf_cpu = &per_cpu(memory_failure_cpu, cpu);
1506 spin_lock_init(&mf_cpu->lock);
1507 INIT_KFIFO(mf_cpu->fifo);
1508 INIT_WORK(&mf_cpu->work, memory_failure_work_func);
1509 }
1510
1511 return 0;
1512 }
1513 core_initcall(memory_failure_init);
1514
1515 #define unpoison_pr_info(fmt, pfn, rs) \
1516 ({ \
1517 if (__ratelimit(rs)) \
1518 pr_info(fmt, pfn); \
1519 })
1520
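/*
 * Undo the effect of an earlier memory_failure() on a page that is not
 * mapped, has no extra references and no mapping: clear PG_hwpoison,
 * fix up the poisoned-page count and drop the reference that
 * memory_failure() took.
 */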
1533 int unpoison_memory(unsigned long pfn)
1534 {
1535 struct page *page;
1536 struct page *p;
1537 int freeit = 0;
1538 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1539 DEFAULT_RATELIMIT_BURST);
1540
1541 if (!pfn_valid(pfn))
1542 return -ENXIO;
1543
1544 p = pfn_to_page(pfn);
1545 page = compound_head(p);
1546
1547 if (!PageHWPoison(p)) {
1548 unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
1549 pfn, &unpoison_rs);
1550 return 0;
1551 }
1552
1553 if (page_count(page) > 1) {
1554 unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
1555 pfn, &unpoison_rs);
1556 return 0;
1557 }
1558
1559 if (page_mapped(page)) {
1560 unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
1561 pfn, &unpoison_rs);
1562 return 0;
1563 }
1564
1565 if (page_mapping(page)) {
1566 unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
1567 pfn, &unpoison_rs);
1568 return 0;
1569 }
1570
1576 if (!PageHuge(page) && PageTransHuge(page)) {
1577 unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
1578 pfn, &unpoison_rs);
1579 return 0;
1580 }
1581
1582 if (!get_hwpoison_page(p)) {
1583 if (TestClearPageHWPoison(p))
1584 num_poisoned_pages_dec();
1585 unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
1586 pfn, &unpoison_rs);
1587 return 0;
1588 }
1589
1590 lock_page(page);
1591
1597 if (TestClearPageHWPoison(page)) {
1598 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1599 pfn, &unpoison_rs);
1600 num_poisoned_pages_dec();
1601 freeit = 1;
1602 }
1603 unlock_page(page);
1604
1605 put_hwpoison_page(page);
1606 if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
1607 put_hwpoison_page(page);
1608
1609 return 0;
1610 }
1611 EXPORT_SYMBOL(unpoison_memory);
1612
1613 static struct page *new_page(struct page *p, unsigned long private)
1614 {
1615 int nid = page_to_nid(p);
1616
1617 return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
1618 }
1619
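/*
 * Grab a reference on a page for soft offlining.  Returns 1 when a
 * reference was taken, 0 when the page is a free buddy or free huge
 * page (nothing to migrate), and -EIO for any other zero-refcount page.
 */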
1626 static int __get_any_page(struct page *p, unsigned long pfn, int flags)
1627 {
1628 int ret;
1629
1630 if (flags & MF_COUNT_INCREASED)
1631 return 1;
1632
1637 if (!get_hwpoison_page(p)) {
1638 if (PageHuge(p)) {
1639 pr_info("%s: %#lx free huge page\n", __func__, pfn);
1640 ret = 0;
1641 } else if (is_free_buddy_page(p)) {
1642 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
1643 ret = 0;
1644 } else {
1645 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1646 __func__, pfn, p->flags);
1647 ret = -EIO;
1648 }
1649 } else {
1650
1651 ret = 1;
1652 }
1653 return ret;
1654 }
1655
1656 static int get_any_page(struct page *page, unsigned long pfn, int flags)
1657 {
1658 int ret = __get_any_page(page, pfn, flags);
1659
1660 if (ret == 1 && !PageHuge(page) &&
1661 !PageLRU(page) && !__PageMovable(page)) {
1665 put_hwpoison_page(page);
1666 shake_page(page, 1);
1671 ret = __get_any_page(page, pfn, 0);
1672 if (ret == 1 && !PageLRU(page)) {
1673
1674 put_hwpoison_page(page);
1675 pr_info("soft_offline: %#lx: unknown non LRU page type %lx (%pGp)\n",
1676 pfn, page->flags, &page->flags);
1677 return -EIO;
1678 }
1679 }
1680 return ret;
1681 }
1682
1683 static int soft_offline_huge_page(struct page *page, int flags)
1684 {
1685 int ret;
1686 unsigned long pfn = page_to_pfn(page);
1687 struct page *hpage = compound_head(page);
1688 LIST_HEAD(pagelist);
1689
1694 lock_page(hpage);
1695 if (PageHWPoison(hpage)) {
1696 unlock_page(hpage);
1697 put_hwpoison_page(hpage);
1698 pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
1699 return -EBUSY;
1700 }
1701 unlock_page(hpage);
1702
1703 ret = isolate_huge_page(hpage, &pagelist);
1708 put_hwpoison_page(hpage);
1709 if (!ret) {
1710 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1711 return -EBUSY;
1712 }
1713
1714 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1715 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1716 if (ret) {
1717 pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
1718 pfn, ret, page->flags, &page->flags);
1719 if (!list_empty(&pagelist))
1720 putback_movable_pages(&pagelist);
1721 if (ret > 0)
1722 ret = -EIO;
1723 } else {
1731 ret = dissolve_free_huge_page(page);
1732 if (!ret) {
1733 if (set_hwpoison_free_buddy_page(page))
1734 num_poisoned_pages_inc();
1735 else
1736 ret = -EBUSY;
1737 }
1738 }
1739 return ret;
1740 }
1741
1742 static int __soft_offline_page(struct page *page, int flags)
1743 {
1744 int ret;
1745 unsigned long pfn = page_to_pfn(page);
1746
1753 lock_page(page);
1754 wait_on_page_writeback(page);
1755 if (PageHWPoison(page)) {
1756 unlock_page(page);
1757 put_hwpoison_page(page);
1758 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1759 return -EBUSY;
1760 }
1761
1765 ret = invalidate_inode_page(page);
1766 unlock_page(page);
1767
1771 if (ret == 1) {
1772 put_hwpoison_page(page);
1773 pr_info("soft_offline: %#lx: invalidated\n", pfn);
1774 SetPageHWPoison(page);
1775 num_poisoned_pages_inc();
1776 return 0;
1777 }
1778
1784 if (PageLRU(page))
1785 ret = isolate_lru_page(page);
1786 else
1787 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1788
1792 put_hwpoison_page(page);
1793 if (!ret) {
1794 LIST_HEAD(pagelist);
1795
1800 if (!__PageMovable(page))
1801 inc_node_page_state(page, NR_ISOLATED_ANON +
1802 page_is_file_cache(page));
1803 list_add(&page->lru, &pagelist);
1804 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1805 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1806 if (ret) {
1807 if (!list_empty(&pagelist))
1808 putback_movable_pages(&pagelist);
1809
1810 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
1811 pfn, ret, page->flags, &page->flags);
1812 if (ret > 0)
1813 ret = -EIO;
1814 }
1815 } else {
1816 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx (%pGp)\n",
1817 pfn, ret, page_count(page), page->flags, &page->flags);
1818 }
1819 return ret;
1820 }
1821
1822 static int soft_offline_in_use_page(struct page *page, int flags)
1823 {
1824 int ret;
1825 int mt;
1826 struct page *hpage = compound_head(page);
1827
1828 if (!PageHuge(page) && PageTransHuge(hpage)) {
1829 lock_page(page);
1830 if (!PageAnon(page) || unlikely(split_huge_page(page))) {
1831 unlock_page(page);
1832 if (!PageAnon(page))
1833 pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
1834 else
1835 pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
1836 put_hwpoison_page(page);
1837 return -EBUSY;
1838 }
1839 unlock_page(page);
1840 }
1841
1849 mt = get_pageblock_migratetype(page);
1850 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
1851 if (PageHuge(page))
1852 ret = soft_offline_huge_page(page, flags);
1853 else
1854 ret = __soft_offline_page(page, flags);
1855 set_pageblock_migratetype(page, mt);
1856 return ret;
1857 }
1858
1859 static int soft_offline_free_page(struct page *page)
1860 {
1861 int rc = dissolve_free_huge_page(page);
1862
1863 if (!rc) {
1864 if (set_hwpoison_free_buddy_page(page))
1865 num_poisoned_pages_inc();
1866 else
1867 rc = -EBUSY;
1868 }
1869 return rc;
1870 }
1871
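/*
 * Soft offline a page: migrate or invalidate its contents and then mark
 * the underlying page frame hwpoisoned so it is never used again.
 * Unlike memory_failure(), no process is killed; the function returns
 * -EBUSY or -EIO when the page cannot be isolated or migrated.
 */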
1894 int soft_offline_page(struct page *page, int flags)
1895 {
1896 int ret;
1897 unsigned long pfn = page_to_pfn(page);
1898
1899 if (is_zone_device_page(page)) {
1900 pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
1901 pfn);
1902 if (flags & MF_COUNT_INCREASED)
1903 put_page(page);
1904 return -EIO;
1905 }
1906
1907 if (PageHWPoison(page)) {
1908 pr_info("soft offline: %#lx page already poisoned\n", pfn);
1909 if (flags & MF_COUNT_INCREASED)
1910 put_hwpoison_page(page);
1911 return -EBUSY;
1912 }
1913
1914 get_online_mems();
1915 ret = get_any_page(page, pfn, flags);
1916 put_online_mems();
1917
1918 if (ret > 0)
1919 ret = soft_offline_in_use_page(page, flags);
1920 else if (ret == 0)
1921 ret = soft_offline_free_page(page);
1922
1923 return ret;
1924 }