This source file includes the following definitions:
- is_memcg_oom
- oom_cpuset_eligible
- oom_cpuset_eligible
- find_lock_task_mm
- is_sysrq_oom
- oom_unkillable_task
- is_dump_unreclaim_slabs
- oom_badness
- constrained_alloc
- oom_evaluate_task
- select_bad_process
- dump_task
- dump_tasks
- dump_oom_summary
- dump_header
- process_shares_mm
- __oom_reap_task_mm
- oom_reap_task_mm
- oom_reap_task
- oom_reaper
- wake_oom_reaper
- oom_init
- subsys_initcall
- mark_oom_victim
- exit_oom_victim
- oom_killer_enable
- oom_killer_disable
- __task_will_free_mem
- task_will_free_mem
- __oom_kill_process
- oom_kill_memcg_member
- oom_kill_process
- check_panic_on_oom
- register_oom_notifier
- unregister_oom_notifier
- out_of_memory
- pagefault_out_of_memory
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/oom_kill.c
4  *
5  *  Copyright (C)  1998,2000  Rik van Riel
6  *	Thanks go out to Claus Fischer for some serious inspiration and
7  *	work.
8  *
9  *  Copyright (C)  2010  Google, Inc.
10  *	Rewritten by David Rientjes
11  *
12  *  The routines in this file are used to kill a process when
13  *  we're seriously out of memory.  This gets called from __alloc_pages()
14  *  in mm/page_alloc.c when we really run out of memory.
15  *
16  *  Since we won't call these routines often (on a well-configured
17  *  machine) this file will double as a home for the random
18  *  debugging devices. [unneeded, but this one off the list anyway...]
19  */
20
21 #include <linux/oom.h>
22 #include <linux/mm.h>
23 #include <linux/err.h>
24 #include <linux/gfp.h>
25 #include <linux/sched.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/coredump.h>
28 #include <linux/sched/task.h>
29 #include <linux/swap.h>
30 #include <linux/timex.h>
31 #include <linux/jiffies.h>
32 #include <linux/cpuset.h>
33 #include <linux/export.h>
34 #include <linux/notifier.h>
35 #include <linux/memcontrol.h>
36 #include <linux/mempolicy.h>
37 #include <linux/security.h>
38 #include <linux/ptrace.h>
39 #include <linux/freezer.h>
40 #include <linux/ftrace.h>
41 #include <linux/ratelimit.h>
42 #include <linux/kthread.h>
43 #include <linux/init.h>
44 #include <linux/mmu_notifier.h>
45
46 #include <asm/tlb.h>
47 #include "internal.h"
48 #include "slab.h"
49
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/oom.h>
52
53 int sysctl_panic_on_oom;
54 int sysctl_oom_kill_allocating_task;
55 int sysctl_oom_dump_tasks = 1;
56
57 /*
58  * Serializes oom killer invocations (out_of_memory()) from all contexts to
59  * prevent parallel, over-eager oom killing (e.g. when the oom killer is
60  * invoked from different domains).
61  *
62  * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled,
63  * so any modification of that variable has to be protected by oom_lock.
64  */
65 DEFINE_MUTEX(oom_lock);
66
67 static inline bool is_memcg_oom(struct oom_control *oc)
68 {
69 return oc->memcg != NULL;
70 }
71
72 #ifdef CONFIG_NUMA
73 /**
74  * oom_cpuset_eligible() - check task eligibility for kill
75  * @start: task struct of which task to consider
76  * @oc: pointer to struct oom_control
77  *
78  * Task eligibility is determined by whether or not a candidate task, @tsk,
79  * shares the same mempolicy nodes as current if it is bound by such a policy
80  * and whether or not it has the same set of allowed cpuset nodes.
81  *
82  * This function assumes oom-killer context: 'current' has triggered
83  * the oom-killer.
84  */
85 static bool oom_cpuset_eligible(struct task_struct *start,
86 struct oom_control *oc)
87 {
88 struct task_struct *tsk;
89 bool ret = false;
90 const nodemask_t *mask = oc->nodemask;
91
92 if (is_memcg_oom(oc))
93 return true;
94
95 rcu_read_lock();
96 for_each_thread(start, tsk) {
97 if (mask) {
98 /*
99  * If this is a mempolicy-constrained oom, tsk's cpuset is
100  * irrelevant.  Only return true if the task's mempolicy
101  * intersects the triggering nodemask; otherwise it may be
102  * needlessly killed.
103  */
104 ret = mempolicy_nodemask_intersects(tsk, mask);
105 } else {
106 /*
107  * This is not a mempolicy-constrained oom, so only check whether
108  * the task's cpuset intersects current's mems_allowed.
109  */
110 ret = cpuset_mems_allowed_intersects(current, tsk);
111 }
112 if (ret)
113 break;
114 }
115 rcu_read_unlock();
116
117 return ret;
118 }
119 #else
120 static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
121 {
122 return true;
123 }
124 #endif
125
126 /*
127  * The process p may have detached its own ->mm while exiting or through
128  * use_mm(), but one or more of its subthreads may still have a valid
129  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
130  * task_lock() held.
131  */
132 struct task_struct *find_lock_task_mm(struct task_struct *p)
133 {
134 struct task_struct *t;
135
136 rcu_read_lock();
137
138 for_each_thread(p, t) {
139 task_lock(t);
140 if (likely(t->mm))
141 goto found;
142 task_unlock(t);
143 }
144 t = NULL;
145 found:
146 rcu_read_unlock();
147
148 return t;
149 }
150
151 /*
152  * order == -1 means the oom kill is required by sysrq, otherwise only
153  * for display purposes.
154  */
155 static inline bool is_sysrq_oom(struct oom_control *oc)
156 {
157 return oc->order == -1;
158 }
159
160
161 static bool oom_unkillable_task(struct task_struct *p)
162 {
163 if (is_global_init(p))
164 return true;
165 if (p->flags & PF_KTHREAD)
166 return true;
167 return false;
168 }
169
170 /*
171  * Check whether the amount of unreclaimable slab exceeds all user memory
172  * (LRU pages); if so, dumping slab usage is more useful in the OOM report.
173  */
174 static bool is_dump_unreclaim_slabs(void)
175 {
176 unsigned long nr_lru;
177
178 nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
179 global_node_page_state(NR_INACTIVE_ANON) +
180 global_node_page_state(NR_ACTIVE_FILE) +
181 global_node_page_state(NR_INACTIVE_FILE) +
182 global_node_page_state(NR_ISOLATED_ANON) +
183 global_node_page_state(NR_ISOLATED_FILE) +
184 global_node_page_state(NR_UNEVICTABLE);
185
186 return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
187 }
188
189 /**
190  * oom_badness - heuristic function to determine which candidate task to kill
191  * @p: task struct of which task we should calculate
192  * @totalpages: total present RAM allowed for page allocation
193  *
194  * The heuristic for determining which task to kill is made to be as simple and
195  * predictable as possible.  The goal is to return the highest value for the
196  * task consuming the most memory to avoid subsequent oom failures.
197  */
198 unsigned long oom_badness(struct task_struct *p, unsigned long totalpages)
199 {
200 long points;
201 long adj;
202
203 if (oom_unkillable_task(p))
204 return 0;
205
206 p = find_lock_task_mm(p);
207 if (!p)
208 return 0;
209
210 /*
211  * Do not even consider tasks which are explicitly marked oom
212  * unkillable, have already been oom reaped, or are in the middle
213  * of a vfork.
214  */
215 adj = (long)p->signal->oom_score_adj;
216 if (adj == OOM_SCORE_ADJ_MIN ||
217 test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
218 in_vfork(p)) {
219 task_unlock(p);
220 return 0;
221 }
222
223 /*
224  * The baseline for the badness score is the proportion of RAM that each
225  * task's rss, pagetable and swap space use.
226  */
227 points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
228 mm_pgtables_bytes(p->mm) / PAGE_SIZE;
229 task_unlock(p);
230
231
232 adj *= totalpages / 1000;
233 points += adj;
234
235 /*
236  * Never return 0 for an eligible task regardless of the adjustment:
237  * 0 is reserved for the unkillable and skipped cases above.
238  */
239 return points > 0 ? points : 1;
240 }
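/*
 * Worked example (not part of the original file, values illustrative):
 * with totalpages = 1,000,000 pages, a task whose rss + swap entries +
 * page-table pages sum to 50,000 pages and whose oom_score_adj is 300
 * scores 50,000 + 300 * (1,000,000 / 1000) = 350,000 points, i.e. each
 * oom_score_adj unit shifts the score by one thousandth of totalpages.
 */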
241
242 static const char * const oom_constraint_text[] = {
243 [CONSTRAINT_NONE] = "CONSTRAINT_NONE",
244 [CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
245 [CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
246 [CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
247 };
248
249 /*
250  * Determine the type of allocation constraint.
251  */
252 static enum oom_constraint constrained_alloc(struct oom_control *oc)
253 {
254 struct zone *zone;
255 struct zoneref *z;
256 enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
257 bool cpuset_limited = false;
258 int nid;
259
260 if (is_memcg_oom(oc)) {
261 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
262 return CONSTRAINT_MEMCG;
263 }
264
265
266 oc->totalpages = totalram_pages() + total_swap_pages;
267
268 if (!IS_ENABLED(CONFIG_NUMA))
269 return CONSTRAINT_NONE;
270
271 if (!oc->zonelist)
272 return CONSTRAINT_NONE;
273
274 /*
275  * A __GFP_THISNODE allocation only reaches the OOM killer together with
276  * __GFP_NOFAIL; treat it as unconstrained rather than node-restricted.
277  */
278 if (oc->gfp_mask & __GFP_THISNODE)
279 return CONSTRAINT_NONE;
280
281 /*
282  * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
283  * the page allocator means a mempolicy is in effect.  Cpuset policy
284  * is enforced in get_page_from_freelist().
285  */
286 if (oc->nodemask &&
287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
288 oc->totalpages = total_swap_pages;
289 for_each_node_mask(nid, *oc->nodemask)
290 oc->totalpages += node_present_pages(nid);
291 return CONSTRAINT_MEMORY_POLICY;
292 }
293
294 /* Check whether this allocation failure is caused by a cpuset restriction */
295 for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
296 high_zoneidx, oc->nodemask)
297 if (!cpuset_zone_allowed(zone, oc->gfp_mask))
298 cpuset_limited = true;
299
300 if (cpuset_limited) {
301 oc->totalpages = total_swap_pages;
302 for_each_node_mask(nid, cpuset_current_mems_allowed)
303 oc->totalpages += node_present_pages(nid);
304 return CONSTRAINT_CPUSET;
305 }
306 return CONSTRAINT_NONE;
307 }
308
309 static int oom_evaluate_task(struct task_struct *task, void *arg)
310 {
311 struct oom_control *oc = arg;
312 unsigned long points;
313
314 if (oom_unkillable_task(task))
315 goto next;
316
317 /* p may not have freeable memory in nodemask */
318 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
319 goto next;
320
321
322 /*
323  * This task already has access to memory reserves and is being killed.
324  * Don't allow any other task access to the reserves unless it has
325  * MMF_OOM_SKIP set, because the chance it releases any memory is low.
326  */
327 if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
328 if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
329 goto next;
330 goto abort;
331 }
332
333 /*
334  * If the task is allocating a lot of memory and has been marked to
335  * be killed first if it triggers an oom, then select it.
336  */
337 if (oom_task_origin(task)) {
338 points = ULONG_MAX;
339 goto select;
340 }
341
342 points = oom_badness(task, oc->totalpages);
343 if (!points || points < oc->chosen_points)
344 goto next;
345
346 select:
347 if (oc->chosen)
348 put_task_struct(oc->chosen);
349 get_task_struct(task);
350 oc->chosen = task;
351 oc->chosen_points = points;
352 next:
353 return 0;
354 abort:
355 if (oc->chosen)
356 put_task_struct(oc->chosen);
357 oc->chosen = (void *)-1UL;
358 return 1;
359 }
360
361 /*
362  * Simple selection loop. We choose the process with the highest number of
363  * 'points'. In case the scan was aborted, oc->chosen is set to -1.
364  */
365 static void select_bad_process(struct oom_control *oc)
366 {
367 if (is_memcg_oom(oc))
368 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
369 else {
370 struct task_struct *p;
371
372 rcu_read_lock();
373 for_each_process(p)
374 if (oom_evaluate_task(p, oc))
375 break;
376 rcu_read_unlock();
377 }
378 }
379
380 static int dump_task(struct task_struct *p, void *arg)
381 {
382 struct oom_control *oc = arg;
383 struct task_struct *task;
384
385 if (oom_unkillable_task(p))
386 return 0;
387
388 /* p may not have freeable memory in nodemask */
389 if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
390 return 0;
391
392 task = find_lock_task_mm(p);
393 if (!task) {
394 /*
395  * This is a kthread or all of p's threads have already detached
396  * their mm's.  There's no need to report them; they can't be oom
397  * killed anyway.
398  */
399 return 0;
400 }
401
402 pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
403 task->pid, from_kuid(&init_user_ns, task_uid(task)),
404 task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
405 mm_pgtables_bytes(task->mm),
406 get_mm_counter(task->mm, MM_SWAPENTS),
407 task->signal->oom_score_adj, task->comm);
408 task_unlock(task);
409
410 return 0;
411 }
412
413 /**
414  * dump_tasks - dump current memory state of all system tasks
415  * @oc: pointer to struct oom_control
416  *
417  * Dumps the current memory state of all eligible tasks.  Tasks not in the same
418  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
419  * are not shown.
420  * State information includes task's pid, uid, tgid, vm size, rss,
421  * pgtables_bytes, swapents, oom_score_adj value, and name.
422  */
423 static void dump_tasks(struct oom_control *oc)
424 {
425 pr_info("Tasks state (memory values in pages):\n");
426 pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
427
428 if (is_memcg_oom(oc))
429 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
430 else {
431 struct task_struct *p;
432
433 rcu_read_lock();
434 for_each_process(p)
435 dump_task(p, oc);
436 rcu_read_unlock();
437 }
438 }
439
440 static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
441 {
442 /* one-line summary of the oom killer context */
443 pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
444 oom_constraint_text[oc->constraint],
445 nodemask_pr_args(oc->nodemask));
446 cpuset_print_current_mems_allowed();
447 mem_cgroup_print_oom_context(oc->memcg, victim);
448 pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
449 from_kuid(&init_user_ns, task_uid(victim)));
450 }
451
452 static void dump_header(struct oom_control *oc, struct task_struct *p)
453 {
454 pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
455 current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
456 current->signal->oom_score_adj);
457 if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
458 pr_warn("COMPACTION is disabled!!!\n");
459
460 dump_stack();
461 if (is_memcg_oom(oc))
462 mem_cgroup_print_oom_meminfo(oc->memcg);
463 else {
464 show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
465 if (is_dump_unreclaim_slabs())
466 dump_unreclaimable_slab();
467 }
468 if (sysctl_oom_dump_tasks)
469 dump_tasks(oc);
470 if (p)
471 dump_oom_summary(oc, p);
472 }
473
474 /*
475  * Number of OOM victims in flight
476  */
477 static atomic_t oom_victims = ATOMIC_INIT(0);
478 static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
479
480 static bool oom_killer_disabled __read_mostly;
481
482 #define K(x) ((x) << (PAGE_SHIFT-10))
483
484 /*
485  * task->mm can be NULL if the task is the exited group leader.  So to
486  * determine whether the task is using a particular mm, we examine all the
487  * task's threads: if one of those is using this mm then this task was also
488  * using it.
489  */
490 bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
491 {
492 struct task_struct *t;
493
494 for_each_thread(p, t) {
495 struct mm_struct *t_mm = READ_ONCE(t->mm);
496 if (t_mm)
497 return t_mm == mm;
498 }
499 return false;
500 }
501
502 #ifdef CONFIG_MMU
503 /*
504  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
505  * victim (if that is possible) to help the OOM killer to move on.
506  */
507 static struct task_struct *oom_reaper_th;
508 static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
509 static struct task_struct *oom_reaper_list;
510 static DEFINE_SPINLOCK(oom_reaper_lock);
511
512 bool __oom_reap_task_mm(struct mm_struct *mm)
513 {
514 struct vm_area_struct *vma;
515 bool ret = true;
516
517 /*
518  * Tell all users of get_user/copy_from_user etc... that the content
519  * is no longer stable. No barriers really needed because unmapping
520  * should imply barriers already and the reader would hit a page fault
521  * if it stumbled over reaped memory.
522  */
523 set_bit(MMF_UNSTABLE, &mm->flags);
524
525 for (vma = mm->mmap ; vma; vma = vma->vm_next) {
526 if (!can_madv_lru_vma(vma))
527 continue;
528
529 /*
530  * Only anonymous pages have a good chance to be dropped
531  * without additional steps, which we cannot afford as we
532  * are OOM already.
533  *
534  * We do not even care about fs-backed pages because all of
535  * the reclaimable ones have already been reclaimed and we
536  * do not want to block exit_mmap here; shared mappings may
537  * still be dirtied and must not be thrown away.
538  */
539 if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
540 struct mmu_notifier_range range;
541 struct mmu_gather tlb;
542
543 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
544 vma, mm, vma->vm_start,
545 vma->vm_end);
546 tlb_gather_mmu(&tlb, mm, range.start, range.end);
547 if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
548 tlb_finish_mmu(&tlb, range.start, range.end);
549 ret = false;
550 continue;
551 }
552 unmap_page_range(&tlb, vma, range.start, range.end, NULL);
553 mmu_notifier_invalidate_range_end(&range);
554 tlb_finish_mmu(&tlb, range.start, range.end);
555 }
556 }
557
558 return ret;
559 }
560
561 /*
562  * Reaps the address space of the given task.
563  *
564  * Returns true on success and false if none or part of the address space
565  * could not be reaped due to lock contention.
566  */
567 static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
568 {
569 bool ret = true;
570
571 if (!down_read_trylock(&mm->mmap_sem)) {
572 trace_skip_task_reaping(tsk->pid);
573 return false;
574 }
575
576 /*
577  * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't work on
578  * the mm anymore. The check for MMF_OOM_SKIP must run under mmap_sem
579  * for reading, because it serializes against exit_mmap(), which sets
580  * the flag under the write lock.
581  */
582 if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
583 trace_skip_task_reaping(tsk->pid);
584 goto out_unlock;
585 }
586
587 trace_start_task_reaping(tsk->pid);
588
589 /* failed to reap part of the address space. Try again later */
590 ret = __oom_reap_task_mm(mm);
591 if (!ret)
592 goto out_finish;
593
594 pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
595 task_pid_nr(tsk), tsk->comm,
596 K(get_mm_counter(mm, MM_ANONPAGES)),
597 K(get_mm_counter(mm, MM_FILEPAGES)),
598 K(get_mm_counter(mm, MM_SHMEMPAGES)));
599 out_finish:
600 trace_finish_task_reaping(tsk->pid);
601 out_unlock:
602 up_read(&mm->mmap_sem);
603
604 return ret;
605 }
606
607 #define MAX_OOM_REAP_RETRIES 10
608 static void oom_reap_task(struct task_struct *tsk)
609 {
610 int attempts = 0;
611 struct mm_struct *mm = tsk->signal->oom_mm;
612
613 /* Retry the down_read_trylock(mmap_sem) a few times */
614 while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
615 schedule_timeout_idle(HZ/10);
616
617 if (attempts <= MAX_OOM_REAP_RETRIES ||
618 test_bit(MMF_OOM_SKIP, &mm->flags))
619 goto done;
620
621 pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
622 task_pid_nr(tsk), tsk->comm);
623 debug_show_all_locks();
624
625 done:
626 tsk->oom_reaper_list = NULL;
627
628 /*
629  * Hide this mm from the OOM killer because it has been either reaped
630  * or somebody can't call up_write(mmap_sem).
631  */
632 set_bit(MMF_OOM_SKIP, &mm->flags);
633
634 /* Drop the reference taken by wake_oom_reaper */
635 put_task_struct(tsk);
636 }
637
638 static int oom_reaper(void *unused)
639 {
640 while (true) {
641 struct task_struct *tsk = NULL;
642
643 wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
644 spin_lock(&oom_reaper_lock);
645 if (oom_reaper_list != NULL) {
646 tsk = oom_reaper_list;
647 oom_reaper_list = tsk->oom_reaper_list;
648 }
649 spin_unlock(&oom_reaper_lock);
650
651 if (tsk)
652 oom_reap_task(tsk);
653 }
654
655 return 0;
656 }
657
658 static void wake_oom_reaper(struct task_struct *tsk)
659 {
660 /* mm is already queued? */
661 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
662 return;
663
664 get_task_struct(tsk);
665
666 spin_lock(&oom_reaper_lock);
667 tsk->oom_reaper_list = oom_reaper_list;
668 oom_reaper_list = tsk;
669 spin_unlock(&oom_reaper_lock);
670 trace_wake_reaper(tsk->pid);
671 wake_up(&oom_reaper_wait);
672 }
673
674 static int __init oom_init(void)
675 {
676 oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
677 return 0;
678 }
679 subsys_initcall(oom_init)
680 #else
681 static inline void wake_oom_reaper(struct task_struct *tsk)
682 {
683 }
684 #endif
685
686 /**
687  * mark_oom_victim - mark the given task as OOM victim
688  * @tsk: task to mark
689  *
690  * Has to be called with oom_lock held and never after
691  * oom has been disabled already.
692  *
693  * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
694  * (either under task_lock or operating on current).
695  */
696 static void mark_oom_victim(struct task_struct *tsk)
697 {
698 struct mm_struct *mm = tsk->mm;
699
700 WARN_ON(oom_killer_disabled);
701
702 if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
703 return;
704
705 /* oom_mm is bound to the signal struct life time */
706 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
707 mmgrab(tsk->signal->oom_mm);
708 set_bit(MMF_OOM_VICTIM, &mm->flags);
709 }
710
711 /*
712  * Make sure that the task is woken up from uninterruptible sleep
713  * if it is frozen, because the OOM killer would not be able to free
714  * any memory otherwise and the whole system could livelock waiting
715  * on the frozen victim.
716  */
717 __thaw_task(tsk);
718 atomic_inc(&oom_victims);
719 trace_mark_victim(tsk->pid);
720 }
721
722 /**
723  * exit_oom_victim - note the exit of an OOM victim
724  */
725 void exit_oom_victim(void)
726 {
727 clear_thread_flag(TIF_MEMDIE);
728
729 if (!atomic_dec_return(&oom_victims))
730 wake_up_all(&oom_victims_wait);
731 }
732
733 /**
734  * oom_killer_enable - enable OOM killer
735  */
736 void oom_killer_enable(void)
737 {
738 oom_killer_disabled = false;
739 pr_info("OOM killer enabled.\n");
740 }
741
742 /**
743  * oom_killer_disable - disable OOM killer
744  * @timeout: maximum timeout to wait for oom victims in jiffies
745  *
746  * Forces all page allocations to fail rather than trigger the OOM killer.
747  * Will block and wait until all OOM victims are killed or the given
748  * timeout expires.
749  *
750  * The function cannot be called when there are runnable user tasks because
751  * the userspace would see unexpected allocation failures as a result. Any
752  * new usage of this function should be consulted with MM people.
753  *
754  * Returns true if successful and false if the OOM killer cannot be
755  * disabled.
756  */
757 bool oom_killer_disable(signed long timeout)
758 {
759 signed long ret;
760
761 /*
762  * Make sure to not race with an ongoing OOM killer. Check that the
763  * current is not killed (possibly due to sharing the victim's memory).
764  */
765 if (mutex_lock_killable(&oom_lock))
766 return false;
767 oom_killer_disabled = true;
768 mutex_unlock(&oom_lock);
769
770 ret = wait_event_interruptible_timeout(oom_victims_wait,
771 !atomic_read(&oom_victims), timeout);
772 if (ret <= 0) {
773 oom_killer_enable();
774 return false;
775 }
776 pr_info("OOM killer disabled.\n");
777
778 return true;
779 }
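/*
 * Usage note (not part of the original file): the main caller of the pair
 * above is the suspend/hibernation path, which disables the OOM killer
 * with a timeout after freezing user space and re-enables it on thaw so
 * that no task is killed while the system is quiescing.
 */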
780
781 static inline bool __task_will_free_mem(struct task_struct *task)
782 {
783 struct signal_struct *sig = task->signal;
784
785 /*
786  * A coredumping process may sleep for an extended period in exit_mm(),
787  * so the oom killer cannot assume that the process will promptly exit
788  * and release memory.
789  */
790 if (sig->flags & SIGNAL_GROUP_COREDUMP)
791 return false;
792
793 if (sig->flags & SIGNAL_GROUP_EXIT)
794 return true;
795
796 if (thread_group_empty(task) && (task->flags & PF_EXITING))
797 return true;
798
799 return false;
800 }
801
802 /*
803  * Checks whether the given task is dying or exiting and likely to
804  * release its address space. This means that all threads and processes
805  * sharing the mm have to be killed or exiting.
806  * Caller has to make sure that task->mm is stable (hold task_lock or
807  * operate on current).
808  */
809 static bool task_will_free_mem(struct task_struct *task)
810 {
811 struct mm_struct *mm = task->mm;
812 struct task_struct *p;
813 bool ret = true;
814
815 /*
816  * Skip tasks without an mm: they may already have passed exit_mm()
817  * and released their address space, so we cannot count on them
818  * freeing any further memory.
819  */
820 if (!mm)
821 return false;
822
823 if (!__task_will_free_mem(task))
824 return false;
825
826 /*
827  * This task has already been drained by the oom reaper so there are
828  * only small chances it will free some more.
829  */
830 if (test_bit(MMF_OOM_SKIP, &mm->flags))
831 return false;
832
833 if (atomic_read(&mm->mm_users) <= 1)
834 return true;
835
836 /*
837  * Make sure that all tasks which share the mm with the given task
838  * are dying as well to make sure that a) nobody pins its mm and
839  * b) the task is also reapable by the oom reaper.
840  */
841 rcu_read_lock();
842 for_each_process(p) {
843 if (!process_shares_mm(p, mm))
844 continue;
845 if (same_thread_group(task, p))
846 continue;
847 ret = __task_will_free_mem(p);
848 if (!ret)
849 break;
850 }
851 rcu_read_unlock();
852
853 return ret;
854 }
855
856 static void __oom_kill_process(struct task_struct *victim, const char *message)
857 {
858 struct task_struct *p;
859 struct mm_struct *mm;
860 bool can_oom_reap = true;
861
862 p = find_lock_task_mm(victim);
863 if (!p) {
864 put_task_struct(victim);
865 return;
866 } else if (victim != p) {
867 get_task_struct(p);
868 put_task_struct(victim);
869 victim = p;
870 }
871
872 /* Get a reference to safely compare mm after task_lock() release */
873 mm = victim->mm;
874 mmgrab(mm);
875
876 /* Raise event before sending signal: task reaper must see this */
877 count_vm_event(OOM_KILL);
878 memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
879
880
881
882
883
884
885 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
886 mark_oom_victim(victim);
887 pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
888 message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
889 K(get_mm_counter(mm, MM_ANONPAGES)),
890 K(get_mm_counter(mm, MM_FILEPAGES)),
891 K(get_mm_counter(mm, MM_SHMEMPAGES)),
892 from_kuid(&init_user_ns, task_uid(victim)),
893 mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
894 task_unlock(victim);
895
896
897 /*
898  * Kill all user processes sharing victim->mm in other thread groups,
899  * if any.  They don't get access to memory reserves, though, to avoid
900  * depletion of all memory.  This prevents an mmap_sem livelock when an
901  * oom-killed thread cannot exit because it needs the semaphore while
902  * another thread holding it is trying to allocate memory; that thread
903  * now gets access to reserves since it has a pending fatal signal.
904  */
905 rcu_read_lock();
906 for_each_process(p) {
907 if (!process_shares_mm(p, mm))
908 continue;
909 if (same_thread_group(p, victim))
910 continue;
911 if (is_global_init(p)) {
912 can_oom_reap = false;
913 set_bit(MMF_OOM_SKIP, &mm->flags);
914 pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
915 task_pid_nr(victim), victim->comm,
916 task_pid_nr(p), p->comm);
917 continue;
918 }
919 /*
920  * No use_mm() user needs to read from the userspace so we are
921  * ok to reap it.
922  */
923 if (unlikely(p->flags & PF_KTHREAD))
924 continue;
925 do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
926 }
927 rcu_read_unlock();
928
929 if (can_oom_reap)
930 wake_oom_reaper(victim);
931
932 mmdrop(mm);
933 put_task_struct(victim);
934 }
935 #undef K
936
937 /*
938  * Kill the provided task unless it is secured by setting
939  * oom_score_adj to OOM_SCORE_ADJ_MIN.
940  */
941 static int oom_kill_memcg_member(struct task_struct *task, void *message)
942 {
943 if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
944 !is_global_init(task)) {
945 get_task_struct(task);
946 __oom_kill_process(task, message);
947 }
948 return 0;
949 }
950
951 static void oom_kill_process(struct oom_control *oc, const char *message)
952 {
953 struct task_struct *victim = oc->chosen;
954 struct mem_cgroup *oom_group;
955 static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
956 DEFAULT_RATELIMIT_BURST);
957
958 /*
959  * If the task is already exiting, don't alarm the sysadmin or kill
960  * its children or threads, just give it access to memory reserves
961  * so it can die quickly.
962  */
963 task_lock(victim);
964 if (task_will_free_mem(victim)) {
965 mark_oom_victim(victim);
966 wake_oom_reaper(victim);
967 task_unlock(victim);
968 put_task_struct(victim);
969 return;
970 }
971 task_unlock(victim);
972
973 if (__ratelimit(&oom_rs))
974 dump_header(oc, victim);
975
976 /*
977  * Do we need to kill the entire memory cgroup? Or even one of the
978  * ancestor memory cgroups? Check this out before killing the
979  * victim task.
980  */
981 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
982
983 __oom_kill_process(victim, message);
984
985 /*
986  * If necessary, kill all tasks in the selected memory cgroup.
987  */
988 if (oom_group) {
989 mem_cgroup_print_oom_group(oom_group);
990 mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
991 (void*)message);
992 mem_cgroup_put(oom_group);
993 }
994 }
995
996 /*
997  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
998  */
999 static void check_panic_on_oom(struct oom_control *oc)
1000 {
1001 if (likely(!sysctl_panic_on_oom))
1002 return;
1003 if (sysctl_panic_on_oom != 2) {
1004 /*
1005  * panic_on_oom == 1 only affects CONSTRAINT_NONE: the kernel does not
1006  * panic for cpuset, mempolicy, or memcg allocation failures, which
1007  * are handled by killing within the constrained domain instead.
1008  */
1009 if (oc->constraint != CONSTRAINT_NONE)
1010 return;
1011 }
1012
1013 if (is_sysrq_oom(oc))
1014 return;
1015 dump_header(oc, NULL);
1016 panic("Out of memory: %s panic_on_oom is enabled\n",
1017 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1018 }
1019
1020 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1021
1022 int register_oom_notifier(struct notifier_block *nb)
1023 {
1024 return blocking_notifier_chain_register(&oom_notify_list, nb);
1025 }
1026 EXPORT_SYMBOL_GPL(register_oom_notifier);
1027
1028 int unregister_oom_notifier(struct notifier_block *nb)
1029 {
1030 return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1031 }
1032 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
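/*
 * Hedged sketch (not part of the original file): how a driver might hook
 * the OOM notifier chain above.  The callback receives a pointer to the
 * "freed" page counter that out_of_memory() passes in and may bump it by
 * however many pages it actually released.  All names below are
 * illustrative only.
 */
#if 0
static int example_oom_notify(struct notifier_block *nb,
			      unsigned long unused, void *parm)
{
	unsigned long *freed = parm;

	*freed += 0;		/* add the number of pages genuinely released */
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
};

/* driver init:  register_oom_notifier(&example_oom_nb);   */
/* driver exit:  unregister_oom_notifier(&example_oom_nb); */
#endif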
1033
1034 /**
1035  * out_of_memory - kill the "best" process when we run out of memory
1036  * @oc: pointer to struct oom_control
1037  *
1038  * If we run out of memory, we have the choice between either
1039  * killing a random task (bad), letting the system crash (worse)
1040  * OR try to be smart about which process to kill. Note that we
1041  * don't have to be perfect here, we just have to be good.
1042  */
1043 bool out_of_memory(struct oom_control *oc)
1044 {
1045 unsigned long freed = 0;
1046
1047 if (oom_killer_disabled)
1048 return false;
1049
1050 if (!is_memcg_oom(oc)) {
1051 blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1052 if (freed > 0)
1053 /* Got some memory back in the last second. */
1054 return true;
1055 }
1056
1057 /*
1058  * If current has a pending SIGKILL or is exiting, then automatically
1059  * select it.  The goal is to allow it to allocate so that it may
1060  * quickly exit and free its memory.
1061  */
1062 if (task_will_free_mem(current)) {
1063 mark_oom_victim(current);
1064 wake_oom_reaper(current);
1065 return true;
1066 }
1067
1068 /*
1069  * The OOM killer does not compensate for IO-less reclaim.
1070  * pagefault_out_of_memory lost its gfp context so we have to make sure
1071  * to exclude a zero mask - all other users should have at least
1072  * __GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to invoke
1073  * the OOM killer even if it is a GFP_NOFS allocation.
1074  */
1075 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
1076 return true;
1077
1078 /*
1079  * Check if there were limitations on the allocation (only relevant for
1080  * NUMA and memcg) that may require different handling.
1081  */
1082 oc->constraint = constrained_alloc(oc);
1083 if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
1084 oc->nodemask = NULL;
1085 check_panic_on_oom(oc);
1086
1087 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1088 current->mm && !oom_unkillable_task(current) &&
1089 oom_cpuset_eligible(current, oc) &&
1090 current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1091 get_task_struct(current);
1092 oc->chosen = current;
1093 oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1094 return true;
1095 }
1096
1097 select_bad_process(oc);
1098
1099 if (!oc->chosen) {
1100 dump_header(oc, NULL);
1101 pr_warn("Out of memory and no killable processes...\n");
1102
1103 /*
1104  * If we got here due to an actual allocation at the system level (not
1105  * sysrq or memcg), nothing is left to kill and the system would deadlock.
1106  */
1107 if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1108 panic("System is deadlocked on memory\n");
1109 }
1110 if (oc->chosen && oc->chosen != (void *)-1UL)
1111 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1112 "Memory cgroup out of memory");
1113 return !!oc->chosen;
1114 }
1115
1116 /*
1117  * The pagefault handler calls here when it cannot allocate memory: let
1118  * memcg OOM handling synchronize a pending memcg kill, otherwise invoke
1119  * the global OOM killer unless another OOM kill already holds oom_lock.
1120  */
1121 void pagefault_out_of_memory(void)
1122 {
1123 struct oom_control oc = {
1124 .zonelist = NULL,
1125 .nodemask = NULL,
1126 .memcg = NULL,
1127 .gfp_mask = 0,
1128 .order = 0,
1129 };
1130
1131 if (mem_cgroup_oom_synchronize(true))
1132 return;
1133
1134 if (!mutex_trylock(&oom_lock))
1135 return;
1136 out_of_memory(&oc);
1137 mutex_unlock(&oom_lock);
1138 }
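/*
 * Hedged sketch (not part of the original file): roughly how a caller such
 * as the page allocator invokes out_of_memory() for a global OOM.  The
 * helper name and field values are illustrative only.
 */
#if 0
static bool example_invoke_oom(struct zonelist *zonelist, nodemask_t *nodemask,
			       gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist = zonelist,
		.nodemask = nodemask,
		.memcg = NULL,		/* global, not memcg, OOM */
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool killed = false;

	/* oom_lock serializes OOM kills; bail out if one is already running */
	if (mutex_trylock(&oom_lock)) {
		killed = out_of_memory(&oc);
		mutex_unlock(&oom_lock);
	}
	return killed;
}
#endif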