This source file includes the following definitions:
- try_to_freeze_tasks
- freeze_processes
- freeze_kernel_threads
- thaw_processes
- thaw_kernel_threads
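
/*
 * Freezing and thawing of user space processes and freezable kernel
 * threads for suspend/hibernation transitions.
 */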
#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>
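
/* Timeout for the freezing of tasks, in milliseconds (20 seconds by default). */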
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}
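
		/*
		 * Not everything has frozen yet and the timeout has not
		 * expired: sleep briefly before retrying, doubling the
		 * interval from 1 ms up to a maximum of 8 ms.
		 */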
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_workqueue_state();

		if (!wakeup || pm_debug_messages_on) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}
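
/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 *
 * The calling task is marked with PF_SUSPEND_TASK and is not frozen itself.
 * On success the usermode helpers and the OOM killer are disabled and 0 is
 * returned; on failure everything frozen so far is thawed again and an
 * error code is returned.
 */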
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;
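
	/* Mark this task so the freezer does not try to freeze it. */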
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear(true);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());
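
	/*
	 * With user space frozen, disable the OOM killer so it cannot
	 * interfere with frozen tasks.  If it cannot be disabled within
	 * the freeze timeout, treat that as a failure (-EBUSY).
	 */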
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}
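
/**
 * freeze_kernel_threads - Freeze the remaining freezable tasks.
 *
 * On success 0 is returned.  On failure an error code is returned and only
 * the kernel threads are thawed; user space tasks frozen earlier stay frozen.
 */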
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}
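
/* Re-enable the OOM killer and usermode helpers, then thaw all frozen tasks. */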
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
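		/* Only the task driving the transition may have PF_SUSPEND_TASK set. */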
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
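
/* Thaw kernel threads and workqueue workers only; user space tasks stay frozen. */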
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}