This source file includes the following definitions.
- idle_inject_wakeup
- idle_inject_timer_fn
- idle_inject_fn
- idle_inject_set_duration
- idle_inject_get_duration
- idle_inject_start
- idle_inject_stop
- idle_inject_setup
- idle_inject_should_run
- idle_inject_register
- idle_inject_unregister
- idle_inject_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38 #define pr_fmt(fmt) "ii_dev: " fmt
39
40 #include <linux/cpu.h>
41 #include <linux/hrtimer.h>
42 #include <linux/kthread.h>
43 #include <linux/sched.h>
44 #include <linux/slab.h>
45 #include <linux/smpboot.h>
46
47 #include <uapi/linux/sched/types.h>
48
49
50
51
52
53
/**
 * struct idle_inject_thread - per-CPU idle injection thread bookkeeping
 * @tsk: the smpboot-managed kthread injecting idle on this CPU
 * @should_run: whether the thread should run (read by the smpboot
 *              should_run callback; set by the wakeup path, cleared by
 *              the thread itself before entering idle)
 */
struct idle_inject_thread {
	struct task_struct *tsk;
	int should_run;
};
58
59
60
61
62
63
64
65
66 struct idle_inject_device {
67 struct hrtimer timer;
68 unsigned int idle_duration_us;
69 unsigned int run_duration_us;
70 unsigned long int cpumask[0];
71 };
72
/* Per-CPU injection thread state, one instance per possible CPU. */
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
/* Back-pointer from each CPU to the device controlling it (NULL if none). */
static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
75
76
77
78
79
80
81
82
83 static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
84 {
85 struct idle_inject_thread *iit;
86 unsigned int cpu;
87
88 for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
89 iit = per_cpu_ptr(&idle_inject_thread, cpu);
90 iit->should_run = 1;
91 wake_up_process(iit->tsk);
92 }
93 }
94
95
96
97
98
99
100
101
102
103
104
105 static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
106 {
107 unsigned int duration_us;
108 struct idle_inject_device *ii_dev =
109 container_of(timer, struct idle_inject_device, timer);
110
111 duration_us = READ_ONCE(ii_dev->run_duration_us);
112 duration_us += READ_ONCE(ii_dev->idle_duration_us);
113
114 idle_inject_wakeup(ii_dev);
115
116 hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
117
118 return HRTIMER_RESTART;
119 }
120
121
122
123
124
125
126
127
/*
 * idle_inject_fn - smpboot thread function injecting idle on one CPU
 * @cpu: the CPU this thread is bound to
 *
 * Clears should_run first, then forces the CPU idle for the configured
 * duration via play_idle(). Clearing before sleeping means a timer tick
 * arriving during play_idle() re-sets the flag and the thread runs again
 * immediately for the next cycle.
 */
static void idle_inject_fn(unsigned int cpu)
{
	struct idle_inject_device *ii_dev;
	struct idle_inject_thread *iit;

	ii_dev = per_cpu(idle_inject_device, cpu);
	iit = per_cpu_ptr(&idle_inject_thread, cpu);

	/*
	 * Boolean used by the smpboot main loop and used as a
	 * flip-flop to reach sleeping state.
	 */
	iit->should_run = 0;

	play_idle(READ_ONCE(ii_dev->idle_duration_us));
}
143
144
145
146
147
148
149 void idle_inject_set_duration(struct idle_inject_device *ii_dev,
150 unsigned int run_duration_us,
151 unsigned int idle_duration_us)
152 {
153 if (run_duration_us && idle_duration_us) {
154 WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
155 WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
156 }
157 }
158
159
160
161
162
163
164 void idle_inject_get_duration(struct idle_inject_device *ii_dev,
165 unsigned int *run_duration_us,
166 unsigned int *idle_duration_us)
167 {
168 *run_duration_us = READ_ONCE(ii_dev->run_duration_us);
169 *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
170 }
171
172
173
174
175
176
177
178
179
180
181
182 int idle_inject_start(struct idle_inject_device *ii_dev)
183 {
184 unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
185 unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
186
187 if (!idle_duration_us || !run_duration_us)
188 return -EINVAL;
189
190 pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
191 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
192
193 idle_inject_wakeup(ii_dev);
194
195 hrtimer_start(&ii_dev->timer,
196 ns_to_ktime((idle_duration_us + run_duration_us) *
197 NSEC_PER_USEC),
198 HRTIMER_MODE_REL);
199
200 return 0;
201 }
202
203
204
205
206
207
208
209
210
211
212
213
/*
 * idle_inject_stop - stop idle injection on the device's CPUs
 * @ii_dev: target device
 *
 * Cancels the period timer, then forces every injection thread back to
 * sleep. The ordering matters: the timer must be gone first so nothing
 * re-sets should_run while we are clearing it.
 */
void idle_inject_stop(struct idle_inject_device *ii_dev)
{
	struct idle_inject_thread *iit;
	unsigned int cpu;

	pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	/* No further wakeups: stop the source of should_run = 1. */
	hrtimer_cancel(&ii_dev->timer);

	/*
	 * Hold off CPU hotplug so the set of threads we iterate cannot
	 * be torn down or created underneath us while we wait for each
	 * thread to go inactive.
	 */
	cpu_hotplug_disable();

	/*
	 * Clear each thread's run flag and wait until it has actually
	 * stopped running (wait_task_inactive), so that on return no
	 * injection activity remains in flight.
	 */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
		iit = per_cpu_ptr(&idle_inject_thread, cpu);
		iit->should_run = 0;

		wait_task_inactive(iit->tsk, 0);
	}

	cpu_hotplug_enable();
}
248
249
250
251
252
253
254
255
256 static void idle_inject_setup(unsigned int cpu)
257 {
258 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
259
260 sched_setscheduler(current, SCHED_FIFO, ¶m);
261 }
262
263
264
265
266
267
268
269 static int idle_inject_should_run(unsigned int cpu)
270 {
271 struct idle_inject_thread *iit =
272 per_cpu_ptr(&idle_inject_thread, cpu);
273
274 return iit->should_run;
275 }
276
277
278
279
280
281
282
283
284
285
286
287
/*
 * idle_inject_register - create an injection device for a set of CPUs
 * @cpumask: CPUs to inject idle cycles on
 *
 * Allocates the device (with trailing cpumask storage), initializes the
 * period timer, and claims each CPU by pointing its per-CPU device
 * pointer at the new device. A CPU may belong to at most one device.
 *
 * Return: the new device, or NULL on allocation failure or if any CPU
 * in the mask is already registered to another device.
 */
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
{
	struct idle_inject_device *ii_dev;
	int cpu, cpu_rb;

	ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
	if (!ii_dev)
		return NULL;

	cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
	hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ii_dev->timer.function = idle_inject_timer_fn;

	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {

		if (per_cpu(idle_inject_device, cpu)) {
			pr_err("cpu%d is already registered\n", cpu);
			goto out_rollback;
		}

		per_cpu(idle_inject_device, cpu) = ii_dev;
	}

	return ii_dev;

out_rollback:
	/*
	 * Undo only the claims made so far: iterate in the same order and
	 * stop at the CPU that failed ('cpu' still holds its id).
	 */
	for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
		if (cpu == cpu_rb)
			break;
		per_cpu(idle_inject_device, cpu_rb) = NULL;
	}

	kfree(ii_dev);

	return NULL;
}
324
325
326
327
328
329
330
331
332
333 void idle_inject_unregister(struct idle_inject_device *ii_dev)
334 {
335 unsigned int cpu;
336
337 idle_inject_stop(ii_dev);
338
339 for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
340 per_cpu(idle_inject_device, cpu) = NULL;
341
342 kfree(ii_dev);
343 }
344
/*
 * Hotplug-aware per-CPU thread descriptor: smpboot creates one
 * "idle_inject/N" thread per CPU and manages it across CPU online/offline
 * transitions using the callbacks below.
 */
static struct smp_hotplug_thread idle_inject_threads = {
	.store = &idle_inject_thread.tsk,
	.setup = idle_inject_setup,
	.thread_fn = idle_inject_fn,
	.thread_comm = "idle_inject/%u",
	.thread_should_run = idle_inject_should_run,
};
352
/*
 * idle_inject_init - register the per-CPU injection threads at boot
 *
 * Return: 0 on success, negative errno from smpboot on failure.
 */
static int __init idle_inject_init(void)
{
	return smpboot_register_percpu_thread(&idle_inject_threads);
}
early_initcall(idle_inject_init);