/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;
int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns the result of the deepest state's ->enter_dead() callback, or
 * -ENODEV if there is no driver or no state implements ->enter_dead.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
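
/*
 * Illustrative sketch (not from this file): a driver opts in to
 * cpuidle_play_dead() by setting ->enter_dead on one of its states.
 * The names my_deep_enter_dead() and my_cpu_power_off() below are
 * hypothetical:
 *
 *	static int my_deep_enter_dead(struct cpuidle_device *dev, int index)
 *	{
 *		return my_cpu_power_off(dev->cpu);
 *	}
 *
 * and in the driver's state table:
 *
 *	.enter_dead = my_deep_enter_dead,
 */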

static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev, bool freeze)
{
	unsigned int latency_req = 0;
	int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1;

	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || (freeze && !s->enter_freeze))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}
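
/*
 * Worked example (hypothetical state table): given
 *
 *	states[1] = { .exit_latency =   1, .enter_freeze = f1 },
 *	states[2] = { .exit_latency =  50, .enter_freeze = NULL },
 *	states[3] = { .exit_latency = 200, .enter_freeze = f3 },
 *
 * find_deepest_state(drv, dev, false) returns 3, since each iteration
 * raises latency_req and the largest exit latency wins.  With @freeze
 * set, states[2] is skipped for lack of ->enter_freeze but the result
 * is still 3; if no usable state had ->enter_freeze, it would be -1.
 */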

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, false);
}

static void enter_freeze_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	drv->states[index].enter_freeze(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * last CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
}

/**
 * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_freeze callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_freeze present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, true);
	if (index >= 0)
		enter_freeze_proper(drv, dev, index);

	return index;
}
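
/*
 * A minimal sketch of an ->enter_freeze callback, assuming a driver
 * whose deepest state can be entered without re-enabling interrupts
 * (my_enter_freeze and my_wait_for_wakeup are hypothetical names):
 *
 *	static void my_enter_freeze(struct cpuidle_device *dev,
 *				    struct cpuidle_driver *drv, int index)
 *	{
 *		my_wait_for_wakeup();	(must keep interrupts disabled)
 *	}
 */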

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;
	s64 diff;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter())
		return -EBUSY;

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * driver's enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
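
/*
 * Sketch of a state description that triggers the broadcast path above:
 * a driver whose deepest state stops the local timer tags it with
 * CPUIDLE_FLAG_TIMER_STOP (the names and numbers are hypothetical):
 *
 *	{
 *		.name			= "deep",
 *		.desc			= "deepest idle, local timer stops",
 *		.exit_latency		= 300,
 *		.target_residency	= 1000,
 *		.flags			= CPUIDLE_FLAG_TIMER_STOP,
 *		.enter			= my_deep_enter,
 *	}
 */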

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 *
 * Returns the index of the idle state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	return cpuidle_curr_governor->select(drv, dev);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index of the entered idle state, or a negative value in case
 * of error.  The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	if (cpuidle_state_is_coupled(drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
}

/**
 * cpuidle_reflect - tell the underlying governor which state we were in
 *
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
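
/*
 * External users follow the locking pattern the kernel-doc above
 * describes, e.g. (sketch, where my_dev is an already registered
 * device):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(my_dev);
 *	(reconfigure the device or its states here)
 *	ret = cpuidle_enable_device(my_dev);
 *	cpuidle_resume_and_unlock();
 */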

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices.  This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register() function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the CPU devices with the
 * coupled_cpus passed as parameter.  This function handles the common
 * initialization pattern found in the arch-specific drivers.  The
 * devices are defined globally in this file.
 *
 * @drv:          a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, coupled idle states may be
		 * enabled even if the cpuidle driver does not use them.  Note
		 * that coupled_cpus is copied by value here.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
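
/*
 * A minimal registration sketch, assuming a hypothetical driver with a
 * single WFI-like state (all my_* names are made up for illustration):
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name		= "my_idle",
 *		.owner		= THIS_MODULE,
 *		.states[0]	= {
 *			.name			= "WFI",
 *			.desc			= "wait for interrupt",
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.enter			= my_wfi_enter,
 *		},
 *		.state_count	= 1,
 *	};
 *
 *	static int __init my_idle_init(void)
 *	{
 *		return cpuidle_register(&my_idle_driver, NULL);
 *	}
 */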

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state
 * and then recalculate a new suitable C-state.  Just do a cross-CPU IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}
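
/*
 * The notifier above fires whenever a kernel user changes the
 * CPU_DMA_LATENCY constraint through the standard pm_qos request API,
 * e.g. (sketch, my_qos_req is a caller-owned request):
 *
 *	static struct pm_qos_request my_qos_req;
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	(governors now avoid states with exit_latency above 20 us)
 *	pm_qos_remove_request(&my_qos_req);
 */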

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);