This source file includes the following definitions:
- has_target
- cpufreq_disabled
- disable_cpufreq
- have_governor_per_policy
- get_governor_parent_kobj
- get_cpu_idle_time_jiffy
- get_cpu_idle_time
- arch_set_freq_scale
- cpufreq_generic_init
- cpufreq_cpu_get_raw
- cpufreq_generic_get
- cpufreq_cpu_get
- cpufreq_cpu_put
- cpufreq_cpu_release
- cpufreq_cpu_acquire
- adjust_jiffies
- cpufreq_notify_transition
- cpufreq_notify_post_transition
- cpufreq_freq_transition_begin
- cpufreq_freq_transition_end
- cpufreq_list_transition_notifiers
- cpufreq_enable_fast_switch
- cpufreq_disable_fast_switch
- cpufreq_driver_resolve_freq
- cpufreq_policy_transition_delay_us
- show_boost
- store_boost
- find_governor
- cpufreq_parse_policy
- cpufreq_parse_governor
- arch_freq_get_on_cpu
- show_scaling_cur_freq
- show_cpuinfo_cur_freq
- show_scaling_governor
- store_scaling_governor
- show_scaling_driver
- show_scaling_available_governors
- cpufreq_show_cpus
- show_related_cpus
- show_affected_cpus
- store_scaling_setspeed
- show_scaling_setspeed
- show_bios_limit
- show
- store
- cpufreq_sysfs_release
- add_cpu_dev_symlink
- remove_cpu_dev_symlink
- cpufreq_add_dev_interface
- cpufreq_default_governor
- cpufreq_init_policy
- cpufreq_add_policy_cpu
- refresh_frequency_limits
- handle_update
- cpufreq_notifier_min
- cpufreq_notifier_max
- cpufreq_policy_put_kobj
- cpufreq_policy_alloc
- cpufreq_policy_free
- cpufreq_online
- cpufreq_add_dev
- cpufreq_offline
- cpufreq_remove_dev
- cpufreq_out_of_sync
- cpufreq_verify_current_freq
- cpufreq_quick_get
- cpufreq_quick_get_max
- __cpufreq_get
- cpufreq_get
- cpufreq_generic_suspend
- cpufreq_suspend
- cpufreq_resume
- cpufreq_get_current_driver
- cpufreq_get_driver_data
- cpufreq_register_notifier
- cpufreq_unregister_notifier
- cpufreq_driver_fast_switch
- __target_intermediate
- __target_index
- __cpufreq_driver_target
- cpufreq_driver_target
- cpufreq_fallback_governor
- cpufreq_init_governor
- cpufreq_exit_governor
- cpufreq_start_governor
- cpufreq_stop_governor
- cpufreq_governor_limits
- cpufreq_register_governor
- cpufreq_unregister_governor
- cpufreq_get_policy
- cpufreq_set_policy
- cpufreq_update_policy
- cpufreq_update_limits
- cpufreq_boost_set_sw
- cpufreq_boost_trigger_state
- cpufreq_boost_supported
- create_boost_sysfs_file
- remove_boost_sysfs_file
- cpufreq_enable_boost_support
- cpufreq_boost_enabled
- cpuhp_cpufreq_online
- cpuhp_cpufreq_offline
- cpufreq_register_driver
- cpufreq_unregister_driver
- cpufreq_core_init
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)                     \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
                if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                   \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low-level driver
 * of CPUFreq support, and its lock.  The rwlock also protects the
 * cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_governor *new_gov,
                              unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = div_u64(cur_wall_time, NSEC_PER_USEC);

        return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                /* !NO_HZ or cpu offline, so we can rely on cpustat */
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
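
/*
 * Example (illustrative only, not part of this file): an ondemand-style
 * governor derives a busy percentage from two samples of the helper above.
 * All variable names here are hypothetical.
 *
 *      u64 wall, idle;
 *      u64 busy_pct;
 *      static u64 prev_wall, prev_idle;
 *
 *      idle = get_cpu_idle_time(cpu, &wall, 0);
 *      busy_pct = div64_u64(100 * ((wall - prev_wall) - (idle - prev_idle)),
 *                           wall - prev_wall);
 *      prev_wall = wall;
 *      prev_idle = idle;
 */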

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        policy->freq_table = table;
        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all
         * processors share the clock and voltage.
         */
        cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
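
/*
 * Example (illustrative sketch, not part of this file): a clk-based driver
 * can reuse the generic helpers above by storing its clock in policy->clk
 * from its ->init() callback and pointing ->get at cpufreq_generic_get().
 * "my_freq_table" and "my_cpufreq_init" are hypothetical names.
 *
 *      static int my_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              policy->clk = clk_get(get_cpu_device(policy->cpu), NULL);
 *              if (IS_ERR(policy->clk))
 *                      return PTR_ERR(policy->clk);
 *
 *              cpufreq_generic_init(policy, my_freq_table, 100000);
 *              return 0;
 *      }
 *
 *      static struct cpufreq_driver my_driver = {
 *              .init = my_cpufreq_init,
 *              .get  = cpufreq_generic_get,
 *      };
 */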

/**
 * cpufreq_cpu_get - Return the policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Return the policy for @cpu, or NULL if none exists.  On success the
 * reference count of the policy's kobject has been incremented, so the
 * caller must drop it with cpufreq_cpu_put() when done with the policy.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement the usage count of a policy.
 * @policy: cpufreq policy previously returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
        if (WARN_ON(!policy))
                return;

        lockdep_assert_held(&policy->rwsem);

        up_write(&policy->rwsem);

        cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy of @cpu
 * and, if it is not NULL, acquire its rwsem for writing.  Return the policy
 * if it is active, or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to unlock its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return NULL;

        down_write(&policy->rwsem);

        if (policy_is_inactive(policy)) {
                cpufreq_cpu_release(policy);
                return NULL;
        }

        return policy;
}
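
/*
 * Example (illustrative): the acquire/release pair brackets a
 * read-modify-write of an active policy, as cpufreq_update_policy() below
 * does:
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *      if (policy) {
 *              refresh_frequency_limits(policy);
 *              cpufreq_cpu_release(policy);
 *      }
 */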

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the transition belongs to.
 * @freqs: details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                                      struct cpufreq_freqs *freqs,
                                      unsigned int state)
{
        int cpu;

        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->policy = policy;
        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {
        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (policy->cur && policy->cur != freqs->old) {
                        pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                 freqs->old, policy->cur);
                        freqs->old = policy->cur;
                }

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_PRECHANGE, freqs);

                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
                         cpumask_pr_args(policy->cpus));

                for_each_cpu(cpu, policy->cpus)
                        trace_cpu_frequency(freqs->new, cpu);

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_POSTCHANGE, freqs);

                cpufreq_stats_record_transition(policy, freqs->new);
                policy->cur = freqs->new;
        }
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                        && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (WARN_ON(!policy->transition_ongoing))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
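
/*
 * Example (illustrative sketch, hypothetical driver): a driver flagged with
 * CPUFREQ_ASYNC_NOTIFICATION must bracket its hardware frequency write with
 * the begin/end helpers itself; "my_write_freq_reg" is a made-up function:
 *
 *      struct cpufreq_freqs freqs = {
 *              .old = policy->cur,
 *              .new = policy->freq_table[index].frequency,
 *      };
 *      int ret;
 *
 *      cpufreq_freq_transition_begin(policy, &freqs);
 *      ret = my_write_freq_reg(freqs.new);
 *      cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 * For ordinary ->target_index() drivers the core issues these calls itself,
 * see __target_index() below.
 */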

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
        struct notifier_block *nb;

        pr_info("Registered transition notifiers:\n");

        mutex_lock(&cpufreq_transition_notifier_list.mutex);

        for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
                pr_info("%pS\n", nb->notifier_call);

        mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier
 * registered at this point, as fast frequency switching is quite
 * fundamentally at odds with transition notifiers.  Thus if successful, it
 * will make registration of transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
        lockdep_assert_held(&policy->rwsem);

        if (!policy->fast_switch_possible)
                return;

        mutex_lock(&cpufreq_fast_switch_lock);
        if (cpufreq_fast_switch_count >= 0) {
                cpufreq_fast_switch_count++;
                policy->fast_switch_enabled = true;
        } else {
                pr_warn("CPU%u: Fast frequency switching not enabled\n",
                        policy->cpu);
                cpufreq_list_transition_notifiers();
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
        mutex_lock(&cpufreq_fast_switch_lock);
        if (policy->fast_switch_enabled) {
                policy->fast_switch_enabled = false;
                if (!WARN_ON(cpufreq_fast_switch_count <= 0))
                        cpufreq_fast_switch_count--;
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - get the frequency the driver will actually
 * use for a given target frequency
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);
        policy->cached_target_freq = target_freq;

        if (cpufreq_driver->target_index) {
                int idx;

                idx = cpufreq_frequency_table_target(policy, target_freq,
                                                     CPUFREQ_RELATION_L);
                policy->cached_resolved_idx = idx;
                return policy->freq_table[idx].frequency;
        }

        if (cpufreq_driver->resolve_freq)
                return cpufreq_driver->resolve_freq(policy, target_freq);

        return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
        unsigned int latency;

        if (policy->transition_delay_us)
                return policy->transition_delay_us;

        latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
        if (latency) {
                /*
                 * For platforms that can change the frequency very fast
                 * (< 10 us), the above formula gives a decent transition
                 * delay. But for platforms where transition_latency is in
                 * milliseconds, it ends up giving unrealistic values.
                 *
                 * Cap the default transition delay to 10 ms, which seems to
                 * be a reasonable amount of time after which we should
                 * reevaluate the frequency.
                 */
                return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
        }

        return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
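
/*
 * Worked example for the helper above (assumed numbers): a driver reporting
 * cpuinfo.transition_latency = 500000 ns gives latency = 500 us, so the
 * candidate delay is 500 * LATENCY_MULTIPLIER (1000) = 500000 us, which the
 * cap reduces to 10000 us (10 ms).  A platform reporting 5000 ns gives
 * 5 us * 1000 = 5000 us, which is below the cap and is used as-is.
 */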

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
        if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
                return CPUFREQ_POLICY_PERFORMANCE;

        if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
                return CPUFREQ_POLICY_POWERSAVE;

        return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
        struct cpufreq_governor *t;

        mutex_lock(&cpufreq_governor_mutex);

        t = find_governor(str_governor);
        if (!t) {
                int ret;

                mutex_unlock(&cpufreq_governor_mutex);

                ret = request_module("cpufreq_%s", str_governor);
                if (ret)
                        return NULL;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);
        }
        if (t && !try_module_get(t->owner))
                t = NULL;

        mutex_unlock(&cpufreq_governor_mutex);

        return t;
}

/*
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
        return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;
        unsigned int freq;

        freq = arch_freq_get_on_cpu(policy->cpu);
        if (freq)
                ret = sprintf(buf, "%u\n", freq);
        else if (cpufreq_driver && cpufreq_driver->setpolicy &&
                 cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

/*
 * store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned long val;                                              \
        int ret;                                                        \
                                                                        \
        ret = sscanf(buf, "%lu", &val);                                 \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = freq_qos_update_request(policy->object##_freq_req, val);  \
        return ret >= 0 ? count : ret;                                  \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
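
/*
 * For reference, store_one(scaling_min_freq, min) above expands (roughly) to:
 *
 *      static ssize_t store_scaling_min_freq(struct cpufreq_policy *policy,
 *                                            const char *buf, size_t count)
 *      {
 *              unsigned long val;
 *              int ret;
 *
 *              ret = sscanf(buf, "%lu", &val);
 *              if (ret != 1)
 *                      return -EINVAL;
 *
 *              ret = freq_qos_update_request(policy->min_freq_req, val);
 *              return ret >= 0 ? count : ret;
 *      }
 *
 * i.e. writes to scaling_min_freq and scaling_max_freq are routed through
 * the PM QoS frequency constraints rather than poking policy->min/max
 * directly.
 */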

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                     char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (cur_freq)
                return sprintf(buf, "%u\n", cur_freq);

        return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                 policy->governor->name);
        return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        char str_governor[16];
        int ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_driver->setpolicy) {
                unsigned int new_pol;

                new_pol = cpufreq_parse_policy(str_governor);
                if (!new_pol)
                        return -EINVAL;

                ret = cpufreq_set_policy(policy, NULL, new_pol);
        } else {
                struct cpufreq_governor *new_gov;

                new_gov = cpufreq_parse_governor(str_governor);
                if (!new_gov)
                        return -EINVAL;

                ret = cpufreq_set_policy(policy, new_gov,
                                         CPUFREQ_POLICY_UNKNOWN);

                module_put(new_gov->owner);
        }

        return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                      const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
        if (!ret)
                return sprintf(buf, "%u\n", limit);
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!fattr->show)
                return -EIO;

        down_read(&policy->rwsem);
        ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        if (!fattr->store)
                return -EIO;

        /*
         * cpus_read_trylock() is used here to work around a circular lock
         * dependency problem with respect to the cpufreq_register_driver().
         */
        if (!cpus_read_trylock())
                return -EBUSY;

        if (cpu_online(policy->cpu)) {
                down_write(&policy->rwsem);
                ret = fattr->store(policy, buf, count);
                up_write(&policy->rwsem);
        }

        cpus_read_unlock();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (unlikely(!dev))
                return;

        if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
                return;

        dev_dbg(dev, "%s: Adding symlink\n", __func__);
        if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
                dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
                                   struct device *dev)
{
        dev_dbg(dev, "%s: Removing symlink\n", __func__);
        sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return 0;
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
        return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *def_gov = cpufreq_default_governor();
        struct cpufreq_governor *gov = NULL;
        unsigned int pol = CPUFREQ_POLICY_UNKNOWN;

        if (has_target()) {
                /* Update policy governor to the one used before hotplug. */
                gov = find_governor(policy->last_governor);
                if (gov) {
                        pr_debug("Restoring governor %s for cpu %d\n",
                                 gov->name, policy->cpu);
                } else if (def_gov) {
                        gov = def_gov;
                } else {
                        return -ENODATA;
                }
        } else {
                /* Use the default policy if there is no last_policy. */
                if (policy->last_policy) {
                        pol = policy->last_policy;
                } else if (def_gov) {
                        pol = cpufreq_parse_policy(def_gov->name);
                        /*
                         * In case the default governor is neither
                         * "performance" nor "powersave", fall back to the
                         * initial policy value set by the driver.
                         */
                        if (pol == CPUFREQ_POLICY_UNKNOWN)
                                pol = policy->policy;
                }
                if (pol != CPUFREQ_POLICY_PERFORMANCE &&
                    pol != CPUFREQ_POLICY_POWERSAVE)
                        return -ENODATA;
        }

        return cpufreq_set_policy(policy, gov, pol);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = cpufreq_start_governor(policy);
                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }
        up_write(&policy->rwsem);
        return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
        if (!policy_is_inactive(policy)) {
                pr_debug("updating policy for CPU %u\n", policy->cpu);

                cpufreq_set_policy(policy, policy->governor, policy->policy);
        }
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);

        pr_debug("handle_update for cpu %u called\n", policy->cpu);
        down_write(&policy->rwsem);
        refresh_frequency_limits(policy);
        up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

        schedule_work(&policy->update);
        return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

        schedule_work(&policy->update);
        return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        down_write(&policy->rwsem);
        cpufreq_stats_free_table(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        struct device *dev = get_cpu_device(cpu);
        int ret;

        if (!dev)
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
                /*
                 * The entire policy object will be freed below, but the extra
                 * memory allocated for the kobject name needs to be freed by
                 * releasing the kobject.
                 */
                kobject_put(&policy->kobj);
                goto err_free_real_cpus;
        }

        freq_constraints_init(&policy->constraints);

        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;

        ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
                                    &policy->nb_min);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }

        ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
                                    &policy->nb_max);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_min_qos_notifier;
        }

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_min_qos_notifier:
        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
                                 &policy->nb_min);
err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
        free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
                                 &policy->nb_max);
        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
                                 &policy->nb_min);

        /* Cancel any pending policy->update work before freeing the policy. */
        cancel_work_sync(&policy->update);

        if (policy->max_freq_req) {
                /*
                 * CPUFREQ_CREATE_POLICY notification is sent only after
                 * successfully adding max_freq_req request.
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
                freq_qos_remove_request(policy->max_freq_req);
        }

        freq_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);

        cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy.  Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        if (!new_policy && cpufreq_driver->online) {
                ret = cpufreq_driver->online(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_exit_policy;
                }

                /* Recover policy->cpus using related_cpus */
                cpumask_copy(policy->cpus, policy->related_cpus);
        } else {
                cpumask_copy(policy->cpus, cpumask_of(cpu));

                /*
                 * Call driver. From then on the cpufreq must be able
                 * to accept all calls to ->verify and ->setpolicy for
                 * this CPU.
                 */
                ret = cpufreq_driver->init(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_free_policy;
                }

                ret = cpufreq_table_validate_and_sort(policy);
                if (ret)
                        goto out_exit_policy;

                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
        }

        down_write(&policy->rwsem);
        /*
         * affected cpus must always be the ones which are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
                }

                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
                                               GFP_KERNEL);
                if (!policy->min_freq_req)
                        goto out_destroy_policy;

                ret = freq_qos_add_request(&policy->constraints,
                                           policy->min_freq_req, FREQ_QOS_MIN,
                                           policy->min);
                if (ret < 0) {
                        /*
                         * So we don't call freq_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;
                        goto out_destroy_policy;
                }

                /*
                 * This must be initialized right here to avoid calling
                 * freq_qos_remove_request() on an uninitialized request in
                 * case of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;

                ret = freq_qos_add_request(&policy->constraints,
                                           policy->max_freq_req, FREQ_QOS_MAX,
                                           policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
                        goto out_destroy_policy;
                }

                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
        }

        if (cpufreq_driver->get && has_target()) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_destroy_policy;
                }
        }

        /*
         * Sometimes boot loaders set CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run on that frequency for
         * a long duration of time, so it's better to set it to a frequency
         * which is specified in the freq-table. This also makes cpufreq
         * stats inconsistent, as cpufreq-stats would fail to register
         * because the current frequency of the CPU isn't found in the
         * freq-table.
         *
         * Because we don't want this change to affect the boot process
         * badly, we go for the next freq which is >= policy->cur ('cur'
         * must be set by now, otherwise we will end up as NULL). If no
         * such freq is found, we will move to the min freq instead of
         * taking the maximum of the present freqs.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_destroy_policy;

                cpufreq_stats_create_table(policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                goto out_destroy_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        if (cpufreq_thermal_control_enabled(cpufreq_driver))
                policy->cdev = of_cpufreq_cooling_register(policy);

        pr_debug("initialization complete\n");

        return 0;

out_destroy_policy:
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, get_cpu_device(j));

        up_write(&policy->rwsem);

out_exit_policy:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

out_free_policy:
        cpufreq_policy_free(policy);
        return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        struct cpufreq_policy *policy;
        unsigned cpu = dev->id;
        int ret;

        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

        if (cpu_online(cpu)) {
                ret = cpufreq_online(cpu);
                if (ret)
                        return ret;
        }

        /* Create sysfs link on CPU registration */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy)
                add_cpu_dev_symlink(policy, cpu);

        return 0;
}

static int cpufreq_offline(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        int ret;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return 0;
        }

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
                else
                        policy->last_policy = policy->policy;
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = cpufreq_start_governor(policy);
                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }

                goto unlock;
        }

        if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
                cpufreq_cooling_unregister(policy->cdev);
                policy->cdev = NULL;
        }

        if (cpufreq_driver->stop_cpu)
                cpufreq_driver->stop_cpu(policy);

        if (has_target())
                cpufreq_exit_governor(policy);

        /*
         * Perform the ->offline() during light-weight tear-down, as
         * that allows fast recovery when the CPU comes back.
         */
        if (cpufreq_driver->offline) {
                cpufreq_driver->offline(policy);
        } else if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }

unlock:
        up_write(&policy->rwsem);
        return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                return;

        if (cpu_online(cpu))
                cpufreq_offline(cpu);

        cpumask_clear_cpu(cpu, policy->real_cpus);
        remove_cpu_dev_symlink(policy, dev);

        if (cpumask_empty(policy->real_cpus)) {
                /* We did light-weight exit earlier, do full tear down now */
                if (cpufreq_driver->offline)
                        cpufreq_driver->exit(policy);

                cpufreq_policy_free(policy);
        }
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: CPU frequency the CPU actually runs at.
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
{
        struct cpufreq_freqs freqs;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                 policy->cur, new_freq);

        freqs.old = policy->cur;
        freqs.new = new_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
        unsigned int new_freq;

        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;

        /*
         * If fast frequency switching is used with the given policy, the check
         * against policy->cur is pointless, so skip it in that case too.
         */
        if (policy->fast_switch_enabled || !has_target())
                return new_freq;

        if (policy->cur != new_freq) {
                cpufreq_out_of_sync(policy, new_freq);
                if (update)
                        schedule_work(&policy->update);
        }

        return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
                ret_freq = cpufreq_driver->get(cpu);
                read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                return ret_freq;
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
        if (unlikely(policy_is_inactive(policy)))
                return 0;

        return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU's current frequency as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                down_read(&policy->rwsem);
                if (cpufreq_driver->get)
                        ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);

                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
        int ret;

        if (!policy->suspend_freq) {
                pr_debug("%s: suspend_freq not defined\n", __func__);
                return 0;
        }

        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
                 policy->suspend_freq);

        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
                                      CPUFREQ_RELATION_H);
        if (ret)
                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
                       __func__, policy->suspend_freq, ret);

        return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
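
/*
 * Example (illustrative sketch, hypothetical driver): a driver opts into the
 * generic suspend handling by picking a suspend frequency in its ->init()
 * callback and wiring up the ->suspend() hook:
 *
 *      static int my_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              ...
 *              policy->suspend_freq = policy->max;
 *              return 0;
 *      }
 *
 *      static struct cpufreq_driver my_driver = {
 *              ...
 *              .suspend = cpufreq_generic_suspend,
 *      };
 */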

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in the suspend
 * cycle. Some of the devices (like i2c, regulators, etc.) used for changing
 * the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!cpufreq_driver)
                return;

        if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;

        pr_debug("%s: Suspending Governors\n", __func__);

        for_each_active_policy(policy) {
                if (has_target()) {
                        down_write(&policy->rwsem);
                        cpufreq_stop_governor(policy);
                        up_write(&policy->rwsem);
                }

                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %s\n", __func__,
                               cpufreq_driver->name);
        }

suspend:
        cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;
        int ret;

        if (!cpufreq_driver)
                return;

        if (unlikely(!cpufreq_suspended))
                return;

        cpufreq_suspended = false;

        if (!has_target() && !cpufreq_driver->resume)
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        for_each_active_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                               policy);
                } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);

                        if (ret)
                                pr_err("%s: Failed to start governor for policy: %p\n",
                                       __func__, policy);
                }
        }
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->name;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
        if (cpufreq_driver)
                return cpufreq_driver->driver_data;

        return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                mutex_lock(&cpufreq_fast_switch_lock);

                if (cpufreq_fast_switch_count > 0) {
                        mutex_unlock(&cpufreq_fast_switch_lock);
                        return -EBUSY;
                }
                ret = srcu_notifier_chain_register(
                                &cpufreq_transition_notifier_list, nb);
                if (!ret)
                        cpufreq_fast_switch_count--;

                mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_register(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
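
/*
 * Example (illustrative, hypothetical client): a transition notifier that
 * logs POSTCHANGE events; "my_transition_cb" and "my_nb" are made-up names:
 *
 *      static int my_transition_cb(struct notifier_block *nb,
 *                                  unsigned long state, void *data)
 *      {
 *              struct cpufreq_freqs *freqs = data;
 *
 *              if (state == CPUFREQ_POSTCHANGE)
 *                      pr_info("CPU%u: %u -> %u kHz\n",
 *                              freqs->policy->cpu, freqs->old, freqs->new);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_transition_cb,
 *      };
 *
 *      cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Note that registration fails with -EBUSY while fast switching is enabled
 * (see cpufreq_enable_fast_switch() above).
 */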

/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
        int ret;

        if (cpufreq_disabled())
                return -EINVAL;

        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                mutex_lock(&cpufreq_fast_switch_lock);

                ret = srcu_notifier_chain_unregister(
                                &cpufreq_transition_notifier_list, nb);
                if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
                        cpufreq_fast_switch_count++;

                mutex_unlock(&cpufreq_fast_switch_lock);
                break;
        case CPUFREQ_POLICY_NOTIFIER:
                ret = blocking_notifier_chain_unregister(
                                &cpufreq_policy_notifier_list, nb);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical
 * sections and it is expected to select the minimum available frequency
 * greater than or equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be
 * invoked twice in parallel for the same policy and that it will never be
 * called in parallel with either ->target() or ->target_index() for the
 * same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);

        return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
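
/*
 * Example (illustrative governor fragment): a schedutil-style fast path is
 * expected to call this from scheduler context, without sleeping, roughly:
 *
 *      if (policy->fast_switch_enabled) {
 *              freq = cpufreq_driver_fast_switch(policy, next_freq);
 *              if (freq)
 *                      policy->cur = freq;
 *      }
 *
 * where "next_freq" is whatever frequency the governor computed.
 */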

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
                                 struct cpufreq_freqs *freqs, int index)
{
        int ret;

        freqs->new = cpufreq_driver->get_intermediate(policy, index);

        /* We don't need to switch to intermediate freq */
        if (!freqs->new)
                return 0;

        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
                 __func__, policy->cpu, freqs->old, freqs->new);

        cpufreq_freq_transition_begin(policy, freqs);
        ret = cpufreq_driver->target_intermediate(policy, index);
        cpufreq_freq_transition_end(policy, freqs, ret);

        if (ret)
                pr_err("%s: Failed to change to intermediate frequency: %d\n",
                       __func__, ret);

        return ret;
}

static int __target_index(struct cpufreq_policy *policy, int index)
{
        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
        unsigned int intermediate_freq = 0;
        unsigned int newfreq = policy->freq_table[index].frequency;
        int retval = -EINVAL;
        bool notify;

        if (newfreq == policy->cur)
                return 0;

        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
        if (notify) {
                /* Handle switching to intermediate frequency */
                if (cpufreq_driver->get_intermediate) {
                        retval = __target_intermediate(policy, &freqs, index);
                        if (retval)
                                return retval;

                        intermediate_freq = freqs.new;
                        /* Set old freq to intermediate */
                        if (intermediate_freq)
                                freqs.old = freqs.new;
                }

                freqs.new = newfreq;
                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
                         __func__, policy->cpu, freqs.old, freqs.new);

                cpufreq_freq_transition_begin(policy, &freqs);
        }

        retval = cpufreq_driver->target_index(policy, index);
        if (retval)
                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
                       retval);

        if (notify) {
                cpufreq_freq_transition_end(policy, &freqs, retval);

                /*
                 * Failed after setting to intermediate freq? The driver
                 * should have reverted back to the initial frequency and
                 * so should we. Check here for intermediate_freq instead
                 * of get_intermediate, in case we haven't switched to the
                 * intermediate freq at all.
                 */
                if (unlikely(retval && intermediate_freq)) {
                        freqs.old = intermediate_freq;
                        freqs.new = policy->restore_freq;
                        cpufreq_freq_transition_begin(policy, &freqs);
                        cpufreq_freq_transition_end(policy, &freqs, 0);
                }
        }

        return retval;
}

int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
        unsigned int old_target_freq = target_freq;
        int index;

        if (cpufreq_disabled())
                return -ENODEV;

        /* Make sure that target_freq is within supported range */
        target_freq = clamp_val(target_freq, policy->min, policy->max);

        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
                 policy->cpu, target_freq, relation, old_target_freq);

        /*
         * This might look like a redundant call as we are checking it again
         * after finding the index. But it is left intentionally for cases
         * where exactly the same freq is requested again, so that wastage
         * of time can be avoided.
         */
        if (target_freq == policy->cur)
                return 0;

        /* Save last value to restore later on errors */
        policy->restore_freq = policy->cur;

        if (cpufreq_driver->target)
                return cpufreq_driver->target(policy, target_freq, relation);

        if (!cpufreq_driver->target_index)
                return -EINVAL;

        index = cpufreq_frequency_table_target(policy, target_freq, relation);

        return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        int ret;

        down_write(&policy->rwsem);

        ret = __cpufreq_driver_target(policy, target_freq, relation);

        up_write(&policy->rwsem);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
        return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
        int ret;

        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;

        /*
         * Governor might not be initiated here if ACPI _PPC changed
         * notification happened, so check it.
         */
        if (!policy->governor)
                return -EINVAL;

        /* Platform doesn't want dynamic frequency switching ? */
        if (policy->governor->dynamic_switching &&
            cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
                struct cpufreq_governor *gov = cpufreq_fallback_governor();

                if (gov) {
                        pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
                } else {
                        return -EINVAL;
                }
        }

        if (!try_module_get(policy->governor->owner))
                return -EINVAL;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->init) {
                ret = policy->governor->init(policy);
                if (ret) {
                        module_put(policy->governor->owner);
                        return ret;
                }
        }

        return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->exit)
                policy->governor->exit(policy);

        module_put(policy->governor->owner);
}

static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
        int ret;

        if (cpufreq_suspended)
                return 0;

        if (!policy->governor)
                return -EINVAL;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (cpufreq_driver->get)
                cpufreq_verify_current_freq(policy, false);

        if (policy->governor->start) {
                ret = policy->governor->start(policy);
                if (ret)
                        return ret;
        }

        if (policy->governor->limits)
                policy->governor->limits(policy);

        return 0;
}

static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->stop)
                policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        if (cpufreq_suspended || !policy->governor)
                return;

        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

        if (policy->governor->limits)
                policy->governor->limits(policy);
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
        int err;

        if (!governor)
                return -EINVAL;

        if (cpufreq_disabled())
                return -ENODEV;

        mutex_lock(&cpufreq_governor_mutex);

        err = -EBUSY;
        if (!find_governor(governor->name)) {
                err = 0;
                list_add(&governor->governor_list, &cpufreq_governor_list);
        }

        mutex_unlock(&cpufreq_governor_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
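
/*
 * Example (illustrative, hypothetical governor module): registering a
 * minimal governor from module init.  Callback bodies are omitted and all
 * "my_*" names are made up.
 *
 *      static struct cpufreq_governor my_governor = {
 *              .name   = "mygov",
 *              .owner  = THIS_MODULE,
 *              .start  = my_gov_start,
 *              .stop   = my_gov_stop,
 *              .limits = my_gov_limits,
 *      };
 *
 *      static int __init my_gov_init(void)
 *      {
 *              return cpufreq_register_governor(&my_governor);
 *      }
 *
 *      static void __exit my_gov_exit(void)
 *      {
 *              cpufreq_unregister_governor(&my_governor);
 *      }
 */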
2302
2303 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2304 {
2305 struct cpufreq_policy *policy;
2306 unsigned long flags;
2307
2308 if (!governor)
2309 return;
2310
2311 if (cpufreq_disabled())
2312 return;
2313
2314
2315 read_lock_irqsave(&cpufreq_driver_lock, flags);
2316 for_each_inactive_policy(policy) {
2317 if (!strcmp(policy->last_governor, governor->name)) {
2318 policy->governor = NULL;
2319 strcpy(policy->last_governor, "\0");
2320 }
2321 }
2322 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2323
2324 mutex_lock(&cpufreq_governor_mutex);
2325 list_del(&governor->governor_list);
2326 mutex_unlock(&cpufreq_governor_mutex);
2327 }
2328 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2343 {
2344 struct cpufreq_policy *cpu_policy;
2345 if (!policy)
2346 return -EINVAL;
2347
2348 cpu_policy = cpufreq_cpu_get(cpu);
2349 if (!cpu_policy)
2350 return -EINVAL;
2351
2352 memcpy(policy, cpu_policy, sizeof(*policy));
2353
2354 cpufreq_cpu_put(cpu_policy);
2355 return 0;
2356 }
2357 EXPORT_SYMBOL(cpufreq_get_policy);
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2375 struct cpufreq_governor *new_gov,
2376 unsigned int new_pol)
2377 {
2378 struct cpufreq_policy_data new_data;
2379 struct cpufreq_governor *old_gov;
2380 int ret;
2381
2382 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2383 new_data.freq_table = policy->freq_table;
2384 new_data.cpu = policy->cpu;
2385
2386 	/*
2387 	 * PM QoS provides the aggregated min/max requests to apply here.
2388 	 */
2389 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2390 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2391
2392 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2393 new_data.cpu, new_data.min, new_data.max);
2394
2395 	/* Verify that the CPU speed can be set within these limits. */
2396 ret = cpufreq_driver->verify(&new_data);
2397 if (ret)
2398 return ret;
2399
2400 policy->min = new_data.min;
2401 policy->max = new_data.max;
2402 trace_cpu_frequency_limits(policy);
2403
2404 policy->cached_target_freq = UINT_MAX;
2405
2406 pr_debug("new min and max freqs are %u - %u kHz\n",
2407 policy->min, policy->max);
2408
2409 if (cpufreq_driver->setpolicy) {
2410 policy->policy = new_pol;
2411 pr_debug("setting range\n");
2412 return cpufreq_driver->setpolicy(policy);
2413 }
2414
2415 if (new_gov == policy->governor) {
2416 pr_debug("governor limits update\n");
2417 cpufreq_governor_limits(policy);
2418 return 0;
2419 }
2420
2421 pr_debug("governor switch\n");
2422
2423 	/* save old, working values */
2424 old_gov = policy->governor;
2425
2426 if (old_gov) {
2427 cpufreq_stop_governor(policy);
2428 cpufreq_exit_governor(policy);
2429 }
2430
2431 	/* start new governor */
2432 policy->governor = new_gov;
2433 ret = cpufreq_init_governor(policy);
2434 if (!ret) {
2435 ret = cpufreq_start_governor(policy);
2436 if (!ret) {
2437 pr_debug("governor change\n");
2438 sched_cpufreq_governor_change(policy, old_gov);
2439 return 0;
2440 }
2441 cpufreq_exit_governor(policy);
2442 }
2443
2444 	/* new governor failed, so re-start old one */
2445 pr_debug("starting governor %s failed\n", policy->governor->name);
2446 if (old_gov) {
2447 policy->governor = old_gov;
2448 if (cpufreq_init_governor(policy))
2449 policy->governor = NULL;
2450 else
2451 cpufreq_start_governor(policy);
2452 }
2453
2454 return ret;
2455 }
2456
2457 /**
2458  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2459  * @cpu: CPU to re-evaluate the policy for.
2460  *
2461  * Update the current frequency for the cpufreq policy of @cpu and use
2462  * cpufreq_set_policy() to re-apply the min and max limits, which triggers
2463  * the evaluation of policy notifiers and the cpufreq driver's ->verify()
2464  * callback for the policy in question, among other things.
2465  */
2466 void cpufreq_update_policy(unsigned int cpu)
2467 {
2468 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2469
2470 if (!policy)
2471 return;
2472
2473 	/*
2474 	 * BIOS might change freq behind our back
2475 	 * -> ask driver for current freq and notify governors about a change
2476 	 */
2477 if (cpufreq_driver->get && has_target() &&
2478 (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2479 goto unlock;
2480
2481 refresh_frequency_limits(policy);
2482
2483 unlock:
2484 cpufreq_cpu_release(policy);
2485 }
2486 EXPORT_SYMBOL(cpufreq_update_policy);
2487
2488 /**
2489  * cpufreq_update_limits - Update policy limits for a given CPU.
2490  * @cpu: CPU to update the policy limits for.
2491  *
2492  * Invoke the driver's ->update_limits callback if present or call
2493  * cpufreq_update_policy() for @cpu.
2494  */
2495 void cpufreq_update_limits(unsigned int cpu)
2496 {
2497 if (cpufreq_driver->update_limits)
2498 cpufreq_driver->update_limits(cpu);
2499 else
2500 cpufreq_update_policy(cpu);
2501 }
2502 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
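/*
 * Example (illustrative sketch, not part of this file; the handler name is
 * hypothetical): a driver that learns out of band, e.g. from a thermal or
 * firmware interrupt, that the allowed frequency range changed would call
 * cpufreq_update_limits() from its handler.  Drivers without an
 * ->update_limits() callback fall back to the full cpufreq_update_policy()
 * pass above.
 */
#if 0
static void example_limit_change_handler(unsigned int cpu)
{
	/* Re-evaluate the policy limits for the affected CPU. */
	cpufreq_update_limits(cpu);
}
#endif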
2503
2504 /*********************************************************************
2505  *                               BOOST                               *
2506  *********************************************************************/
2507 static int cpufreq_boost_set_sw(int state)
2508 {
2509 struct cpufreq_policy *policy;
2510
2511 for_each_active_policy(policy) {
2512 int ret;
2513
2514 if (!policy->freq_table)
2515 return -ENXIO;
2516
2517 ret = cpufreq_frequency_table_cpuinfo(policy,
2518 policy->freq_table);
2519 if (ret) {
2520 pr_err("%s: Policy frequency update failed\n",
2521 __func__);
2522 return ret;
2523 }
2524
2525 ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2526 if (ret < 0)
2527 return ret;
2528 }
2529
2530 return 0;
2531 }
2532
2533 int cpufreq_boost_trigger_state(int state)
2534 {
2535 unsigned long flags;
2536 int ret = 0;
2537
2538 if (cpufreq_driver->boost_enabled == state)
2539 return 0;
2540
2541 write_lock_irqsave(&cpufreq_driver_lock, flags);
2542 cpufreq_driver->boost_enabled = state;
2543 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2544
2545 ret = cpufreq_driver->set_boost(state);
2546 if (ret) {
2547 write_lock_irqsave(&cpufreq_driver_lock, flags);
2548 cpufreq_driver->boost_enabled = !state;
2549 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2550
2551 pr_err("%s: Cannot %s BOOST\n",
2552 __func__, state ? "enable" : "disable");
2553 }
2554
2555 return ret;
2556 }
2557
2558 static bool cpufreq_boost_supported(void)
2559 {
2560 return cpufreq_driver->set_boost;
2561 }
2562
2563 static int create_boost_sysfs_file(void)
2564 {
2565 int ret;
2566
2567 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2568 if (ret)
2569 pr_err("%s: cannot register global BOOST sysfs file\n",
2570 __func__);
2571
2572 return ret;
2573 }
2574
2575 static void remove_boost_sysfs_file(void)
2576 {
2577 if (cpufreq_boost_supported())
2578 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2579 }
2580
2581 int cpufreq_enable_boost_support(void)
2582 {
2583 if (!cpufreq_driver)
2584 return -EINVAL;
2585
2586 if (cpufreq_boost_supported())
2587 return 0;
2588
2589 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2590
2591 	/* This will get removed on driver unregister */
2592 return create_boost_sysfs_file();
2593 }
2594 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
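/*
 * Example (illustrative sketch, not part of this file;
 * example_has_boost_freqs is a hypothetical driver flag): a driver whose
 * frequency table carries boost entries but which has no native ->set_boost
 * callback can opt into the software boost path from its ->init() callback;
 * this installs cpufreq_boost_set_sw() and creates the global sysfs "boost"
 * attribute.
 */
#if 0
static int example_driver_init(struct cpufreq_policy *policy)
{
	/* ... populate policy->freq_table, including boost rows ... */

	if (example_has_boost_freqs)
		return cpufreq_enable_boost_support();

	return 0;
}
#endif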
2595
2596 int cpufreq_boost_enabled(void)
2597 {
2598 return cpufreq_driver->boost_enabled;
2599 }
2600 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2601
2602 /*********************************************************************
2603  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2604  *********************************************************************/
2605 static enum cpuhp_state hp_online;
2606
2607 static int cpuhp_cpufreq_online(unsigned int cpu)
2608 {
2609 cpufreq_online(cpu);
2610
2611 return 0;
2612 }
2613
2614 static int cpuhp_cpufreq_offline(unsigned int cpu)
2615 {
2616 cpufreq_offline(cpu);
2617
2618 return 0;
2619 }
2620
2621 /**
2622  * cpufreq_register_driver - register a CPU Frequency driver
2623  * @driver_data: A struct cpufreq_driver containing the driver's
2624  *	callbacks and parameters.
2625  *
2626  * Registers a CPU Frequency driver with this core code.  Returns zero
2627  * on success, -EEXIST when another driver is already registered (and
2628  * not unregistered in the meantime), -EPROBE_DEFER when CPU devices
2629  * are not available yet, and -EINVAL on malformed driver data.
2630  */
2631 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2632 {
2633 unsigned long flags;
2634 int ret;
2635
2636 if (cpufreq_disabled())
2637 return -ENODEV;
2638
2639 	/*
2640 	 * The cpufreq core depends heavily on the availability of device
2641 	 * structures, so make sure they are available before proceeding.
2642 	 */
2643 if (!get_cpu_device(0))
2644 return -EPROBE_DEFER;
2645
2646 if (!driver_data || !driver_data->verify || !driver_data->init ||
2647 !(driver_data->setpolicy || driver_data->target_index ||
2648 driver_data->target) ||
2649 (driver_data->setpolicy && (driver_data->target_index ||
2650 driver_data->target)) ||
2651 (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2652 (!driver_data->online != !driver_data->offline))
2653 return -EINVAL;
2654
2655 pr_debug("trying to register driver %s\n", driver_data->name);
2656
2657 	/* Protect against concurrent CPU online/offline. */
2658 cpus_read_lock();
2659
2660 write_lock_irqsave(&cpufreq_driver_lock, flags);
2661 if (cpufreq_driver) {
2662 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2663 ret = -EEXIST;
2664 goto out;
2665 }
2666 cpufreq_driver = driver_data;
2667 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2668
2669 if (driver_data->setpolicy)
2670 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2671
2672 if (cpufreq_boost_supported()) {
2673 ret = create_boost_sysfs_file();
2674 if (ret)
2675 goto err_null_driver;
2676 }
2677
2678 ret = subsys_interface_register(&cpufreq_interface);
2679 if (ret)
2680 goto err_boost_unreg;
2681
2682 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2683 list_empty(&cpufreq_policy_list)) {
2684 		/* if all ->init() calls failed, unregister */
2685 ret = -ENODEV;
2686 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2687 driver_data->name);
2688 goto err_if_unreg;
2689 }
2690
2691 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2692 "cpufreq:online",
2693 cpuhp_cpufreq_online,
2694 cpuhp_cpufreq_offline);
2695 if (ret < 0)
2696 goto err_if_unreg;
2697 hp_online = ret;
2698 ret = 0;
2699
2700 pr_debug("driver %s up and running\n", driver_data->name);
2701 goto out;
2702
2703 err_if_unreg:
2704 subsys_interface_unregister(&cpufreq_interface);
2705 err_boost_unreg:
2706 remove_boost_sysfs_file();
2707 err_null_driver:
2708 write_lock_irqsave(&cpufreq_driver_lock, flags);
2709 cpufreq_driver = NULL;
2710 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2711 out:
2712 cpus_read_unlock();
2713 return ret;
2714 }
2715 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
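/*
 * Example (illustrative sketch, not part of this file; all example_* names
 * are hypothetical): a minimal frequency-table driver registration.  Note
 * the validation rules enforced above: ->verify and ->init are mandatory,
 * exactly one of ->setpolicy or ->target/->target_index must be set, and
 * ->get_intermediate/->target_intermediate as well as ->online/->offline
 * must be provided in pairs.
 */
#if 0
static struct clk *example_clk;	/* hypothetical clock handle, set at probe */

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },	/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	policy->clk = example_clk;
	/* 100000 ns transition latency, for illustration only */
	cpufreq_generic_init(policy, example_freq_table, 100000);
	return 0;
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Table frequencies are in kHz; clk_set_rate() wants Hz. */
	return clk_set_rate(policy->clk,
			    policy->freq_table[index].frequency * 1000);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.get		= cpufreq_generic_get,
	.init		= example_init,
	.attr		= cpufreq_generic_attr,
};

static int __init example_cpufreq_probe(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_probe);
#endif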
2716
2717 /**
2718  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2719  * @driver: the driver being unregistered
2720  *
2721  * Unregister the current CPUFreq driver.  Only call this if the driver
2722  * in question was registered successfully beforehand; -EINVAL is
2723  * returned otherwise and the registered driver is left untouched.
2724  */
2725 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2726 {
2727 unsigned long flags;
2728
2729 if (!cpufreq_driver || (driver != cpufreq_driver))
2730 return -EINVAL;
2731
2732 pr_debug("unregistering driver %s\n", driver->name);
2733
2734 	/* Protect against concurrent cpu hotplug */
2735 cpus_read_lock();
2736 subsys_interface_unregister(&cpufreq_interface);
2737 remove_boost_sysfs_file();
2738 cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2739
2740 write_lock_irqsave(&cpufreq_driver_lock, flags);
2741
2742 cpufreq_driver = NULL;
2743
2744 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2745 cpus_read_unlock();
2746
2747 return 0;
2748 }
2749 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
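/*
 * Example (illustrative sketch, not part of this file): the matching
 * teardown for the hypothetical registration sketch shown after
 * cpufreq_register_driver() above.
 */
#if 0
static void __exit example_cpufreq_remove(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_remove);
#endif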
2750
2751 struct kobject *cpufreq_global_kobject;
2752 EXPORT_SYMBOL(cpufreq_global_kobject);
2753
2754 static int __init cpufreq_core_init(void)
2755 {
2756 if (cpufreq_disabled())
2757 return -ENODEV;
2758
2759 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2760 BUG_ON(!cpufreq_global_kobject);
2761
2762 return 0;
2763 }
2764 module_param(off, int, 0444);
2765 core_initcall(cpufreq_core_init);