This source file includes the following definitions:
- clk_pm_runtime_get
- clk_pm_runtime_put
- clk_prepare_lock
- clk_prepare_unlock
- clk_enable_lock
- clk_enable_unlock
- clk_core_rate_is_protected
- clk_core_is_prepared
- clk_core_is_enabled
- __clk_get_name
- clk_hw_get_name
- __clk_get_hw
- clk_hw_get_num_parents
- clk_hw_get_parent
- __clk_lookup_subtree
- clk_core_lookup
- of_parse_clkspec
- of_clk_get_hw_from_clkspec
- clk_core_get
- clk_core_fill_parent_index
- clk_core_get_parent_by_index
- clk_hw_get_parent_by_index
- __clk_get_enable_count
- clk_core_get_rate_nolock
- clk_hw_get_rate
- __clk_get_accuracy
- __clk_get_flags
- clk_hw_get_flags
- clk_hw_is_prepared
- clk_hw_rate_is_protected
- clk_hw_is_enabled
- __clk_is_enabled
- mux_is_better_rate
- clk_mux_determine_rate_flags
- __clk_lookup
- clk_core_get_boundaries
- clk_hw_set_rate_range
- __clk_mux_determine_rate
- __clk_mux_determine_rate_closest
- clk_core_rate_unprotect
- clk_core_rate_nuke_protect
- clk_rate_exclusive_put
- clk_core_rate_protect
- clk_core_rate_restore_protect
- clk_rate_exclusive_get
- clk_core_unprepare
- clk_core_unprepare_lock
- clk_unprepare
- clk_core_prepare
- clk_core_prepare_lock
- clk_prepare
- clk_core_disable
- clk_core_disable_lock
- clk_disable
- clk_core_enable
- clk_core_enable_lock
- clk_gate_restore_context
- clk_core_save_context
- clk_core_restore_context
- clk_save_context
- clk_restore_context
- clk_enable
- clk_core_prepare_enable
- clk_core_disable_unprepare
- clk_unprepare_unused_subtree
- clk_disable_unused_subtree
- clk_ignore_unused_setup
- clk_disable_unused
- clk_core_determine_round_nolock
- clk_core_init_rate_req
- clk_core_can_round
- clk_core_round_rate_nolock
- __clk_determine_rate
- clk_hw_round_rate
- clk_round_rate
- __clk_notify
- __clk_recalc_accuracies
- clk_core_get_accuracy
- clk_get_accuracy
- clk_recalc
- __clk_recalc_rates
- clk_core_get_rate
- clk_get_rate
- clk_fetch_parent_index
- clk_core_update_orphan_status
- clk_reparent
- __clk_set_parent_before
- __clk_set_parent_after
- __clk_set_parent
- __clk_speculate_rates
- clk_calc_subtree
- clk_calc_new_rates
- clk_propagate_rate_change
- clk_change_rate
- clk_core_req_round_rate_nolock
- clk_core_set_rate_nolock
- clk_set_rate
- clk_set_rate_exclusive
- clk_set_rate_range
- clk_set_min_rate
- clk_set_max_rate
- clk_get_parent
- __clk_init_parent
- clk_core_reparent
- clk_hw_reparent
- clk_has_parent
- clk_core_set_parent_nolock
- clk_hw_set_parent
- clk_set_parent
- clk_core_set_phase_nolock
- clk_set_phase
- clk_core_get_phase
- clk_get_phase
- clk_core_reset_duty_cycle_nolock
- clk_core_update_duty_cycle_nolock
- clk_core_update_duty_cycle_parent_nolock
- clk_core_set_duty_cycle_nolock
- clk_core_set_duty_cycle_parent_nolock
- clk_set_duty_cycle
- clk_core_get_scaled_duty_cycle
- clk_get_scaled_duty_cycle
- clk_is_match
- clk_summary_show_one
- clk_summary_show_subtree
- clk_summary_show
- clk_dump_one
- clk_dump_subtree
- clk_dump_show
- clk_flags_show
- possible_parent_show
- possible_parents_show
- current_parent_show
- clk_duty_cycle_show
- clk_min_rate_show
- clk_max_rate_show
- clk_debug_create_one
- clk_debug_register
- clk_debug_unregister
- clk_debug_init
- clk_debug_register
- clk_debug_reparent
- clk_debug_unregister
- clk_core_reparent_orphans_nolock
- __clk_core_init
- clk_core_link_consumer
- clk_core_unlink_consumer
- alloc_clk
- free_clk
- clk_hw_create_clk
- clk_cpy_name
- clk_core_populate_parent_map
- clk_core_free_parent_map
- __clk_register
- dev_or_parent_of_node
- clk_register
- clk_hw_register
- of_clk_hw_register
- __clk_release
- clk_nodrv_prepare_enable
- clk_nodrv_disable_unprepare
- clk_nodrv_set_rate
- clk_nodrv_set_parent
- clk_core_evict_parent_cache_subtree
- clk_core_evict_parent_cache
- clk_unregister
- clk_hw_unregister
- devm_clk_release
- devm_clk_hw_release
- devm_clk_register
- devm_clk_hw_register
- devm_clk_match
- devm_clk_hw_match
- devm_clk_unregister
- devm_clk_hw_unregister
- __clk_put
- clk_notifier_register
- clk_notifier_unregister
- clk_core_reparent_orphans
- of_clk_src_simple_get
- of_clk_hw_simple_get
- of_clk_src_onecell_get
- of_clk_hw_onecell_get
- of_clk_add_provider
- of_clk_add_hw_provider
- devm_of_clk_release_provider
- get_clk_provider_node
- devm_of_clk_add_hw_provider
- of_clk_del_provider
- devm_clk_provider_match
- devm_of_clk_del_provider
- of_parse_clkspec
- __of_clk_get_hw_from_provider
- of_clk_get_hw_from_clkspec
- of_clk_get_from_provider
- of_clk_get_hw
- __of_clk_get
- of_clk_get
- of_clk_get_by_name
- of_clk_get_parent_count
- of_clk_get_parent_name
- of_clk_parent_fill
- parent_ready
- of_clk_detect_critical
- of_clk_init
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5  *
6  * Standard functionality for the common clock API. See
7  * Documentation/driver-api/clk.rst
8  */
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/clk/clk-conf.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/spinlock.h>
15 #include <linux/err.h>
16 #include <linux/list.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/sched.h>
23 #include <linux/clkdev.h>
24
25 #include "clk.h"
26
27 static DEFINE_SPINLOCK(enable_lock);
28 static DEFINE_MUTEX(prepare_lock);
29
30 static struct task_struct *prepare_owner;
31 static struct task_struct *enable_owner;
32
33 static int prepare_refcnt;
34 static int enable_refcnt;
35
36 static HLIST_HEAD(clk_root_list);
37 static HLIST_HEAD(clk_orphan_list);
38 static LIST_HEAD(clk_notifier_list);
39
40 static struct hlist_head *all_lists[] = {
41 &clk_root_list,
42 &clk_orphan_list,
43 NULL,
44 };
45
46 /***    private data structures    ***/
47
48 struct clk_parent_map {
49 const struct clk_hw *hw;
50 struct clk_core *core;
51 const char *fw_name;
52 const char *name;
53 int index;
54 };
55
56 struct clk_core {
57 const char *name;
58 const struct clk_ops *ops;
59 struct clk_hw *hw;
60 struct module *owner;
61 struct device *dev;
62 struct device_node *of_node;
63 struct clk_core *parent;
64 struct clk_parent_map *parents;
65 u8 num_parents;
66 u8 new_parent_index;
67 unsigned long rate;
68 unsigned long req_rate;
69 unsigned long new_rate;
70 struct clk_core *new_parent;
71 struct clk_core *new_child;
72 unsigned long flags;
73 bool orphan;
74 bool rpm_enabled;
75 unsigned int enable_count;
76 unsigned int prepare_count;
77 unsigned int protect_count;
78 unsigned long min_rate;
79 unsigned long max_rate;
80 unsigned long accuracy;
81 int phase;
82 struct clk_duty duty;
83 struct hlist_head children;
84 struct hlist_node child_node;
85 struct hlist_head clks;
86 unsigned int notifier_count;
87 #ifdef CONFIG_DEBUG_FS
88 struct dentry *dentry;
89 struct hlist_node debug_node;
90 #endif
91 struct kref ref;
92 };
93
94 #define CREATE_TRACE_POINTS
95 #include <trace/events/clk.h>
96
97 struct clk {
98 struct clk_core *core;
99 struct device *dev;
100 const char *dev_id;
101 const char *con_id;
102 unsigned long min_rate;
103 unsigned long max_rate;
104 unsigned int exclusive_count;
105 struct hlist_node clks_node;
106 };
107
108 /***           runtime pm          ***/
109 static int clk_pm_runtime_get(struct clk_core *core)
110 {
111 int ret;
112
113 if (!core->rpm_enabled)
114 return 0;
115
116 ret = pm_runtime_get_sync(core->dev);
117 if (ret < 0) {
118 pm_runtime_put_noidle(core->dev);
119 return ret;
120 }
121 return 0;
122 }
123
124 static void clk_pm_runtime_put(struct clk_core *core)
125 {
126 if (!core->rpm_enabled)
127 return;
128
129 pm_runtime_put_sync(core->dev);
130 }
131
132 /***           locking             ***/
133 static void clk_prepare_lock(void)
134 {
135 if (!mutex_trylock(&prepare_lock)) {
136 if (prepare_owner == current) {
137 prepare_refcnt++;
138 return;
139 }
140 mutex_lock(&prepare_lock);
141 }
142 WARN_ON_ONCE(prepare_owner != NULL);
143 WARN_ON_ONCE(prepare_refcnt != 0);
144 prepare_owner = current;
145 prepare_refcnt = 1;
146 }
147
148 static void clk_prepare_unlock(void)
149 {
150 WARN_ON_ONCE(prepare_owner != current);
151 WARN_ON_ONCE(prepare_refcnt == 0);
152
153 if (--prepare_refcnt)
154 return;
155 prepare_owner = NULL;
156 mutex_unlock(&prepare_lock);
157 }
158
159 static unsigned long clk_enable_lock(void)
160 __acquires(enable_lock)
161 {
162 unsigned long flags;
163
164 /*
165  * On UP systems, spin_trylock_irqsave() always returns true, even if
166  * we already hold the lock. As a result, the trylock is only
167  * meaningful on SMP; on UP we fall straight into the reentrancy check.
168  */
169 if (!IS_ENABLED(CONFIG_SMP) ||
170 !spin_trylock_irqsave(&enable_lock, flags)) {
171 if (enable_owner == current) {
172 enable_refcnt++;
173 __acquire(enable_lock);
174 if (!IS_ENABLED(CONFIG_SMP))
175 local_save_flags(flags);
176 return flags;
177 }
178 spin_lock_irqsave(&enable_lock, flags);
179 }
180 WARN_ON_ONCE(enable_owner != NULL);
181 WARN_ON_ONCE(enable_refcnt != 0);
182 enable_owner = current;
183 enable_refcnt = 1;
184 return flags;
185 }
186
187 static void clk_enable_unlock(unsigned long flags)
188 __releases(enable_lock)
189 {
190 WARN_ON_ONCE(enable_owner != current);
191 WARN_ON_ONCE(enable_refcnt == 0);
192
193 if (--enable_refcnt) {
194 __release(enable_lock);
195 return;
196 }
197 enable_owner = NULL;
198 spin_unlock_irqrestore(&enable_lock, flags);
199 }
200
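The two locks above implement the framework's split calling contract: prepare_lock is a reentrant mutex guarding the sleepable prepare/unprepare (and rate/parent) paths, while enable_lock is a reentrant spinlock so enable/disable stay usable from atomic context. A minimal consumer sketch of that contract (hypothetical driver code, not part of this file):

    #include <linux/clk.h>
    #include <linux/interrupt.h>

    /* Process context only: clk_prepare() takes prepare_lock, a mutex, and may sleep. */
    static int demo_power_up(struct clk *clk)
    {
            return clk_prepare(clk);
    }

    /* Atomic context is fine: clk_enable() only takes the enable_lock spinlock. */
    static irqreturn_t demo_irq(int irq, void *data)
    {
            struct clk *clk = data;

            clk_enable(clk);        /* legal here because the clk was prepared earlier */
            /* ... kick the hardware ... */
            clk_disable(clk);
            return IRQ_HANDLED;
    }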
201 static bool clk_core_rate_is_protected(struct clk_core *core)
202 {
203 return core->protect_count;
204 }
205
206 static bool clk_core_is_prepared(struct clk_core *core)
207 {
208 bool ret = false;
209
210 /*
211  * .is_prepared is optional for clocks that can prepare;
212  * fall back to the software usage counter if it is missing
213  */
214 if (!core->ops->is_prepared)
215 return core->prepare_count;
216
217 if (!clk_pm_runtime_get(core)) {
218 ret = core->ops->is_prepared(core->hw);
219 clk_pm_runtime_put(core);
220 }
221
222 return ret;
223 }
224
225 static bool clk_core_is_enabled(struct clk_core *core)
226 {
227 bool ret = false;
228
229 /*
230  * .is_enabled is only mandatory for clocks that gate;
231  * fall back to the software usage counter if it is missing
232  */
233 if (!core->ops->is_enabled)
234 return core->enable_count;
235
236 /*
237  * Check that the clock controller's device is runtime active before
238  * calling the .is_enabled callback. If not, assume the clock is
239  * disabled, because we might be called from atomic context where the
240  * controller cannot be powered up to read the gate state.
241  *
242  * pm_runtime_get_noresume() only takes a reference and never resumes
243  * the device, so this check is safe under enable_lock.
244  */
246 if (core->rpm_enabled) {
247 pm_runtime_get_noresume(core->dev);
248 if (!pm_runtime_active(core->dev)) {
249 ret = false;
250 goto done;
251 }
252 }
253
254 ret = core->ops->is_enabled(core->hw);
255 done:
256 if (core->rpm_enabled)
257 pm_runtime_put(core->dev);
258
259 return ret;
260 }
261
262 /***        helper functions       ***/
263
264 const char *__clk_get_name(const struct clk *clk)
265 {
266 return !clk ? NULL : clk->core->name;
267 }
268 EXPORT_SYMBOL_GPL(__clk_get_name);
269
270 const char *clk_hw_get_name(const struct clk_hw *hw)
271 {
272 return hw->core->name;
273 }
274 EXPORT_SYMBOL_GPL(clk_hw_get_name);
275
276 struct clk_hw *__clk_get_hw(struct clk *clk)
277 {
278 return !clk ? NULL : clk->core->hw;
279 }
280 EXPORT_SYMBOL_GPL(__clk_get_hw);
281
282 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
283 {
284 return hw->core->num_parents;
285 }
286 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
287
288 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
289 {
290 return hw->core->parent ? hw->core->parent->hw : NULL;
291 }
292 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
293
294 static struct clk_core *__clk_lookup_subtree(const char *name,
295 struct clk_core *core)
296 {
297 struct clk_core *child;
298 struct clk_core *ret;
299
300 if (!strcmp(core->name, name))
301 return core;
302
303 hlist_for_each_entry(child, &core->children, child_node) {
304 ret = __clk_lookup_subtree(name, child);
305 if (ret)
306 return ret;
307 }
308
309 return NULL;
310 }
311
312 static struct clk_core *clk_core_lookup(const char *name)
313 {
314 struct clk_core *root_clk;
315 struct clk_core *ret;
316
317 if (!name)
318 return NULL;
319
320 /* search the 'proper' clk tree first */
321 hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
322 ret = __clk_lookup_subtree(name, root_clk);
323 if (ret)
324 return ret;
325 }
326
327 /* if not found, then search the orphan tree */
328 hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
329 ret = __clk_lookup_subtree(name, root_clk);
330 if (ret)
331 return ret;
332 }
333
334 return NULL;
335 }
336
337 #ifdef CONFIG_OF
338 static int of_parse_clkspec(const struct device_node *np, int index,
339 const char *name, struct of_phandle_args *out_args);
340 static struct clk_hw *
341 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
342 #else
343 static inline int of_parse_clkspec(const struct device_node *np, int index,
344 const char *name,
345 struct of_phandle_args *out_args)
346 {
347 return -ENOENT;
348 }
349 static inline struct clk_hw *
350 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
351 {
352 return ERR_PTR(-ENOENT);
353 }
354 #endif
355
356 /**
357  * clk_core_get - Find the clk_core parent of a clk
358  * @core: clk to find parent of
359  * @p_index: parent index to search for
360  *
361  * This is the preferred method for clk providers to find the parent of a
362  * clk when that parent is external to the clk controller. The parent_names
363  * array is indexed and treated as a local name matching a string in the
364  * node's 'clock-names' property or as the 'con_id' matching the device's
365  * dev_name() in a clk_lookup. This allows clk providers to use their own
366  * namespace instead of looking for a globally unique parent string.
367  *
368  * For example, a consuming controller's device tree node can map a local
369  * name like "xtal" onto whatever clock another provider node exposes via
370  * its 'clocks'/'clock-names' properties; this function resolves that
371  * reference at parent-lookup time.
372  *
373  * Returns: the parent clk_core, or an ERR_PTR() (e.g. -ENOENT when the
374  * provider or the named clock cannot be found).
375  */
392 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
393 {
394 const char *name = core->parents[p_index].fw_name;
395 int index = core->parents[p_index].index;
396 struct clk_hw *hw = ERR_PTR(-ENOENT);
397 struct device *dev = core->dev;
398 const char *dev_id = dev ? dev_name(dev) : NULL;
399 struct device_node *np = core->of_node;
400 struct of_phandle_args clkspec;
401
402 if (np && (name || index >= 0) &&
403 !of_parse_clkspec(np, index, name, &clkspec)) {
404 hw = of_clk_get_hw_from_clkspec(&clkspec);
405 of_node_put(clkspec.np);
406 } else if (name) {
407 /*
408  * If the DT search above couldn't find the provider, fall back to
409  * looking up via clkdev based clk_lookups.
410  */
411 hw = clk_find_hw(dev_id, name);
412 }
413
414 if (IS_ERR(hw))
415 return ERR_CAST(hw);
416
417 return hw->core;
418 }
419
420 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
421 {
422 struct clk_parent_map *entry = &core->parents[index];
423 struct clk_core *parent = ERR_PTR(-ENOENT);
424
425 if (entry->hw) {
426 parent = entry->hw->core;
427 /*
428  * We have a direct reference but it isn't registered yet?
429  * Orphan it and let clk_reparent() update the orphan status
430  * when the parent is registered.
431  */
432 if (!parent)
433 parent = ERR_PTR(-EPROBE_DEFER);
434 } else {
435 parent = clk_core_get(core, index);
436 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
437 parent = clk_core_lookup(entry->name);
438 }
439
440 /* Only cache it if it's not an error */
441 if (!IS_ERR(parent))
442 entry->core = parent;
443 }
444
445 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
446 u8 index)
447 {
448 if (!core || index >= core->num_parents || !core->parents)
449 return NULL;
450
451 if (!core->parents[index].core)
452 clk_core_fill_parent_index(core, index);
453
454 return core->parents[index].core;
455 }
456
457 struct clk_hw *
458 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
459 {
460 struct clk_core *parent;
461
462 parent = clk_core_get_parent_by_index(hw->core, index);
463
464 return !parent ? NULL : parent->hw;
465 }
466 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
467
468 unsigned int __clk_get_enable_count(struct clk *clk)
469 {
470 return !clk ? 0 : clk->core->enable_count;
471 }
472
473 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
474 {
475 if (!core)
476 return 0;
477
478 if (!core->num_parents || core->parent)
479 return core->rate;
480
481 /*
482  * Clk must have a parent because num_parents > 0 but the parent isn't
483  * known yet. Best to return 0 as the rate of this clk until we can
484  * properly recalc the rate based on the parent's rate.
485  */
486 return 0;
487 }
488
489 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
490 {
491 return clk_core_get_rate_nolock(hw->core);
492 }
493 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
494
495 static unsigned long __clk_get_accuracy(struct clk_core *core)
496 {
497 if (!core)
498 return 0;
499
500 return core->accuracy;
501 }
502
503 unsigned long __clk_get_flags(struct clk *clk)
504 {
505 return !clk ? 0 : clk->core->flags;
506 }
507 EXPORT_SYMBOL_GPL(__clk_get_flags);
508
509 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
510 {
511 return hw->core->flags;
512 }
513 EXPORT_SYMBOL_GPL(clk_hw_get_flags);
514
515 bool clk_hw_is_prepared(const struct clk_hw *hw)
516 {
517 return clk_core_is_prepared(hw->core);
518 }
519 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
520
521 bool clk_hw_rate_is_protected(const struct clk_hw *hw)
522 {
523 return clk_core_rate_is_protected(hw->core);
524 }
525 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
526
527 bool clk_hw_is_enabled(const struct clk_hw *hw)
528 {
529 return clk_core_is_enabled(hw->core);
530 }
531 EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
532
533 bool __clk_is_enabled(struct clk *clk)
534 {
535 if (!clk)
536 return false;
537
538 return clk_core_is_enabled(clk->core);
539 }
540 EXPORT_SYMBOL_GPL(__clk_is_enabled);
541
542 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
543 unsigned long best, unsigned long flags)
544 {
545 if (flags & CLK_MUX_ROUND_CLOSEST)
546 return abs(now - rate) < abs(best - rate);
547
548 return now <= rate && now > best;
549 }
550
551 int clk_mux_determine_rate_flags(struct clk_hw *hw,
552 struct clk_rate_request *req,
553 unsigned long flags)
554 {
555 struct clk_core *core = hw->core, *parent, *best_parent = NULL;
556 int i, num_parents, ret;
557 unsigned long best = 0;
558 struct clk_rate_request parent_req = *req;
559
560 /* if NO_REPARENT flag set, pass through to current parent */
561 if (core->flags & CLK_SET_RATE_NO_REPARENT) {
562 parent = core->parent;
563 if (core->flags & CLK_SET_RATE_PARENT) {
564 ret = __clk_determine_rate(parent ? parent->hw : NULL,
565 &parent_req);
566 if (ret)
567 return ret;
568
569 best = parent_req.rate;
570 } else if (parent) {
571 best = clk_core_get_rate_nolock(parent);
572 } else {
573 best = clk_core_get_rate_nolock(core);
574 }
575
576 goto out;
577 }
578
579 /* find the parent that can provide the fastest rate <= rate */
580 num_parents = core->num_parents;
581 for (i = 0; i < num_parents; i++) {
582 parent = clk_core_get_parent_by_index(core, i);
583 if (!parent)
584 continue;
585
586 if (core->flags & CLK_SET_RATE_PARENT) {
587 parent_req = *req;
588 ret = __clk_determine_rate(parent->hw, &parent_req);
589 if (ret)
590 continue;
591 } else {
592 parent_req.rate = clk_core_get_rate_nolock(parent);
593 }
594
595 if (mux_is_better_rate(req->rate, parent_req.rate,
596 best, flags)) {
597 best_parent = parent;
598 best = parent_req.rate;
599 }
600 }
601
602 if (!best_parent)
603 return -EINVAL;
604
605 out:
606 if (best_parent)
607 req->best_parent_hw = best_parent->hw;
608 req->best_parent_rate = best;
609 req->rate = best;
610
611 return 0;
612 }
613 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
614
615 struct clk *__clk_lookup(const char *name)
616 {
617 struct clk_core *core = clk_core_lookup(name);
618
619 return !core ? NULL : core->hw->clk;
620 }
621
622 static void clk_core_get_boundaries(struct clk_core *core,
623 unsigned long *min_rate,
624 unsigned long *max_rate)
625 {
626 struct clk *clk_user;
627
628 lockdep_assert_held(&prepare_lock);
629
630 *min_rate = core->min_rate;
631 *max_rate = core->max_rate;
632
633 hlist_for_each_entry(clk_user, &core->clks, clks_node)
634 *min_rate = max(*min_rate, clk_user->min_rate);
635
636 hlist_for_each_entry(clk_user, &core->clks, clks_node)
637 *max_rate = min(*max_rate, clk_user->max_rate);
638 }
639
640 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
641 unsigned long max_rate)
642 {
643 hw->core->min_rate = min_rate;
644 hw->core->max_rate = max_rate;
645 }
646 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
647
648 /**
649  * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
650  * @hw: mux type clk to determine rate on
651  * @req: rate request, also used to return preferred parent and frequencies
652  *
653  * Helper for finding the best parent to provide a given frequency. This can
654  * be used directly as a determine_rate callback (e.g. for a mux), or from a
655  * more complex clock that may combine a mux with other operations.
656  *
657  * Returns: 0 on success, -EERROR value on error
658  */
659 int __clk_mux_determine_rate(struct clk_hw *hw,
660 struct clk_rate_request *req)
661 {
662 return clk_mux_determine_rate_flags(hw, req, 0);
663 }
664 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
665
666 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
667 struct clk_rate_request *req)
668 {
669 return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
670 }
671 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
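Providers typically plug these helpers straight into their clk_ops as the determine_rate hook. A sketch of a hypothetical mux that prefers the numerically closest parent rate (demo_mux_get_parent and demo_mux_set_parent are assumed provider callbacks, not defined in this file):

    #include <linux/clk-provider.h>

    static u8 demo_mux_get_parent(struct clk_hw *hw);              /* assumed elsewhere */
    static int demo_mux_set_parent(struct clk_hw *hw, u8 index);   /* assumed elsewhere */

    static const struct clk_ops demo_mux_ops = {
            .get_parent     = demo_mux_get_parent,
            .set_parent     = demo_mux_set_parent,
            /* delegate rate selection to the framework helper above */
            .determine_rate = __clk_mux_determine_rate_closest,
    };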
672
673 /***        clk api        ***/
674
675 static void clk_core_rate_unprotect(struct clk_core *core)
676 {
677 lockdep_assert_held(&prepare_lock);
678
679 if (!core)
680 return;
681
682 if (WARN(core->protect_count == 0,
683 "%s already unprotected\n", core->name))
684 return;
685
686 if (--core->protect_count > 0)
687 return;
688
689 clk_core_rate_unprotect(core->parent);
690 }
691
692 static int clk_core_rate_nuke_protect(struct clk_core *core)
693 {
694 int ret;
695
696 lockdep_assert_held(&prepare_lock);
697
698 if (!core)
699 return -EINVAL;
700
701 if (core->protect_count == 0)
702 return 0;
703
704 ret = core->protect_count;
705 core->protect_count = 1;
706 clk_core_rate_unprotect(core);
707
708 return ret;
709 }
710
711 /**
712  * clk_rate_exclusive_put - release exclusivity over clock rate control
713  * @clk: the clk over which the exclusivity is released
714  *
715  * clk_rate_exclusive_put() completes a critical section during which a clock
716  * consumer cannot tolerate any other consumer making any operation on the
717  * clock which could result in a rate change or rate glitch. Exclusive clocks
718  * cannot have their rate changed, either directly or indirectly due to changes
719  * further up the parent chain of clocks. As a result, clocks up the parent
720  * chain are also placed under exclusive control of the calling consumer.
721  *
722  * If exclusivity is claimed more than once on a clock, even by the same
723  * consumer, the rate effectively gets locked as exclusivity can't be preempted.
724  *
725  * Calls to clk_rate_exclusive_put() must be balanced with calls to
726  * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
727  * return error status.
728  */
729 void clk_rate_exclusive_put(struct clk *clk)
730 {
731 if (!clk)
732 return;
733
734 clk_prepare_lock();
735
736 /*
737  * if there is something wrong with this consumer's protect count,
738  * stop here before messing with the provider
739  */
740 if (WARN_ON(clk->exclusive_count <= 0))
741 goto out;
742
743 clk_core_rate_unprotect(clk->core);
744 clk->exclusive_count--;
745 out:
746 clk_prepare_unlock();
747 }
748 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
749
750 static void clk_core_rate_protect(struct clk_core *core)
751 {
752 lockdep_assert_held(&prepare_lock);
753
754 if (!core)
755 return;
756
757 if (core->protect_count == 0)
758 clk_core_rate_protect(core->parent);
759
760 core->protect_count++;
761 }
762
763 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
764 {
765 lockdep_assert_held(&prepare_lock);
766
767 if (!core)
768 return;
769
770 if (count == 0)
771 return;
772
773 clk_core_rate_protect(core);
774 core->protect_count = count;
775 }
776
777 /**
778  * clk_rate_exclusive_get - get exclusivity over the clk rate control
779  * @clk: the clk over which the exclusivity is claimed
780  *
781  * clk_rate_exclusive_get() begins a critical section during which a clock
782  * consumer cannot tolerate any other consumer making any operation on the
783  * clock which could result in a rate change or rate glitch. Exclusive clocks
784  * cannot have their rate changed, either directly or indirectly due to changes
785  * further up the parent chain of clocks. As a result, clocks up the parent
786  * chain are also placed under exclusive control of the calling consumer.
787  *
788  * If exclusivity is claimed more than once on a clock, even by the same
789  * consumer, the rate effectively gets locked as exclusivity can't be preempted.
790  *
791  * Calls to clk_rate_exclusive_get() should be balanced with calls to
792  * clk_rate_exclusive_put(). Calls to this function may sleep.
793  * Returns 0 on success, -EERROR otherwise
794  */
795 int clk_rate_exclusive_get(struct clk *clk)
796 {
797 if (!clk)
798 return 0;
799
800 clk_prepare_lock();
801 clk_core_rate_protect(clk->core);
802 clk->exclusive_count++;
803 clk_prepare_unlock();
804
805 return 0;
806 }
807 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
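The get/put pair is meant to bracket a glitch-sensitive critical section. A hedged consumer sketch (demo_run_transfer is a hypothetical helper):

    #include <linux/clk.h>

    static int demo_glitch_sensitive_op(struct clk *clk)
    {
            int ret;

            ret = clk_rate_exclusive_get(clk);      /* freeze the rate */
            if (ret)
                    return ret;

            ret = demo_run_transfer();              /* no rate glitches possible here */

            clk_rate_exclusive_put(clk);            /* balance the get */
            return ret;
    }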
808
809 static void clk_core_unprepare(struct clk_core *core)
810 {
811 lockdep_assert_held(&prepare_lock);
812
813 if (!core)
814 return;
815
816 if (WARN(core->prepare_count == 0,
817 "%s already unprepared\n", core->name))
818 return;
819
820 if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
821 "Unpreparing critical %s\n", core->name))
822 return;
823
824 if (core->flags & CLK_SET_RATE_GATE)
825 clk_core_rate_unprotect(core);
826
827 if (--core->prepare_count > 0)
828 return;
829
830 WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
831
832 trace_clk_unprepare(core);
833
834 if (core->ops->unprepare)
835 core->ops->unprepare(core->hw);
836
837 clk_pm_runtime_put(core);
838
839 trace_clk_unprepare_complete(core);
840 clk_core_unprepare(core->parent);
841 }
842
843 static void clk_core_unprepare_lock(struct clk_core *core)
844 {
845 clk_prepare_lock();
846 clk_core_unprepare(core);
847 clk_prepare_unlock();
848 }
849
850 /**
851  * clk_unprepare - undo preparation of a clock source
852  * @clk: the clk being unprepared
853  *
854  * clk_unprepare may sleep, which differentiates it from clk_disable. In a
855  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
856  * if the operation may sleep. One example is a clk which is accessed over
857  * I2c. In the complex case a clk gate operation may require a fast and a slow
858  * part. It is this reason that clk_unprepare and clk_disable are not mutually
859  * exclusive. In fact clk_disable must be called before clk_unprepare.
860  */
861 void clk_unprepare(struct clk *clk)
862 {
863 if (IS_ERR_OR_NULL(clk))
864 return;
865
866 clk_core_unprepare_lock(clk->core);
867 }
868 EXPORT_SYMBOL_GPL(clk_unprepare);
869
870 static int clk_core_prepare(struct clk_core *core)
871 {
872 int ret = 0;
873
874 lockdep_assert_held(&prepare_lock);
875
876 if (!core)
877 return 0;
878
879 if (core->prepare_count == 0) {
880 ret = clk_pm_runtime_get(core);
881 if (ret)
882 return ret;
883
884 ret = clk_core_prepare(core->parent);
885 if (ret)
886 goto runtime_put;
887
888 trace_clk_prepare(core);
889
890 if (core->ops->prepare)
891 ret = core->ops->prepare(core->hw);
892
893 trace_clk_prepare_complete(core);
894
895 if (ret)
896 goto unprepare;
897 }
898
899 core->prepare_count++;
900
901 /*
902  * CLK_SET_RATE_GATE is a special case of clock protection.
903  * Instead of a consumer claiming exclusive rate control, it is
904  * actually the provider which prevents any consumer from making any
905  * operation which could result in a rate change or rate glitch while
906  * the clock is prepared.
907  */
908 if (core->flags & CLK_SET_RATE_GATE)
909 clk_core_rate_protect(core);
910
911 return 0;
912 unprepare:
913 clk_core_unprepare(core->parent);
914 runtime_put:
915 clk_pm_runtime_put(core);
916 return ret;
917 }
918
919 static int clk_core_prepare_lock(struct clk_core *core)
920 {
921 int ret;
922
923 clk_prepare_lock();
924 ret = clk_core_prepare(core);
925 clk_prepare_unlock();
926
927 return ret;
928 }
929
930 /**
931  * clk_prepare - prepare a clock source
932  * @clk: the clk being prepared
933  *
934  * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
935  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
936  * operation may sleep. One example is a clk which is accessed over I2c. In
937  * the complex case a clk ungate operation may require a fast and a slow part.
938  * It is this reason that clk_prepare and clk_enable are not mutually
939  * exclusive. In fact clk_prepare must be called before clk_enable.
940  * Returns 0 on success, -EERROR otherwise.
941  */
942 int clk_prepare(struct clk *clk)
943 {
944 if (!clk)
945 return 0;
946
947 return clk_core_prepare_lock(clk->core);
948 }
949 EXPORT_SYMBOL_GPL(clk_prepare);
950
951 static void clk_core_disable(struct clk_core *core)
952 {
953 lockdep_assert_held(&enable_lock);
954
955 if (!core)
956 return;
957
958 if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
959 return;
960
961 if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
962 "Disabling critical %s\n", core->name))
963 return;
964
965 if (--core->enable_count > 0)
966 return;
967
968 trace_clk_disable_rcuidle(core);
969
970 if (core->ops->disable)
971 core->ops->disable(core->hw);
972
973 trace_clk_disable_complete_rcuidle(core);
974
975 clk_core_disable(core->parent);
976 }
977
978 static void clk_core_disable_lock(struct clk_core *core)
979 {
980 unsigned long flags;
981
982 flags = clk_enable_lock();
983 clk_core_disable(core);
984 clk_enable_unlock(flags);
985 }
986
987 /**
988  * clk_disable - gate a clock
989  * @clk: the clk being gated
990  *
991  * clk_disable must not sleep, which differentiates it from clk_unprepare. In
992  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
993  * clk if the operation is fast and will never sleep. One example is a
994  * SoC-internal clk which is controlled via simple register writes. In the
995  * complex case a clk gate operation may require a fast and a slow part. It is
996  * this reason that clk_unprepare and clk_disable are not mutually exclusive.
997  * In fact clk_disable must be called before clk_unprepare.
998  */
999 void clk_disable(struct clk *clk)
1000 {
1001 if (IS_ERR_OR_NULL(clk))
1002 return;
1003
1004 clk_core_disable_lock(clk->core);
1005 }
1006 EXPORT_SYMBOL_GPL(clk_disable);
1007
1008 static int clk_core_enable(struct clk_core *core)
1009 {
1010 int ret = 0;
1011
1012 lockdep_assert_held(&enable_lock);
1013
1014 if (!core)
1015 return 0;
1016
1017 if (WARN(core->prepare_count == 0,
1018 "Enabling unprepared %s\n", core->name))
1019 return -ESHUTDOWN;
1020
1021 if (core->enable_count == 0) {
1022 ret = clk_core_enable(core->parent);
1023
1024 if (ret)
1025 return ret;
1026
1027 trace_clk_enable_rcuidle(core);
1028
1029 if (core->ops->enable)
1030 ret = core->ops->enable(core->hw);
1031
1032 trace_clk_enable_complete_rcuidle(core);
1033
1034 if (ret) {
1035 clk_core_disable(core->parent);
1036 return ret;
1037 }
1038 }
1039
1040 core->enable_count++;
1041 return 0;
1042 }
1043
1044 static int clk_core_enable_lock(struct clk_core *core)
1045 {
1046 unsigned long flags;
1047 int ret;
1048
1049 flags = clk_enable_lock();
1050 ret = clk_core_enable(core);
1051 clk_enable_unlock(flags);
1052
1053 return ret;
1054 }
1055
1056 /**
1057  * clk_gate_restore_context - restore context for poweroff
1058  * @hw: the clk_hw pointer of clock whose state is to be restored
1059  *
1060  * The clock gate restore context function enables or disables
1061  * the gate clocks based on the enable_count. This is done in cases
1062  * where the clock context is lost and, based on the enable_count,
1063  * the clock either needs to be enabled or disabled. This
1064  * helps restore the state of gate clocks.
1065  */
1066 void clk_gate_restore_context(struct clk_hw *hw)
1067 {
1068 struct clk_core *core = hw->core;
1069
1070 if (core->enable_count)
1071 core->ops->enable(hw);
1072 else
1073 core->ops->disable(hw);
1074 }
1075 EXPORT_SYMBOL_GPL(clk_gate_restore_context);
1076
1077 static int clk_core_save_context(struct clk_core *core)
1078 {
1079 struct clk_core *child;
1080 int ret = 0;
1081
1082 hlist_for_each_entry(child, &core->children, child_node) {
1083 ret = clk_core_save_context(child);
1084 if (ret < 0)
1085 return ret;
1086 }
1087
1088 if (core->ops && core->ops->save_context)
1089 ret = core->ops->save_context(core->hw);
1090
1091 return ret;
1092 }
1093
1094 static void clk_core_restore_context(struct clk_core *core)
1095 {
1096 struct clk_core *child;
1097
1098 if (core->ops && core->ops->restore_context)
1099 core->ops->restore_context(core->hw);
1100
1101 hlist_for_each_entry(child, &core->children, child_node)
1102 clk_core_restore_context(child);
1103 }
1104
1105 /**
1106  * clk_save_context - save clock context for poweroff
1107  *
1108  * Saves the context of the clock register for powerstates in which the
1109  * contents of the registers will be lost. Occurs deep within the suspend
1110  * code. Returns 0 on success.
1111  */
1112 int clk_save_context(void)
1113 {
1114 struct clk_core *clk;
1115 int ret;
1116
1117 hlist_for_each_entry(clk, &clk_root_list, child_node) {
1118 ret = clk_core_save_context(clk);
1119 if (ret < 0)
1120 return ret;
1121 }
1122
1123 hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1124 ret = clk_core_save_context(clk);
1125 if (ret < 0)
1126 return ret;
1127 }
1128
1129 return 0;
1130 }
1131 EXPORT_SYMBOL_GPL(clk_save_context);
1132
1133 /**
1134  * clk_restore_context - restore clock context after poweroff
1135  *
1136  * Restore the saved clock context upon resume.
1137  */
1139 void clk_restore_context(void)
1140 {
1141 struct clk_core *core;
1142
1143 hlist_for_each_entry(core, &clk_root_list, child_node)
1144 clk_core_restore_context(core);
1145
1146 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1147 clk_core_restore_context(core);
1148 }
1149 EXPORT_SYMBOL_GPL(clk_restore_context);
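A sketch of how a platform might wire these into its suspend path; the noirq callbacks and the demo_ names are assumptions, not taken from this file:

    #include <linux/clk.h>
    #include <linux/pm.h>

    static int demo_suspend_noirq(struct device *dev)
    {
            /* registers are about to lose power: cache their contents */
            return clk_save_context();
    }

    static int demo_resume_noirq(struct device *dev)
    {
            /* power is back: replay the cached register state */
            clk_restore_context();
            return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
            .suspend_noirq  = demo_suspend_noirq,
            .resume_noirq   = demo_resume_noirq,
    };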
1150
1151 /**
1152  * clk_enable - ungate a clock
1153  * @clk: the clk being ungated
1154  *
1155  * clk_enable must not sleep, which differentiates it from clk_prepare. In a
1156  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1157  * if the operation will never sleep. One example is a SoC-internal clk which
1158  * is controlled via simple register writes. In the complex case a clk ungate
1159  * operation may require a fast and a slow part. It is this reason that
1160  * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
1161  * must be called before clk_enable. Returns 0 on success, -EERROR
1162  * otherwise.
1163  */
1164 int clk_enable(struct clk *clk)
1165 {
1166 if (!clk)
1167 return 0;
1168
1169 return clk_core_enable_lock(clk->core);
1170 }
1171 EXPORT_SYMBOL_GPL(clk_enable);
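Since clk_prepare() and clk_enable() must always be paired in this order, consumers usually reach for the clk_prepare_enable()/clk_disable_unprepare() combos from linux/clk.h. A typical bring-up/tear-down sketch (hypothetical driver code):

    #include <linux/clk.h>

    static int demo_hw_init(struct clk *clk)
    {
            int ret;

            ret = clk_prepare_enable(clk);  /* clk_prepare() then clk_enable() */
            if (ret)
                    return ret;

            /* ... program the now-clocked hardware ... */
            return 0;
    }

    static void demo_hw_exit(struct clk *clk)
    {
            clk_disable_unprepare(clk);     /* clk_disable() then clk_unprepare() */
    }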
1172
1173 static int clk_core_prepare_enable(struct clk_core *core)
1174 {
1175 int ret;
1176
1177 ret = clk_core_prepare_lock(core);
1178 if (ret)
1179 return ret;
1180
1181 ret = clk_core_enable_lock(core);
1182 if (ret)
1183 clk_core_unprepare_lock(core);
1184
1185 return ret;
1186 }
1187
1188 static void clk_core_disable_unprepare(struct clk_core *core)
1189 {
1190 clk_core_disable_lock(core);
1191 clk_core_unprepare_lock(core);
1192 }
1193
1194 static void clk_unprepare_unused_subtree(struct clk_core *core)
1195 {
1196 struct clk_core *child;
1197
1198 lockdep_assert_held(&prepare_lock);
1199
1200 hlist_for_each_entry(child, &core->children, child_node)
1201 clk_unprepare_unused_subtree(child);
1202
1203 if (core->prepare_count)
1204 return;
1205
1206 if (core->flags & CLK_IGNORE_UNUSED)
1207 return;
1208
1209 if (clk_pm_runtime_get(core))
1210 return;
1211
1212 if (clk_core_is_prepared(core)) {
1213 trace_clk_unprepare(core);
1214 if (core->ops->unprepare_unused)
1215 core->ops->unprepare_unused(core->hw);
1216 else if (core->ops->unprepare)
1217 core->ops->unprepare(core->hw);
1218 trace_clk_unprepare_complete(core);
1219 }
1220
1221 clk_pm_runtime_put(core);
1222 }
1223
1224 static void clk_disable_unused_subtree(struct clk_core *core)
1225 {
1226 struct clk_core *child;
1227 unsigned long flags;
1228
1229 lockdep_assert_held(&prepare_lock);
1230
1231 hlist_for_each_entry(child, &core->children, child_node)
1232 clk_disable_unused_subtree(child);
1233
1234 if (core->flags & CLK_OPS_PARENT_ENABLE)
1235 clk_core_prepare_enable(core->parent);
1236
1237 if (clk_pm_runtime_get(core))
1238 goto unprepare_out;
1239
1240 flags = clk_enable_lock();
1241
1242 if (core->enable_count)
1243 goto unlock_out;
1244
1245 if (core->flags & CLK_IGNORE_UNUSED)
1246 goto unlock_out;
1247
1248 /*
1249  * some gate clocks have special needs during the disable-unused
1250  * sequence, so only gate here if the clock is actually enabled in
1251  * hardware, and prefer .disable_unused when the provider supplies it
1252  */
1253 if (clk_core_is_enabled(core)) {
1254 trace_clk_disable(core);
1255 if (core->ops->disable_unused)
1256 core->ops->disable_unused(core->hw);
1257 else if (core->ops->disable)
1258 core->ops->disable(core->hw);
1259 trace_clk_disable_complete(core);
1260 }
1261
1262 unlock_out:
1263 clk_enable_unlock(flags);
1264 clk_pm_runtime_put(core);
1265 unprepare_out:
1266 if (core->flags & CLK_OPS_PARENT_ENABLE)
1267 clk_core_disable_unprepare(core->parent);
1268 }
1269
1270 static bool clk_ignore_unused;
1271 static int __init clk_ignore_unused_setup(char *__unused)
1272 {
1273 clk_ignore_unused = true;
1274 return 1;
1275 }
1276 __setup("clk_ignore_unused", clk_ignore_unused_setup);
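A usage note: passing clk_ignore_unused on the kernel command line flips the flag above, turning the late-initcall sweep below into a no-op. This is a common bring-up aid when a driver does not yet claim the clocks it depends on and would otherwise see them gated as unused.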
1277
1278 static int clk_disable_unused(void)
1279 {
1280 struct clk_core *core;
1281
1282 if (clk_ignore_unused) {
1283 pr_warn("clk: Not disabling unused clocks\n");
1284 return 0;
1285 }
1286
1287 clk_prepare_lock();
1288
1289 hlist_for_each_entry(core, &clk_root_list, child_node)
1290 clk_disable_unused_subtree(core);
1291
1292 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1293 clk_disable_unused_subtree(core);
1294
1295 hlist_for_each_entry(core, &clk_root_list, child_node)
1296 clk_unprepare_unused_subtree(core);
1297
1298 hlist_for_each_entry(core, &clk_orphan_list, child_node)
1299 clk_unprepare_unused_subtree(core);
1300
1301 clk_prepare_unlock();
1302
1303 return 0;
1304 }
1305 late_initcall_sync(clk_disable_unused);
1306
1307 static int clk_core_determine_round_nolock(struct clk_core *core,
1308 struct clk_rate_request *req)
1309 {
1310 long rate;
1311
1312 lockdep_assert_held(&prepare_lock);
1313
1314 if (!core)
1315 return 0;
1316
1317 /*
1318  * A protected clock must keep its current rate, so answer the
1319  * request with the cached rate instead of consulting the provider's
1320  * determine_rate/round_rate callbacks.
1321  */
1323 if (clk_core_rate_is_protected(core)) {
1324 req->rate = core->rate;
1325 } else if (core->ops->determine_rate) {
1326 return core->ops->determine_rate(core->hw, req);
1327 } else if (core->ops->round_rate) {
1328 rate = core->ops->round_rate(core->hw, req->rate,
1329 &req->best_parent_rate);
1330 if (rate < 0)
1331 return rate;
1332
1333 req->rate = rate;
1334 } else {
1335 return -EINVAL;
1336 }
1337
1338 return 0;
1339 }
1340
1341 static void clk_core_init_rate_req(struct clk_core * const core,
1342 struct clk_rate_request *req)
1343 {
1344 struct clk_core *parent;
1345
1346 if (WARN_ON(!core || !req))
1347 return;
1348
1349 parent = core->parent;
1350 if (parent) {
1351 req->best_parent_hw = parent->hw;
1352 req->best_parent_rate = parent->rate;
1353 } else {
1354 req->best_parent_hw = NULL;
1355 req->best_parent_rate = 0;
1356 }
1357 }
1358
1359 static bool clk_core_can_round(struct clk_core * const core)
1360 {
1361 return core->ops->determine_rate || core->ops->round_rate;
1362 }
1363
1364 static int clk_core_round_rate_nolock(struct clk_core *core,
1365 struct clk_rate_request *req)
1366 {
1367 lockdep_assert_held(&prepare_lock);
1368
1369 if (!core) {
1370 req->rate = 0;
1371 return 0;
1372 }
1373
1374 clk_core_init_rate_req(core, req);
1375
1376 if (clk_core_can_round(core))
1377 return clk_core_determine_round_nolock(core, req);
1378 else if (core->flags & CLK_SET_RATE_PARENT)
1379 return clk_core_round_rate_nolock(core->parent, req);
1380
1381 req->rate = core->rate;
1382 return 0;
1383 }
1384
1385 /**
1386  * __clk_determine_rate - get the closest rate actually supported by a clock
1387  * @hw: determine the rate of this clock
1388  * @req: target rate request
1389  *
1390  * Useful for clk_ops such as .set_rate and .determine_rate.
1391  */
1392 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1393 {
1394 if (!hw) {
1395 req->rate = 0;
1396 return 0;
1397 }
1398
1399 return clk_core_round_rate_nolock(hw->core, req);
1400 }
1401 EXPORT_SYMBOL_GPL(__clk_determine_rate);
1402
1403 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1404 {
1405 int ret;
1406 struct clk_rate_request req;
1407
1408 clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1409 req.rate = rate;
1410
1411 ret = clk_core_round_rate_nolock(hw->core, &req);
1412 if (ret)
1413 return 0;
1414
1415 return req.rate;
1416 }
1417 EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1418
1419 /**
1420  * clk_round_rate - round the given rate for a clk
1421  * @clk: the clk for which we are rounding a rate
1422  * @rate: the rate which is to be rounded
1423  *
1424  * Takes in a rate as input and rounds it to a rate that the clk can actually
1425  * use which is then returned. If clk doesn't support round_rate operation
1426  * then the parent rate is returned.
1427  */
1428 long clk_round_rate(struct clk *clk, unsigned long rate)
1429 {
1430 struct clk_rate_request req;
1431 int ret;
1432
1433 if (!clk)
1434 return 0;
1435
1436 clk_prepare_lock();
1437
1438 if (clk->exclusive_count)
1439 clk_core_rate_unprotect(clk->core);
1440
1441 clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1442 req.rate = rate;
1443
1444 ret = clk_core_round_rate_nolock(clk->core, &req);
1445
1446 if (clk->exclusive_count)
1447 clk_core_rate_protect(clk->core);
1448
1449 clk_prepare_unlock();
1450
1451 if (ret)
1452 return ret;
1453
1454 return req.rate;
1455 }
1456 EXPORT_SYMBOL_GPL(clk_round_rate);
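A common consumer pattern is to round first and only then commit, so the caller knows the exact rate the hardware will run at. A sketch (assumes an already-acquired clk):

    #include <linux/clk.h>

    static int demo_pick_rate(struct clk *clk, unsigned long target)
    {
            long rounded = clk_round_rate(clk, target);

            if (rounded <= 0)
                    return rounded ? (int)rounded : -EINVAL;

            /* rounded is achievable as-is, so this should not surprise us */
            return clk_set_rate(clk, rounded);
    }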
1457
1458 /**
1459  * __clk_notify - call clk notifier chain
1460  * @core: clk that is changing rate
1461  * @msg: clk notifier type (see include/linux/clk.h)
1462  * @old_rate: old clk rate
1463  * @new_rate: new clk rate
1464  *
1465  * Triggers a notifier call chain on the clk rate-change notification
1466  * for 'clk'. Passes a pointer to the struct clk and the previous
1467  * and current rates to the notifier callback. Intended to be called by
1468  * internal clock code only. Returns NOTIFY_DONE from the last driver
1469  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1470  * a driver returns that.
1471  */
1472 static int __clk_notify(struct clk_core *core, unsigned long msg,
1473 unsigned long old_rate, unsigned long new_rate)
1474 {
1475 struct clk_notifier *cn;
1476 struct clk_notifier_data cnd;
1477 int ret = NOTIFY_DONE;
1478
1479 cnd.old_rate = old_rate;
1480 cnd.new_rate = new_rate;
1481
1482 list_for_each_entry(cn, &clk_notifier_list, node) {
1483 if (cn->clk->core == core) {
1484 cnd.clk = cn->clk;
1485 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1486 &cnd);
1487 if (ret & NOTIFY_STOP_MASK)
1488 return ret;
1489 }
1490 }
1491
1492 return ret;
1493 }
1494
1495 /**
1496  * __clk_recalc_accuracies - update the rate accuracy of a whole clk tree
1497  * @core: first clk in the subtree
1498  *
1499  * Walks the subtree of clks starting with clk and recalculates accuracies as
1500  * it goes. Note that if a clk does not implement the .recalc_accuracy
1501  * callback then it is assumed that the clock will take on the accuracy of its
1502  * parent.
1503  */
1504 static void __clk_recalc_accuracies(struct clk_core *core)
1505 {
1506 unsigned long parent_accuracy = 0;
1507 struct clk_core *child;
1508
1509 lockdep_assert_held(&prepare_lock);
1510
1511 if (core->parent)
1512 parent_accuracy = core->parent->accuracy;
1513
1514 if (core->ops->recalc_accuracy)
1515 core->accuracy = core->ops->recalc_accuracy(core->hw,
1516 parent_accuracy);
1517 else
1518 core->accuracy = parent_accuracy;
1519
1520 hlist_for_each_entry(child, &core->children, child_node)
1521 __clk_recalc_accuracies(child);
1522 }
1523
1524 static long clk_core_get_accuracy(struct clk_core *core)
1525 {
1526 unsigned long accuracy;
1527
1528 clk_prepare_lock();
1529 if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1530 __clk_recalc_accuracies(core);
1531
1532 accuracy = __clk_get_accuracy(core);
1533 clk_prepare_unlock();
1534
1535 return accuracy;
1536 }
1537
1538 /**
1539  * clk_get_accuracy - return the accuracy of clk
1540  * @clk: the clk whose accuracy is being returned
1541  *
1542  * Simply returns the cached accuracy of the clk, unless the
1543  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy
1544  * will be issued.
1545  * If clk is NULL then returns 0.
1546  */
1547 long clk_get_accuracy(struct clk *clk)
1548 {
1549 if (!clk)
1550 return 0;
1551
1552 return clk_core_get_accuracy(clk->core);
1553 }
1554 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1555
1556 static unsigned long clk_recalc(struct clk_core *core,
1557 unsigned long parent_rate)
1558 {
1559 unsigned long rate = parent_rate;
1560
1561 if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1562 rate = core->ops->recalc_rate(core->hw, parent_rate);
1563 clk_pm_runtime_put(core);
1564 }
1565 return rate;
1566 }
1567
1568 /**
1569  * __clk_recalc_rates - update the rate of a whole subtree of clk
1570  * @core: first clk in the subtree
1571  * @msg: notification type (see include/linux/clk.h)
1572  *
1573  * Walks the subtree of clks starting with clk and recalculates rates as it
1574  * goes. Note that if a clk does not implement the .recalc_rate callback then
1575  * it is assumed that the clock will take on the rate of its parent.
1576  *
1577  * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1578  * if necessary.
1579  */
1580 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1581 {
1582 unsigned long old_rate;
1583 unsigned long parent_rate = 0;
1584 struct clk_core *child;
1585
1586 lockdep_assert_held(&prepare_lock);
1587
1588 old_rate = core->rate;
1589
1590 if (core->parent)
1591 parent_rate = core->parent->rate;
1592
1593 core->rate = clk_recalc(core, parent_rate);
1594
1595 /*
1596  * ignore a zero msg: it means the caller only wants the cached rates
1597  * refreshed and no notification sent
1598  */
1599 if (core->notifier_count && msg)
1600 __clk_notify(core, msg, old_rate, core->rate);
1601
1602 hlist_for_each_entry(child, &core->children, child_node)
1603 __clk_recalc_rates(child, msg);
1604 }
1605
1606 static unsigned long clk_core_get_rate(struct clk_core *core)
1607 {
1608 unsigned long rate;
1609
1610 clk_prepare_lock();
1611
1612 if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1613 __clk_recalc_rates(core, 0);
1614
1615 rate = clk_core_get_rate_nolock(core);
1616 clk_prepare_unlock();
1617
1618 return rate;
1619 }
1620
1621 /**
1622  * clk_get_rate - return the rate of clk
1623  * @clk: the clk whose rate is being returned
1624  *
1625  * Simply returns the cached rate of the clk, unless the
1626  * CLK_GET_RATE_NOCACHE flag is set, which means a recalc_rate will be issued.
1627  * If clk is NULL then returns 0.
1628  */
1629 unsigned long clk_get_rate(struct clk *clk)
1630 {
1631 if (!clk)
1632 return 0;
1633
1634 return clk_core_get_rate(clk->core);
1635 }
1636 EXPORT_SYMBOL_GPL(clk_get_rate);
1637
1638 static int clk_fetch_parent_index(struct clk_core *core,
1639 struct clk_core *parent)
1640 {
1641 int i;
1642
1643 if (!parent)
1644 return -EINVAL;
1645
1646 for (i = 0; i < core->num_parents; i++) {
1647 /* Found it first try! */
1648 if (core->parents[i].core == parent)
1649 return i;
1650
1651 /* Something else is here, so keep looking */
1652 if (core->parents[i].core)
1653 continue;
1654
1655 /* Maybe core hasn't been cached but the hw is all we know? */
1656 if (core->parents[i].hw) {
1657 if (core->parents[i].hw == parent->hw)
1658 break;
1659
1660 /* Didn't match, but we're expecting a clk_hw */
1661 continue;
1662 }
1663
1664 /* Maybe it hasn't been cached (clk_set_parent() path) */
1665 if (parent == clk_core_get(core, i))
1666 break;
1667
1668 /* Fallback to comparing globally unique names */
1669 if (core->parents[i].name &&
1670 !strcmp(parent->name, core->parents[i].name))
1671 break;
1672 }
1673
1674 if (i == core->num_parents)
1675 return -EINVAL;
1676
1677 core->parents[i].core = parent;
1678 return i;
1679 }
1680
1681 /*
1682  * Update the orphan status of @core and all its children.
1683  */
1684 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1685 {
1686 struct clk_core *child;
1687
1688 core->orphan = is_orphan;
1689
1690 hlist_for_each_entry(child, &core->children, child_node)
1691 clk_core_update_orphan_status(child, is_orphan);
1692 }
1693
1694 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1695 {
1696 bool was_orphan = core->orphan;
1697
1698 hlist_del(&core->child_node);
1699
1700 if (new_parent) {
1701 bool becomes_orphan = new_parent->orphan;
1702
1703
1704 if (new_parent->new_child == core)
1705 new_parent->new_child = NULL;
1706
1707 hlist_add_head(&core->child_node, &new_parent->children);
1708
1709 if (was_orphan != becomes_orphan)
1710 clk_core_update_orphan_status(core, becomes_orphan);
1711 } else {
1712 hlist_add_head(&core->child_node, &clk_orphan_list);
1713 if (!was_orphan)
1714 clk_core_update_orphan_status(core, true);
1715 }
1716
1717 core->parent = new_parent;
1718 }
1719
1720 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1721 struct clk_core *parent)
1722 {
1723 unsigned long flags;
1724 struct clk_core *old_parent = core->parent;
1725
1726 /*
1727  * 1. enable parents for the CLK_OPS_PARENT_ENABLE clock
1728  *
1729  * 2. Migrate prepare state between parents and prevent a race with
1730  * clk_enable().
1731  *
1732  * If the clock is not prepared, then a race with
1733  * clk_enable/disable() is impossible since we already have the
1734  * prepare lock (future calls to clk_enable() need to be preceded by
1735  * a clk_prepare()).
1736  *
1737  * If the clock is prepared, migrate the prepared state to the new
1738  * parent and also protect against a race with clk_enable() by
1739  * forcing the clock and the new parent on. This ensures that all
1740  * future calls to clk_enable() are practically NOPs with respect to
1741  * hardware and software states.
1742  *
1743  * See also: the comment for clk_set_parent() below.
1744  */
1747 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1748 clk_core_prepare_enable(old_parent);
1749 clk_core_prepare_enable(parent);
1750 }
1751
1752
1753 if (core->prepare_count) {
1754 clk_core_prepare_enable(parent);
1755 clk_core_enable_lock(core);
1756 }
1757
1758
1759 flags = clk_enable_lock();
1760 clk_reparent(core, parent);
1761 clk_enable_unlock(flags);
1762
1763 return old_parent;
1764 }
1765
1766 static void __clk_set_parent_after(struct clk_core *core,
1767 struct clk_core *parent,
1768 struct clk_core *old_parent)
1769 {
1770 /*
1771  * Finish the migration of prepare state and undo the changes done
1772  * for preventing a race with clk_enable().
1773  */
1774 if (core->prepare_count) {
1775 clk_core_disable_lock(core);
1776 clk_core_disable_unprepare(old_parent);
1777 }
1778
1779
1780 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1781 clk_core_disable_unprepare(parent);
1782 clk_core_disable_unprepare(old_parent);
1783 }
1784 }
1785
1786 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1787 u8 p_index)
1788 {
1789 unsigned long flags;
1790 int ret = 0;
1791 struct clk_core *old_parent;
1792
1793 old_parent = __clk_set_parent_before(core, parent);
1794
1795 trace_clk_set_parent(core, parent);
1796
1797
1798 if (parent && core->ops->set_parent)
1799 ret = core->ops->set_parent(core->hw, p_index);
1800
1801 trace_clk_set_parent_complete(core, parent);
1802
1803 if (ret) {
1804 flags = clk_enable_lock();
1805 clk_reparent(core, old_parent);
1806 clk_enable_unlock(flags);
1807 __clk_set_parent_after(core, old_parent, parent);
1808
1809 return ret;
1810 }
1811
1812 __clk_set_parent_after(core, parent, old_parent);
1813
1814 return 0;
1815 }
1816
1817 /**
1818  * __clk_speculate_rates - walk the subtree of a clock and speculate rates
1819  * @core: first clk in the subtree
1820  * @parent_rate: the "future" rate of clk's parent
1821  *
1822  * Walks the subtree of clks starting with clk, speculating rates as it
1823  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1824  *
1825  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1826  * pre-rate change notifications and returns early if no clks in the
1827  * subtree have subscribed to the notifications. Note that if a clk does not
1828  * implement the .recalc_rate callback then it is assumed that the clock
1829  * will take on the rate of its parent.
1830  */
1831 static int __clk_speculate_rates(struct clk_core *core,
1832 unsigned long parent_rate)
1833 {
1834 struct clk_core *child;
1835 unsigned long new_rate;
1836 int ret = NOTIFY_DONE;
1837
1838 lockdep_assert_held(&prepare_lock);
1839
1840 new_rate = clk_recalc(core, parent_rate);
1841
1842
1843 if (core->notifier_count)
1844 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1845
1846 if (ret & NOTIFY_STOP_MASK) {
1847 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1848 __func__, core->name, ret);
1849 goto out;
1850 }
1851
1852 hlist_for_each_entry(child, &core->children, child_node) {
1853 ret = __clk_speculate_rates(child, new_rate);
1854 if (ret & NOTIFY_STOP_MASK)
1855 break;
1856 }
1857
1858 out:
1859 return ret;
1860 }
1861
1862 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1863 struct clk_core *new_parent, u8 p_index)
1864 {
1865 struct clk_core *child;
1866
1867 core->new_rate = new_rate;
1868 core->new_parent = new_parent;
1869 core->new_parent_index = p_index;
1870
1871 core->new_child = NULL;
1872 if (new_parent && new_parent != core->parent)
1873 new_parent->new_child = core;
1874
1875 hlist_for_each_entry(child, &core->children, child_node) {
1876 child->new_rate = clk_recalc(child, new_rate);
1877 clk_calc_subtree(child, child->new_rate, NULL, 0);
1878 }
1879 }
1880
1881 /*
1882  * calculate the new rates returning the topmost clock that has to be
1883  * changed.
1884  */
1885 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1886 unsigned long rate)
1887 {
1888 struct clk_core *top = core;
1889 struct clk_core *old_parent, *parent;
1890 unsigned long best_parent_rate = 0;
1891 unsigned long new_rate;
1892 unsigned long min_rate;
1893 unsigned long max_rate;
1894 int p_index = 0;
1895 long ret;
1896
1897
1898 if (IS_ERR_OR_NULL(core))
1899 return NULL;
1900
1901
1902 parent = old_parent = core->parent;
1903 if (parent)
1904 best_parent_rate = parent->rate;
1905
1906 clk_core_get_boundaries(core, &min_rate, &max_rate);
1907
1908
1909 if (clk_core_can_round(core)) {
1910 struct clk_rate_request req;
1911
1912 req.rate = rate;
1913 req.min_rate = min_rate;
1914 req.max_rate = max_rate;
1915
1916 clk_core_init_rate_req(core, &req);
1917
1918 ret = clk_core_determine_round_nolock(core, &req);
1919 if (ret < 0)
1920 return NULL;
1921
1922 best_parent_rate = req.best_parent_rate;
1923 new_rate = req.rate;
1924 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1925
1926 if (new_rate < min_rate || new_rate > max_rate)
1927 return NULL;
1928 } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1929
1930 core->new_rate = core->rate;
1931 return NULL;
1932 } else {
1933
1934 top = clk_calc_new_rates(parent, rate);
1935 new_rate = parent->new_rate;
1936 goto out;
1937 }
1938
1939
1940 if (parent != old_parent &&
1941 (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1942 pr_debug("%s: %s not gated but wants to reparent\n",
1943 __func__, core->name);
1944 return NULL;
1945 }
1946
1947
1948 if (parent && core->num_parents > 1) {
1949 p_index = clk_fetch_parent_index(core, parent);
1950 if (p_index < 0) {
1951 pr_debug("%s: clk %s can not be parent of clk %s\n",
1952 __func__, parent->name, core->name);
1953 return NULL;
1954 }
1955 }
1956
1957 if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1958 best_parent_rate != parent->rate)
1959 top = clk_calc_new_rates(parent, best_parent_rate);
1960
1961 out:
1962 clk_calc_subtree(core, new_rate, parent, p_index);
1963
1964 return top;
1965 }
1966
1967 /*
1968  * Notify about rate changes in a subtree. Always walk down the whole
1969  * tree so that in case of an error we can walk down the whole tree
1970  * again and abort the change.
1971  */
1972 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1973 unsigned long event)
1974 {
1975 struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1976 int ret = NOTIFY_DONE;
1977
1978 if (core->rate == core->new_rate)
1979 return NULL;
1980
1981 if (core->notifier_count) {
1982 ret = __clk_notify(core, event, core->rate, core->new_rate);
1983 if (ret & NOTIFY_STOP_MASK)
1984 fail_clk = core;
1985 }
1986
1987 hlist_for_each_entry(child, &core->children, child_node) {
1988
1989 if (child->new_parent && child->new_parent != core)
1990 continue;
1991 tmp_clk = clk_propagate_rate_change(child, event);
1992 if (tmp_clk)
1993 fail_clk = tmp_clk;
1994 }
1995
1996
1997 if (core->new_child) {
1998 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1999 if (tmp_clk)
2000 fail_clk = tmp_clk;
2001 }
2002
2003 return fail_clk;
2004 }
2005
2006 /*
2007  * walk down a subtree and set the new rates, notifying the rate
2008  * change on the way
2009  */
2010 static void clk_change_rate(struct clk_core *core)
2011 {
2012 struct clk_core *child;
2013 struct hlist_node *tmp;
2014 unsigned long old_rate;
2015 unsigned long best_parent_rate = 0;
2016 bool skip_set_rate = false;
2017 struct clk_core *old_parent;
2018 struct clk_core *parent = NULL;
2019
2020 old_rate = core->rate;
2021
2022 if (core->new_parent) {
2023 parent = core->new_parent;
2024 best_parent_rate = core->new_parent->rate;
2025 } else if (core->parent) {
2026 parent = core->parent;
2027 best_parent_rate = core->parent->rate;
2028 }
2029
2030 if (clk_pm_runtime_get(core))
2031 return;
2032
2033 if (core->flags & CLK_SET_RATE_UNGATE) {
2034 unsigned long flags;
2035
2036 clk_core_prepare(core);
2037 flags = clk_enable_lock();
2038 clk_core_enable(core);
2039 clk_enable_unlock(flags);
2040 }
2041
2042 if (core->new_parent && core->new_parent != core->parent) {
2043 old_parent = __clk_set_parent_before(core, core->new_parent);
2044 trace_clk_set_parent(core, core->new_parent);
2045
2046 if (core->ops->set_rate_and_parent) {
2047 skip_set_rate = true;
2048 core->ops->set_rate_and_parent(core->hw, core->new_rate,
2049 best_parent_rate,
2050 core->new_parent_index);
2051 } else if (core->ops->set_parent) {
2052 core->ops->set_parent(core->hw, core->new_parent_index);
2053 }
2054
2055 trace_clk_set_parent_complete(core, core->new_parent);
2056 __clk_set_parent_after(core, core->new_parent, old_parent);
2057 }
2058
2059 if (core->flags & CLK_OPS_PARENT_ENABLE)
2060 clk_core_prepare_enable(parent);
2061
2062 trace_clk_set_rate(core, core->new_rate);
2063
2064 if (!skip_set_rate && core->ops->set_rate)
2065 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2066
2067 trace_clk_set_rate_complete(core, core->new_rate);
2068
2069 core->rate = clk_recalc(core, best_parent_rate);
2070
2071 if (core->flags & CLK_SET_RATE_UNGATE) {
2072 unsigned long flags;
2073
2074 flags = clk_enable_lock();
2075 clk_core_disable(core);
2076 clk_enable_unlock(flags);
2077 clk_core_unprepare(core);
2078 }
2079
2080 if (core->flags & CLK_OPS_PARENT_ENABLE)
2081 clk_core_disable_unprepare(parent);
2082
2083 if (core->notifier_count && old_rate != core->rate)
2084 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2085
2086 if (core->flags & CLK_RECALC_NEW_RATES)
2087 (void)clk_calc_new_rates(core, core->new_rate);
2088
2089 /*
2090  * Use safe iteration, as change_rate can actually swap parents
2091  * for certain clock types.
2092  */
2093 hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2094
2095 if (child->new_parent && child->new_parent != core)
2096 continue;
2097 clk_change_rate(child);
2098 }
2099
2100
2101 if (core->new_child)
2102 clk_change_rate(core->new_child);
2103
2104 clk_pm_runtime_put(core);
2105 }
2106
2107 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2108 unsigned long req_rate)
2109 {
2110 int ret, cnt;
2111 struct clk_rate_request req;
2112
2113 lockdep_assert_held(&prepare_lock);
2114
2115 if (!core)
2116 return 0;
2117
2118
2119 cnt = clk_core_rate_nuke_protect(core);
2120 if (cnt < 0)
2121 return cnt;
2122
2123 clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2124 req.rate = req_rate;
2125
2126 ret = clk_core_round_rate_nolock(core, &req);
2127
2128
2129 clk_core_rate_restore_protect(core, cnt);
2130
2131 return ret ? 0 : req.rate;
2132 }
2133
2134 static int clk_core_set_rate_nolock(struct clk_core *core,
2135 unsigned long req_rate)
2136 {
2137 struct clk_core *top, *fail_clk;
2138 unsigned long rate;
2139 int ret = 0;
2140
2141 if (!core)
2142 return 0;
2143
2144 rate = clk_core_req_round_rate_nolock(core, req_rate);
2145
2146
2147 if (rate == clk_core_get_rate_nolock(core))
2148 return 0;
2149
2150
2151 if (clk_core_rate_is_protected(core))
2152 return -EBUSY;
2153
2154
2155 top = clk_calc_new_rates(core, req_rate);
2156 if (!top)
2157 return -EINVAL;
2158
2159 ret = clk_pm_runtime_get(core);
2160 if (ret)
2161 return ret;
2162
2163
2164 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2165 if (fail_clk) {
2166 pr_debug("%s: failed to set %s rate\n", __func__,
2167 fail_clk->name);
2168 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2169 ret = -EBUSY;
2170 goto err;
2171 }
2172
2173
2174 clk_change_rate(top);
2175
2176 core->req_rate = req_rate;
2177 err:
2178 clk_pm_runtime_put(core);
2179
2180 return ret;
2181 }
2182
2183 /**
2184  * clk_set_rate - specify a new rate for clk
2185  * @clk: the clk whose rate is being changed
2186  * @rate: the new rate for clk
2187  *
2188  * In the simplest case clk_set_rate will only adjust the rate of clk.
2189  *
2190  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2191  * propagate up to clk's parent; whether or not this happens depends on the
2192  * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
2193  * after calling .round_rate then upstream parent propagation is ignored. If
2194  * *parent_rate comes back with a new rate for clk's parent then we propagate
2195  * up to clk's parent and set its rate. Upward propagation will continue
2196  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2197  * .round_rate stops requesting changes to clk's parent_rate.
2198  *
2199  * Rate changes are accomplished via tree traversal that also recalculates the
2200  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2201  *
2202  * Returns 0 on success, -EERROR otherwise.
2203  */
2204 int clk_set_rate(struct clk *clk, unsigned long rate)
2205 {
2206 int ret;
2207
2208 if (!clk)
2209 return 0;
2210
2211
2212 clk_prepare_lock();
2213
2214 if (clk->exclusive_count)
2215 clk_core_rate_unprotect(clk->core);
2216
2217 ret = clk_core_set_rate_nolock(clk->core, rate);
2218
2219 if (clk->exclusive_count)
2220 clk_core_rate_protect(clk->core);
2221
2222 clk_prepare_unlock();
2223
2224 return ret;
2225 }
2226 EXPORT_SYMBOL_GPL(clk_set_rate);
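The PRE_RATE_CHANGE/POST_RATE_CHANGE/ABORT_RATE_CHANGE messages propagated during clk_set_rate() are delivered to consumers through notifiers registered with clk_notifier_register(). A hedged sketch of a consumer reacting to rate changes made elsewhere (demo_ names are assumptions):

    #include <linux/clk.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int demo_clk_event(struct notifier_block *nb, unsigned long event,
                              void *data)
    {
            struct clk_notifier_data *cnd = data;

            if (event == POST_RATE_CHANGE)
                    pr_info("clk rate changed: %lu -> %lu\n",
                            cnd->old_rate, cnd->new_rate);

            return NOTIFY_OK;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_clk_event,
    };

    /* somewhere in probe: clk_notifier_register(clk, &demo_nb); */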
2227
2228 /**
2229  * clk_set_rate_exclusive - specify a new rate and get exclusive control
2230  * @clk: the clk whose rate is being changed
2231  * @rate: the new rate for clk
2232  *
2233  * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2234  * within a critical section.
2235  *
2236  * This can be used initially to ensure that at least 1 consumer is
2237  * satisfied when several consumers are competing for exclusivity over the
2238  * same clock provider.
2239  *
2240  * The exclusivity is not applied if setting the rate failed.
2241  *
2242  * Calls to clk_rate_exclusive_get() should be balanced with calls to
2243  * clk_rate_exclusive_put().
2244  *
2245  * Returns 0 on success, -EERROR otherwise.
2246  */
2247 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2248 {
2249 int ret;
2250
2251 if (!clk)
2252 return 0;
2253
2254
2255 clk_prepare_lock();
2256
2257 /*
2258  * The temporary protection removal is not here, on purpose.
2259  * This function is meant to be used instead of clk_rate_protect,
2260  * so the clock is protected only after the rate was set.
2261  */
2263 ret = clk_core_set_rate_nolock(clk->core, rate);
2264 if (!ret) {
2265 clk_core_rate_protect(clk->core);
2266 clk->exclusive_count++;
2267 }
2268
2269 clk_prepare_unlock();
2270
2271 return ret;
2272 }
2273 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
2274
2275 /**
2276  * clk_set_rate_range - set a rate range for a clock source
2277  * @clk: clock source
2278  * @min: desired minimum clock rate in Hz, inclusive
2279  * @max: desired maximum clock rate in Hz, inclusive
2280  *
2281  * Returns success (0) or negative errno.
2282  */
2283 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2284 {
2285 int ret = 0;
2286 unsigned long old_min, old_max, rate;
2287
2288 if (!clk)
2289 return 0;
2290
2291 if (min > max) {
2292 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2293 __func__, clk->core->name, clk->dev_id, clk->con_id,
2294 min, max);
2295 return -EINVAL;
2296 }
2297
2298 clk_prepare_lock();
2299
2300 if (clk->exclusive_count)
2301 clk_core_rate_unprotect(clk->core);
2302
2303
2304 old_min = clk->min_rate;
2305 old_max = clk->max_rate;
2306 clk->min_rate = min;
2307 clk->max_rate = max;
2308
2309 rate = clk_core_get_rate_nolock(clk->core);
2310 if (rate < min || rate > max) {
2311 /*
2312  * FIXME:
2313  * We are in a bit of trouble here, the current rate is outside the
2314  * requested range. We are going to request an appropriate range
2315  * boundary but there is a catch. It may fail for the usual reason
2316  * (clock broken, clock protected, etc) but also because:
2317  * - round_rate() was not favorable and fell on the wrong
2318  *   side of the boundary
2319  * - the determine_rate() callback does not really check for
2320  *   this corner case when determining the rate
2321  */
2324 if (rate < min)
2325 rate = min;
2326 else
2327 rate = max;
2328
2329 ret = clk_core_set_rate_nolock(clk->core, rate);
2330 if (ret) {
2331
2332 clk->min_rate = old_min;
2333 clk->max_rate = old_max;
2334 }
2335 }
2336
2337 if (clk->exclusive_count)
2338 clk_core_rate_protect(clk->core);
2339
2340 clk_prepare_unlock();
2341
2342 return ret;
2343 }
2344 EXPORT_SYMBOL_GPL(clk_set_rate_range);
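Where several consumers share one clock, expressing a tolerable range is friendlier than forcing a single rate; the framework aggregates all per-consumer ranges in clk_core_get_boundaries() above. A sketch (the 100-200 MHz window is a made-up requirement):

    #include <linux/clk.h>

    static int demo_constrain(struct clk *clk)
    {
            /* this consumer works anywhere in 100-200 MHz; others narrow it further */
            return clk_set_rate_range(clk, 100000000UL, 200000000UL);
    }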
2345
2346 /**
2347  * clk_set_min_rate - set a minimum clock rate for a clock source
2348  * @clk: clock source
2349  * @rate: desired minimum clock rate in Hz, inclusive
2350  *
2351  * Returns success (0) or negative errno.
2352  */
2353 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2354 {
2355 if (!clk)
2356 return 0;
2357
2358 return clk_set_rate_range(clk, rate, clk->max_rate);
2359 }
2360 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2361
2362 /**
2363  * clk_set_max_rate - set a maximum clock rate for a clock source
2364  * @clk: clock source
2365  * @rate: desired maximum clock rate in Hz, inclusive
2366  *
2367  * Returns success (0) or negative errno.
2368  */
2369 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2370 {
2371 if (!clk)
2372 return 0;
2373
2374 return clk_set_rate_range(clk, clk->min_rate, rate);
2375 }
2376 EXPORT_SYMBOL_GPL(clk_set_max_rate);
2377
2378 /**
2379  * clk_get_parent - return the parent of a clk
2380  * @clk: the clk whose parent gets returned
2381  *
2382  * Simply returns clk->parent. Returns NULL if clk is NULL.
2383  */
2384 struct clk *clk_get_parent(struct clk *clk)
2385 {
2386 struct clk *parent;
2387
2388 if (!clk)
2389 return NULL;
2390
2391 clk_prepare_lock();
2392
2393 parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2394 clk_prepare_unlock();
2395
2396 return parent;
2397 }
2398 EXPORT_SYMBOL_GPL(clk_get_parent);
2399
2400 static struct clk_core *__clk_init_parent(struct clk_core *core)
2401 {
2402 u8 index = 0;
2403
2404 if (core->num_parents > 1 && core->ops->get_parent)
2405 index = core->ops->get_parent(core->hw);
2406
2407 return clk_core_get_parent_by_index(core, index);
2408 }
2409
2410 static void clk_core_reparent(struct clk_core *core,
2411 struct clk_core *new_parent)
2412 {
2413 clk_reparent(core, new_parent);
2414 __clk_recalc_accuracies(core);
2415 __clk_recalc_rates(core, POST_RATE_CHANGE);
2416 }
2417
2418 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2419 {
2420 if (!hw)
2421 return;
2422
2423 clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2424 }
2425
2426 /**
2427  * clk_has_parent - check if a clock is a possible parent for another
2428  * @clk: clock source
2429  * @parent: parent clock source
2430  *
2431  * This function can be used in drivers that need to check that a clock can be
2432  * the parent of another without actually changing the parent.
2433  *
2434  * Returns true if @parent is a possible parent for @clk, false otherwise.
2435  */
2436 bool clk_has_parent(struct clk *clk, struct clk *parent)
2437 {
2438 struct clk_core *core, *parent_core;
2439 int i;
2440
2441
2442 if (!clk || !parent)
2443 return true;
2444
2445 core = clk->core;
2446 parent_core = parent->core;
2447
2448
2449 if (core->parent == parent_core)
2450 return true;
2451
2452 for (i = 0; i < core->num_parents; i++)
2453 if (!strcmp(core->parents[i].name, parent_core->name))
2454 return true;
2455
2456 return false;
2457 }
2458 EXPORT_SYMBOL_GPL(clk_has_parent);
2459
2460 static int clk_core_set_parent_nolock(struct clk_core *core,
2461 struct clk_core *parent)
2462 {
2463 int ret = 0;
2464 int p_index = 0;
2465 unsigned long p_rate = 0;
2466
2467 lockdep_assert_held(&prepare_lock);
2468
2469 if (!core)
2470 return 0;
2471
2472 if (core->parent == parent)
2473 return 0;
2474
2475
2476 if (core->num_parents > 1 && !core->ops->set_parent)
2477 return -EPERM;
2478
2479
2480 if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2481 return -EBUSY;
2482
2483 if (clk_core_rate_is_protected(core))
2484 return -EBUSY;
2485
2486
2487 if (parent) {
2488 p_index = clk_fetch_parent_index(core, parent);
2489 if (p_index < 0) {
2490 pr_debug("%s: clk %s can not be parent of clk %s\n",
2491 __func__, parent->name, core->name);
2492 return p_index;
2493 }
2494 p_rate = parent->rate;
2495 }
2496
2497 ret = clk_pm_runtime_get(core);
2498 if (ret)
2499 return ret;
2500
2501
2502 ret = __clk_speculate_rates(core, p_rate);
2503
2504
2505 if (ret & NOTIFY_STOP_MASK)
2506 goto runtime_put;
2507
2508
2509 ret = __clk_set_parent(core, parent, p_index);
2510
2511
2512 if (ret) {
2513 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2514 } else {
2515 __clk_recalc_rates(core, POST_RATE_CHANGE);
2516 __clk_recalc_accuracies(core);
2517 }
2518
2519 runtime_put:
2520 clk_pm_runtime_put(core);
2521
2522 return ret;
2523 }
2524
2525 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2526 {
2527 return clk_core_set_parent_nolock(hw->core, parent->core);
2528 }
2529 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2530
2531 /**
2532  * clk_set_parent - switch the parent of a mux clk
2533  * @clk: the mux clk whose input we are switching
2534  * @parent: the new input to clk
2535  *
2536  * Re-parent clk to use parent as its new input source. If clk is in
2537  * prepared state, the clk will get enabled for the duration of this call. If
2538  * that's not acceptable for a specific clk (e.g. the consumer can't handle
2539  * it), then the CLK_SET_PARENT_GATE flag can be set to disallow reparenting
2540  * while the clock is prepared.
2541  *
2542  * After successfully changing clk's parent, clk_set_parent will update the
2543  * clk topology, sysfs topology and propagate rate recalculation via
2544  * __clk_recalc_rates.
2545  *
2546  * Returns 0 on success, -EERROR otherwise.
2547  */
2548 int clk_set_parent(struct clk *clk, struct clk *parent)
2549 {
2550 int ret;
2551
2552 if (!clk)
2553 return 0;
2554
2555 clk_prepare_lock();
2556
2557 if (clk->exclusive_count)
2558 clk_core_rate_unprotect(clk->core);
2559
2560 ret = clk_core_set_parent_nolock(clk->core,
2561 parent ? parent->core : NULL);
2562
2563 if (clk->exclusive_count)
2564 clk_core_rate_protect(clk->core);
2565
2566 clk_prepare_unlock();
2567
2568 return ret;
2569 }
2570 EXPORT_SYMBOL_GPL(clk_set_parent);
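A consumer can probe reparenting safely by combining clk_has_parent() with clk_set_parent(). A sketch with a hypothetical low-power parent clock:

    #include <linux/clk.h>

    static int demo_use_lp_parent(struct clk *mux, struct clk *lp_parent)
    {
            if (!clk_has_parent(mux, lp_parent))
                    return -EINVAL; /* lp_parent is not among mux's inputs */

            return clk_set_parent(mux, lp_parent);
    }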
2571
2572 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2573 {
2574 int ret = -EINVAL;
2575
2576 lockdep_assert_held(&prepare_lock);
2577
2578 if (!core)
2579 return 0;
2580
2581 if (clk_core_rate_is_protected(core))
2582 return -EBUSY;
2583
2584 trace_clk_set_phase(core, degrees);
2585
2586 if (core->ops->set_phase) {
2587 ret = core->ops->set_phase(core->hw, degrees);
2588 if (!ret)
2589 core->phase = degrees;
2590 }
2591
2592 trace_clk_set_phase_complete(core, degrees);
2593
2594 return ret;
2595 }
2596
2597 /**
2598  * clk_set_phase - adjust the phase shift of a clock signal
2599  * @clk: clock signal source
2600  * @degrees: number of degrees the signal is shifted
2601  *
2602  * Shifts the phase of a clock signal by the specified
2603  * degrees. Returns 0 on success, -EERROR otherwise.
2604  *
2605  * This function makes no distinction about the input or reference
2606  * signal that we adjust the clock signal phase against. For example
2607  * phase locked-loop clock signal generators we may shift phase with
2608  * respect to feedback clock signal input, but for other cases the
2609  * clock phase may be shifted with respect to some other, unspecified
2610  * signal.
2611  *
2612  * Additionally the concept of phase shift does not propagate through
2613  * the clock tree hierarchy, which sets it apart from clock rates and
2614  * clock accuracy. A parent clock phase attribute does not have an
2615  * impact on the phase attribute of a child clock.
2616  */
2617 int clk_set_phase(struct clk *clk, int degrees)
2618 {
2619 int ret;
2620
2621 if (!clk)
2622 return 0;
2623
2624 /* sanity check degrees */
2625 degrees %= 360;
2626 if (degrees < 0)
2627 degrees += 360;
2628
2629 clk_prepare_lock();
2630
2631 if (clk->exclusive_count)
2632 clk_core_rate_unprotect(clk->core);
2633
2634 ret = clk_core_set_phase_nolock(clk->core, degrees);
2635
2636 if (clk->exclusive_count)
2637 clk_core_rate_protect(clk->core);
2638
2639 clk_prepare_unlock();
2640
2641 return ret;
2642 }
2643 EXPORT_SYMBOL_GPL(clk_set_phase);
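/*
 * Usage sketch (hypothetical consumer code): shift a sample clock a quarter
 * period after its parent, e.g. for an interface that latches data on a
 * delayed edge. Out-of-range values are normalized to 0-359 above.
 */
#if 0
	ret = clk_set_phase(sample_clk, 90);
	if (!ret)
		dev_dbg(dev, "phase now %d degrees\n", clk_get_phase(sample_clk));
#endif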
2644
2645 static int clk_core_get_phase(struct clk_core *core)
2646 {
2647 int ret;
2648
2649 lockdep_assert_held(&prepare_lock);
2650 if (!core->ops->get_phase)
2651 return 0;
2652
2653 /* Always try to update cached phase if possible */
2654 ret = core->ops->get_phase(core->hw);
2655 if (ret >= 0)
2656 core->phase = ret;
2657
2658 return ret;
2659 }
2660
2661 /**
2662  * clk_get_phase - return the phase shift of a clock signal
2663  * @clk: clock signal source
2664  *
2665  * Returns the phase shift of a clock node in degrees, otherwise returns
2666  * errno.
2667  */
2668 int clk_get_phase(struct clk *clk)
2669 {
2670 int ret;
2671
2672 if (!clk)
2673 return 0;
2674
2675 clk_prepare_lock();
2676 ret = clk_core_get_phase(clk->core);
2677 clk_prepare_unlock();
2678
2679 return ret;
2680 }
2681 EXPORT_SYMBOL_GPL(clk_get_phase);
2682
2683 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2684 {
2685 /* Assume a default value of 50% */
2686 core->duty.num = 1;
2687 core->duty.den = 2;
2688 }
2689
2690 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2691
2692 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2693 {
2694 struct clk_duty *duty = &core->duty;
2695 int ret = 0;
2696
2697 if (!core->ops->get_duty_cycle)
2698 return clk_core_update_duty_cycle_parent_nolock(core);
2699
2700 ret = core->ops->get_duty_cycle(core->hw, duty);
2701 if (ret)
2702 goto reset;
2703
2704 /* Don't trust the clock provider too much */
2705 if (duty->den == 0 || duty->num > duty->den) {
2706 ret = -EINVAL;
2707 goto reset;
2708 }
2709
2710 return 0;
2711
2712 reset:
2713 clk_core_reset_duty_cycle_nolock(core);
2714 return ret;
2715 }
2716
2717 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2718 {
2719 int ret = 0;
2720
2721 if (core->parent &&
2722 core->flags & CLK_DUTY_CYCLE_PARENT) {
2723 ret = clk_core_update_duty_cycle_nolock(core->parent);
2724 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2725 } else {
2726 clk_core_reset_duty_cycle_nolock(core);
2727 }
2728
2729 return ret;
2730 }
2731
2732 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2733 struct clk_duty *duty);
2734
2735 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2736 struct clk_duty *duty)
2737 {
2738 int ret;
2739
2740 lockdep_assert_held(&prepare_lock);
2741
2742 if (clk_core_rate_is_protected(core))
2743 return -EBUSY;
2744
2745 trace_clk_set_duty_cycle(core, duty);
2746
2747 if (!core->ops->set_duty_cycle)
2748 return clk_core_set_duty_cycle_parent_nolock(core, duty);
2749
2750 ret = core->ops->set_duty_cycle(core->hw, duty);
2751 if (!ret)
2752 memcpy(&core->duty, duty, sizeof(*duty));
2753
2754 trace_clk_set_duty_cycle_complete(core, duty);
2755
2756 return ret;
2757 }
2758
2759 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2760 struct clk_duty *duty)
2761 {
2762 int ret = 0;
2763
2764 if (core->parent &&
2765 core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2766 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2767 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2768 }
2769
2770 return ret;
2771 }
2772
2773 /**
2774  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2775  * @clk: clock signal source
2776  * @num: numerator of the duty cycle ratio to be applied
2777  * @den: denominator of the duty cycle ratio to be applied
2778  *
2779  * Apply the duty cycle ratio if the ratio is valid and the clock can
2780  * perform this operation
2781  *
2782  * Returns (0) on success, a negative errno otherwise.
2783  */
2784 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2785 {
2786 int ret;
2787 struct clk_duty duty;
2788
2789 if (!clk)
2790 return 0;
2791
2792 /* sanity check the ratio */
2793 if (den == 0 || num > den)
2794 return -EINVAL;
2795
2796 duty.num = num;
2797 duty.den = den;
2798
2799 clk_prepare_lock();
2800
2801 if (clk->exclusive_count)
2802 clk_core_rate_unprotect(clk->core);
2803
2804 ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2805
2806 if (clk->exclusive_count)
2807 clk_core_rate_protect(clk->core);
2808
2809 clk_prepare_unlock();
2810
2811 return ret;
2812 }
2813 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
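/*
 * Usage sketch (hypothetical consumer code): request a 25% duty ratio and
 * read it back as a percentage. The num/den pair must satisfy the sanity
 * check above (den != 0, num <= den).
 */
#if 0
	ret = clk_set_duty_cycle(out_clk, 1, 4);	/* 1/4 -> 25% high time */
	if (!ret)
		pct = clk_get_scaled_duty_cycle(out_clk, 100);	/* -> 25 */
#endif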
2814
2815 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2816 unsigned int scale)
2817 {
2818 struct clk_duty *duty = &core->duty;
2819 int ret;
2820
2821 clk_prepare_lock();
2822
2823 ret = clk_core_update_duty_cycle_nolock(core);
2824 if (!ret)
2825 ret = mult_frac(scale, duty->num, duty->den);
2826
2827 clk_prepare_unlock();
2828
2829 return ret;
2830 }
2831
2832 /**
2833  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2834  * @clk: clock signal source
2835  * @scale: scaling factor to be applied to represent the ratio as an integer
2836  *
2837  * Returns the duty cycle ratio of a clock node multiplied by the provided
2838  * scaling factor, or negative errno on error.
2839  */
2840 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2841 {
2842 if (!clk)
2843 return 0;
2844
2845 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2846 }
2847 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
2848
2849 /**
2850  * clk_is_match - check if two clk's point to the same hardware clock
2851  * @p: clk compared against q
2852  * @q: clk compared against p
2853  *
2854  * Returns true if the two struct clk pointers both point to the same hardware
2855  * clock node. Put differently, returns true if @p and @q
2856  * share the same struct clk_core object.
2857  *
2858  * Returns false otherwise. Note that two NULL clks are treated as matching.
2859  */
2860 bool clk_is_match(const struct clk *p, const struct clk *q)
2861 {
2862 /* trivial case: identical struct clk's or both NULL */
2863 if (p == q)
2864 return true;
2865
2866 /* true if clk->core pointers match. Avoid dereferencing garbage */
2867 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2868 if (p->core == q->core)
2869 return true;
2870
2871 return false;
2872 }
2873 EXPORT_SYMBOL_GPL(clk_is_match);
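/*
 * Usage sketch (hypothetical consumer code): two struct clk handles obtained
 * through different lookup paths are distinct objects even when they refer to
 * the same hardware clock, so compare them with clk_is_match(), not '=='.
 */
#if 0
	struct clk *a = devm_clk_get(dev, "bus");
	struct clk *b = of_clk_get_by_name(dev->of_node, "bus");

	if (!IS_ERR(a) && !IS_ERR(b) && clk_is_match(a, b))
		dev_dbg(dev, "both handles share one clk_core\n");
#endif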
2874
2875 /***        debugfs support        ***/
2876
2877 #ifdef CONFIG_DEBUG_FS
2878 #include <linux/debugfs.h>
2879
2880 static struct dentry *rootdir;
2881 static int inited = 0;
2882 static DEFINE_MUTEX(clk_debug_lock);
2883 static HLIST_HEAD(clk_debug_list);
2884
2885 static struct hlist_head *orphan_list[] = {
2886 &clk_orphan_list,
2887 NULL,
2888 };
2889
2890 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2891 int level)
2892 {
2893 int phase;
2894
2895 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2896 level * 3 + 1, "",
2897 30 - level * 3, c->name,
2898 c->enable_count, c->prepare_count, c->protect_count,
2899 clk_core_get_rate(c), clk_core_get_accuracy(c));
2900
2901 phase = clk_core_get_phase(c);
2902 if (phase >= 0)
2903 seq_printf(s, "%5d", phase);
2904 else
2905 seq_puts(s, "-----");
2906
2907 seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
2908 }
2909
2910 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2911 int level)
2912 {
2913 struct clk_core *child;
2914
2915 clk_summary_show_one(s, c, level);
2916
2917 hlist_for_each_entry(child, &c->children, child_node)
2918 clk_summary_show_subtree(s, child, level + 1);
2919 }
2920
2921 static int clk_summary_show(struct seq_file *s, void *data)
2922 {
2923 struct clk_core *c;
2924 struct hlist_head **lists = (struct hlist_head **)s->private;
2925
2926 seq_puts(s, " enable prepare protect duty\n");
2927 seq_puts(s, " clock count count count rate accuracy phase cycle\n");
2928 seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2929
2930 clk_prepare_lock();
2931
2932 for (; *lists; lists++)
2933 hlist_for_each_entry(c, *lists, child_node)
2934 clk_summary_show_subtree(s, c, 0);
2935
2936 clk_prepare_unlock();
2937
2938 return 0;
2939 }
2940 DEFINE_SHOW_ATTRIBUTE(clk_summary);
2941
2942 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2943 {
2944 int phase;
2945 unsigned long min_rate, max_rate;
2946
2947 clk_core_get_boundaries(c, &min_rate, &max_rate);
2948
2949 /* This should be JSON format, i.e. elements separated with a comma */
2950 seq_printf(s, "\"%s\": { ", c->name);
2951 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2952 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2953 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2954 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2955 seq_printf(s, "\"min_rate\": %lu,", min_rate);
2956 seq_printf(s, "\"max_rate\": %lu,", max_rate);
2957 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2958 phase = clk_core_get_phase(c);
2959 if (phase >= 0)
2960 seq_printf(s, "\"phase\": %d,", phase);
2961 seq_printf(s, "\"duty_cycle\": %u",
2962 clk_core_get_scaled_duty_cycle(c, 100000));
2963 }
2964
2965 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2966 {
2967 struct clk_core *child;
2968
2969 clk_dump_one(s, c, level);
2970
2971 hlist_for_each_entry(child, &c->children, child_node) {
2972 seq_putc(s, ',');
2973 clk_dump_subtree(s, child, level + 1);
2974 }
2975
2976 seq_putc(s, '}');
2977 }
2978
2979 static int clk_dump_show(struct seq_file *s, void *data)
2980 {
2981 struct clk_core *c;
2982 bool first_node = true;
2983 struct hlist_head **lists = (struct hlist_head **)s->private;
2984
2985 seq_putc(s, '{');
2986 clk_prepare_lock();
2987
2988 for (; *lists; lists++) {
2989 hlist_for_each_entry(c, *lists, child_node) {
2990 if (!first_node)
2991 seq_putc(s, ',');
2992 first_node = false;
2993 clk_dump_subtree(s, c, 0);
2994 }
2995 }
2996
2997 clk_prepare_unlock();
2998
2999 seq_puts(s, "}\n");
3000 return 0;
3001 }
3002 DEFINE_SHOW_ATTRIBUTE(clk_dump);
3003
3004 static const struct {
3005 unsigned long flag;
3006 const char *name;
3007 } clk_flags[] = {
3008 #define ENTRY(f) { f, #f }
3009 ENTRY(CLK_SET_RATE_GATE),
3010 ENTRY(CLK_SET_PARENT_GATE),
3011 ENTRY(CLK_SET_RATE_PARENT),
3012 ENTRY(CLK_IGNORE_UNUSED),
3013 ENTRY(CLK_GET_RATE_NOCACHE),
3014 ENTRY(CLK_SET_RATE_NO_REPARENT),
3015 ENTRY(CLK_GET_ACCURACY_NOCACHE),
3016 ENTRY(CLK_RECALC_NEW_RATES),
3017 ENTRY(CLK_SET_RATE_UNGATE),
3018 ENTRY(CLK_IS_CRITICAL),
3019 ENTRY(CLK_OPS_PARENT_ENABLE),
3020 ENTRY(CLK_DUTY_CYCLE_PARENT),
3021 #undef ENTRY
3022 };
3023
3024 static int clk_flags_show(struct seq_file *s, void *data)
3025 {
3026 struct clk_core *core = s->private;
3027 unsigned long flags = core->flags;
3028 unsigned int i;
3029
3030 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3031 if (flags & clk_flags[i].flag) {
3032 seq_printf(s, "%s\n", clk_flags[i].name);
3033 flags &= ~clk_flags[i].flag;
3034 }
3035 }
3036 if (flags) {
3037 /* Unknown flags */
3038 seq_printf(s, "0x%lx\n", flags);
3039 }
3040
3041 return 0;
3042 }
3043 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3044
3045 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3046 unsigned int i, char terminator)
3047 {
3048 struct clk_core *parent;
3049
3050 /*
3051  * Go through the following options to fetch a parent's name.
3052  *
3053  * 1. Fetch the registered parent clock and use its name
3054  * 2. Use the global (fallback) name if specified
3055  * 3. Use the local fw_name if provided
3056  * 4. Fetch parent clock's clock-output-name if DT index was set
3057  *
3058  * This may still fail in some cases, such as when the parent is
3059  * specified directly via a struct clk_hw pointer, but it isn't
3060  * registered (yet).
3061  */
3062 parent = clk_core_get_parent_by_index(core, i);
3063 if (parent)
3064 seq_puts(s, parent->name);
3065 else if (core->parents[i].name)
3066 seq_puts(s, core->parents[i].name);
3067 else if (core->parents[i].fw_name)
3068 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3069 else if (core->parents[i].index >= 0)
3070 seq_puts(s,
3071 of_clk_get_parent_name(core->of_node,
3072 core->parents[i].index));
3073 else
3074 seq_puts(s, "(missing)");
3075
3076 seq_putc(s, terminator);
3077 }
3078
3079 static int possible_parents_show(struct seq_file *s, void *data)
3080 {
3081 struct clk_core *core = s->private;
3082 int i;
3083
3084 for (i = 0; i < core->num_parents - 1; i++)
3085 possible_parent_show(s, core, i, ' ');
3086
3087 possible_parent_show(s, core, i, '\n');
3088
3089 return 0;
3090 }
3091 DEFINE_SHOW_ATTRIBUTE(possible_parents);
3092
3093 static int current_parent_show(struct seq_file *s, void *data)
3094 {
3095 struct clk_core *core = s->private;
3096
3097 if (core->parent)
3098 seq_printf(s, "%s\n", core->parent->name);
3099
3100 return 0;
3101 }
3102 DEFINE_SHOW_ATTRIBUTE(current_parent);
3103
3104 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3105 {
3106 struct clk_core *core = s->private;
3107 struct clk_duty *duty = &core->duty;
3108
3109 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3110
3111 return 0;
3112 }
3113 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3114
3115 static int clk_min_rate_show(struct seq_file *s, void *data)
3116 {
3117 struct clk_core *core = s->private;
3118 unsigned long min_rate, max_rate;
3119
3120 clk_prepare_lock();
3121 clk_core_get_boundaries(core, &min_rate, &max_rate);
3122 clk_prepare_unlock();
3123 seq_printf(s, "%lu\n", min_rate);
3124
3125 return 0;
3126 }
3127 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3128
3129 static int clk_max_rate_show(struct seq_file *s, void *data)
3130 {
3131 struct clk_core *core = s->private;
3132 unsigned long min_rate, max_rate;
3133
3134 clk_prepare_lock();
3135 clk_core_get_boundaries(core, &min_rate, &max_rate);
3136 clk_prepare_unlock();
3137 seq_printf(s, "%lu\n", max_rate);
3138
3139 return 0;
3140 }
3141 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3142
3143 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3144 {
3145 struct dentry *root;
3146
3147 if (!core || !pdentry)
3148 return;
3149
3150 root = debugfs_create_dir(core->name, pdentry);
3151 core->dentry = root;
3152
3153 debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
3154 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3155 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3156 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3157 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3158 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3159 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3160 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3161 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3162 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3163 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3164 &clk_duty_cycle_fops);
3165
3166 if (core->num_parents > 0)
3167 debugfs_create_file("clk_parent", 0444, root, core,
3168 &current_parent_fops);
3169
3170 if (core->num_parents > 1)
3171 debugfs_create_file("clk_possible_parents", 0444, root, core,
3172 &possible_parents_fops);
3173
3174 if (core->ops->debug_init)
3175 core->ops->debug_init(core->hw, core->dentry);
3176 }
3177
3178 /**
3179  * clk_debug_register - add a clk node to the debugfs clk directory
3180  * @core: the clk being added to the debugfs clk directory
3181  *
3182  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3183  * initialized.  Otherwise it bails out early since the debugfs clk directory
3184  * will be created lazily by clk_debug_init as part of a late_initcall.
3185  */
3186 static void clk_debug_register(struct clk_core *core)
3187 {
3188 mutex_lock(&clk_debug_lock);
3189 hlist_add_head(&core->debug_node, &clk_debug_list);
3190 if (inited)
3191 clk_debug_create_one(core, rootdir);
3192 mutex_unlock(&clk_debug_lock);
3193 }
3194
3195 /**
3196  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3197  * @core: the clk being removed from the debugfs clk directory
3198  *
3199  * Dynamically removes a clk and all its child nodes from the
3200  * debugfs clk directory if clk->dentry points to debugfs created by
3201  * clk_debug_register in __clk_core_init.
3202  */
3203 static void clk_debug_unregister(struct clk_core *core)
3204 {
3205 mutex_lock(&clk_debug_lock);
3206 hlist_del_init(&core->debug_node);
3207 debugfs_remove_recursive(core->dentry);
3208 core->dentry = NULL;
3209 mutex_unlock(&clk_debug_lock);
3210 }
3211
3212 /**
3213  * clk_debug_init - lazily populate the debugfs clk directory
3214  *
3215  * clks are often initialized very early during boot before memory can be
3216  * dynamically allocated and well before debugfs is setup. This function
3217  * populates the debugfs clk directory once at boot-time when we know that
3218  * debugfs is setup. It should only be called once at boot-time, all other
3219  * clks added dynamically will be done so with clk_debug_register.
3220  */
3221 static int __init clk_debug_init(void)
3222 {
3223 struct clk_core *core;
3224
3225 rootdir = debugfs_create_dir("clk", NULL);
3226
3227 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3228 &clk_summary_fops);
3229 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3230 &clk_dump_fops);
3231 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3232 &clk_summary_fops);
3233 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3234 &clk_dump_fops);
3235
3236 mutex_lock(&clk_debug_lock);
3237 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3238 clk_debug_create_one(core, rootdir);
3239
3240 inited = 1;
3241 mutex_unlock(&clk_debug_lock);
3242
3243 return 0;
3244 }
3245 late_initcall(clk_debug_init);
3246 #else
3247 static inline void clk_debug_register(struct clk_core *core) { }
3248 static inline void clk_debug_reparent(struct clk_core *core,
3249 struct clk_core *new_parent)
3250 {
3251 }
3252 static inline void clk_debug_unregister(struct clk_core *core)
3253 {
3254 }
3255 #endif
3256
3257 static void clk_core_reparent_orphans_nolock(void)
3258 {
3259 struct clk_core *orphan;
3260 struct hlist_node *tmp2;
3261
3262 /*
3263  * walk the list of orphan clocks and reparent any that newly finds a
3264  * parent.
3265  */
3266 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3267 struct clk_core *parent = __clk_init_parent(orphan);
3268
3269 /*
3270  * We need to use __clk_set_parent_before() and _after() to
3271  * properly migrate any prepare/enable count of the orphan
3272  * clock. This is important for CLK_IS_CRITICAL clocks, which
3273  * are enabled during init but might not have a parent yet.
3274  */
3275 if (parent) {
3276 /* update the clk tree topology */
3277 __clk_set_parent_before(orphan, parent);
3278 __clk_set_parent_after(orphan, parent, NULL);
3279 __clk_recalc_accuracies(orphan);
3280 __clk_recalc_rates(orphan, 0);
3281 }
3282 }
3283 }
3284
3285 /**
3286  * __clk_core_init - initialize the data structures in a struct clk_core
3287  * @core:	clk_core being initialized
3288  *
3289  * Initializes the lists in struct clk_core, queries the hardware for the
3290  * parent and rate and sets them both.
3291  */
3292 static int __clk_core_init(struct clk_core *core)
3293 {
3294 int ret;
3295 unsigned long rate;
3296
3297 if (!core)
3298 return -EINVAL;
3299
3300 clk_prepare_lock();
3301
3302 ret = clk_pm_runtime_get(core);
3303 if (ret)
3304 goto unlock;
3305
3306 /* check to see if a clock with this name is already registered */
3307 if (clk_core_lookup(core->name)) {
3308 pr_debug("%s: clk %s already initialized\n",
3309 __func__, core->name);
3310 ret = -EEXIST;
3311 goto out;
3312 }
3313
3314 /* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3315 if (core->ops->set_rate &&
3316 !((core->ops->round_rate || core->ops->determine_rate) &&
3317 core->ops->recalc_rate)) {
3318 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3319 __func__, core->name);
3320 ret = -EINVAL;
3321 goto out;
3322 }
3323
3324 if (core->ops->set_parent && !core->ops->get_parent) {
3325 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3326 __func__, core->name);
3327 ret = -EINVAL;
3328 goto out;
3329 }
3330
3331 if (core->num_parents > 1 && !core->ops->get_parent) {
3332 pr_err("%s: %s must implement .get_parent as it has multi parents\n",
3333 __func__, core->name);
3334 ret = -EINVAL;
3335 goto out;
3336 }
3337
3338 if (core->ops->set_rate_and_parent &&
3339 !(core->ops->set_parent && core->ops->set_rate)) {
3340 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3341 __func__, core->name);
3342 ret = -EINVAL;
3343 goto out;
3344 }
3345
3346 /*
3347  * optional platform-specific magic
3348  *
3349  * The .init callback is not used by any of the basic clock types, but
3350  * exists for weird hardware that must perform initialization magic.
3351  * Please consider other ways of solving initialization problems before
3352  * using this callback, as its use is discouraged.
3353  *
3354  * If it exists, this callback should be called before any other callback
3355  * of the clock.
3356  */
3357 if (core->ops->init)
3358 core->ops->init(core->hw);
3359
3360
3361 core->parent = __clk_init_parent(core);
3362
3363 /*
3364  * Populate core->parent if parent has already been clk_core_init'd. If
3365  * parent has not yet been clk_core_init'd then place clk in the orphan
3366  * list.  If clk doesn't have any parents then place it in the root
3367  * clk list.
3368  *
3369  * Every time a new clk is clk_init'd then we walk the list of orphan
3370  * clocks and re-parent any that are children of the clock currently
3371  * being clk_init'd.
3372  */
3373 if (core->parent) {
3374 hlist_add_head(&core->child_node,
3375 &core->parent->children);
3376 core->orphan = core->parent->orphan;
3377 } else if (!core->num_parents) {
3378 hlist_add_head(&core->child_node, &clk_root_list);
3379 core->orphan = false;
3380 } else {
3381 hlist_add_head(&core->child_node, &clk_orphan_list);
3382 core->orphan = true;
3383 }
3384
3385 /*
3386  * Set clk's accuracy.  The preferred method is to use
3387  * .recalc_accuracy. For simple clocks and lazy developers the default
3388  * fallback is to use the parent's accuracy.  If a clock doesn't have a
3389  * parent or its accuracy is 0, then accuracy is set to zero (perfect
3390  * clock).
3391  */
3392 if (core->ops->recalc_accuracy)
3393 core->accuracy = core->ops->recalc_accuracy(core->hw,
3394 __clk_get_accuracy(core->parent));
3395 else if (core->parent)
3396 core->accuracy = core->parent->accuracy;
3397 else
3398 core->accuracy = 0;
3399
3400 /*
3401  * Set clk's phase by clk_core_get_phase() caching the phase.
3402  * Since a phase is by definition relative to its parent, just
3403  * query the current clock phase, or just assume it's in phase.
3404  */
3405 clk_core_get_phase(core);
3406
3407 /*
3408  * Set clk's duty cycle.
3409  */
3410 clk_core_update_duty_cycle_nolock(core);
3411
3412 /*
3413  * Set clk's rate.  The preferred method is to use .recalc_rate.  For
3414  * simple clocks and lazy developers the default fallback is to use the
3415  * parent's rate.  If a clock doesn't have a parent (or is orphaned)
3416  * then rate is set to zero.
3417  */
3418 if (core->ops->recalc_rate)
3419 rate = core->ops->recalc_rate(core->hw,
3420 clk_core_get_rate_nolock(core->parent));
3421 else if (core->parent)
3422 rate = core->parent->rate;
3423 else
3424 rate = 0;
3425 core->rate = core->req_rate = rate;
3426
3427 /*
3428  * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3429  * don't get accidentally disabled when walking the orphan tree and
3430  * reparenting clocks.
3431  */
3432 if (core->flags & CLK_IS_CRITICAL) {
3433 unsigned long flags;
3434
3435 ret = clk_core_prepare(core);
3436 if (ret)
3437 goto out;
3438
3439 flags = clk_enable_lock();
3440 ret = clk_core_enable(core);
3441 clk_enable_unlock(flags);
3442 if (ret) {
3443 clk_core_unprepare(core);
3444 goto out;
3445 }
3446 }
3447
3448 clk_core_reparent_orphans_nolock();
3449
3450
3451 kref_init(&core->ref);
3452 out:
3453 clk_pm_runtime_put(core);
3454 unlock:
3455 if (ret)
3456 hlist_del_init(&core->child_node);
3457
3458 clk_prepare_unlock();
3459
3460 if (!ret)
3461 clk_debug_register(core);
3462
3463 return ret;
3464 }
3465
3466 /**
3467  * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3468  * @core: clk to add consumer to
3469  * @clk: consumer to link to a clk
3470  */
3471 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3472 {
3473 clk_prepare_lock();
3474 hlist_add_head(&clk->clks_node, &core->clks);
3475 clk_prepare_unlock();
3476 }
3477
3478 /**
3479  * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3480  * @clk: consumer to unlink
3481  */
3482 static void clk_core_unlink_consumer(struct clk *clk)
3483 {
3484 lockdep_assert_held(&prepare_lock);
3485 hlist_del(&clk->clks_node);
3486 }
3487
3488 /**
3489  * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
3490  * @core: clk to allocate a consumer for
3491  * @dev_id: string describing device name
3492  * @con_id: connection ID string on device
3493  *
3494  * Returns: clk consumer left unlinked from the consumer list
3495  */
3496 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3497 const char *con_id)
3498 {
3499 struct clk *clk;
3500
3501 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3502 if (!clk)
3503 return ERR_PTR(-ENOMEM);
3504
3505 clk->core = core;
3506 clk->dev_id = dev_id;
3507 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3508 clk->max_rate = ULONG_MAX;
3509
3510 return clk;
3511 }
3512
3513 /**
3514  * free_clk - Free a clk consumer
3515  * @clk: clk consumer to free
3516  *
3517  * Note, this assumes the clk has been unlinked from the clk_core consumer
3518  * list.
3519  */
3520 static void free_clk(struct clk *clk)
3521 {
3522 kfree_const(clk->con_id);
3523 kfree(clk);
3524 }
3525
3526 /**
3527  * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
3528  * a clk_hw
3529  * @dev: clk consumer device
3530  * @hw: clk_hw associated with the clk being consumed
3531  * @dev_id: string describing device name
3532  * @con_id: connection ID string on device
3533  *
3534  * This is the main function used to create a clk pointer for use by clk
3535  * consumers. It connects a consumer to the clk_core and clk_hw structures
3536  * used by the framework and clk provider respectively.
3537  */
3538 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3539 const char *dev_id, const char *con_id)
3540 {
3541 struct clk *clk;
3542 struct clk_core *core;
3543
3544 /* This is to allow this function to be chained to others */
3545 if (IS_ERR_OR_NULL(hw))
3546 return ERR_CAST(hw);
3547
3548 core = hw->core;
3549 clk = alloc_clk(core, dev_id, con_id);
3550 if (IS_ERR(clk))
3551 return clk;
3552 clk->dev = dev;
3553
3554 if (!try_module_get(core->owner)) {
3555 free_clk(clk);
3556 return ERR_PTR(-ENOENT);
3557 }
3558
3559 kref_get(&core->ref);
3560 clk_core_link_consumer(core, clk);
3561
3562 return clk;
3563 }
3564
3565 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3566 {
3567 const char *dst;
3568
3569 if (!src) {
3570 if (must_exist)
3571 return -EINVAL;
3572 return 0;
3573 }
3574
3575 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3576 if (!dst)
3577 return -ENOMEM;
3578
3579 return 0;
3580 }
3581
3582 static int clk_core_populate_parent_map(struct clk_core *core,
3583 const struct clk_init_data *init)
3584 {
3585 u8 num_parents = init->num_parents;
3586 const char * const *parent_names = init->parent_names;
3587 const struct clk_hw **parent_hws = init->parent_hws;
3588 const struct clk_parent_data *parent_data = init->parent_data;
3589 int i, ret = 0;
3590 struct clk_parent_map *parents, *parent;
3591
3592 if (!num_parents)
3593 return 0;
3594
3595 /*
3596  * Avoid unnecessary string look-ups of clk_core's possible parents by
3597  * allocating a table of clk_parent_map instead of clk_core pointers.
3598  */
3599 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3600 core->parents = parents;
3601 if (!parents)
3602 return -ENOMEM;
3603
3604 /* Copy everything over because it might be __initdata */
3605 for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3606 parent->index = -1;
3607 if (parent_names) {
3608 /* throw a WARN if any entries are NULL */
3609 WARN(!parent_names[i],
3610 "%s: invalid NULL in %s's .parent_names\n",
3611 __func__, core->name);
3612 ret = clk_cpy_name(&parent->name, parent_names[i],
3613 true);
3614 } else if (parent_data) {
3615 parent->hw = parent_data[i].hw;
3616 parent->index = parent_data[i].index;
3617 ret = clk_cpy_name(&parent->fw_name,
3618 parent_data[i].fw_name, false);
3619 if (!ret)
3620 ret = clk_cpy_name(&parent->name,
3621 parent_data[i].name,
3622 false);
3623 } else if (parent_hws) {
3624 parent->hw = parent_hws[i];
3625 } else {
3626 ret = -EINVAL;
3627 WARN(1, "Must specify parents if num_parents > 0\n");
3628 }
3629
3630 if (ret) {
3631 do {
3632 kfree_const(parents[i].name);
3633 kfree_const(parents[i].fw_name);
3634 } while (--i >= 0);
3635 kfree(parents);
3636
3637 return ret;
3638 }
3639 }
3640
3641 return 0;
3642 }
3643
3644 static void clk_core_free_parent_map(struct clk_core *core)
3645 {
3646 int i = core->num_parents;
3647
3648 if (!core->num_parents)
3649 return;
3650
3651 while (--i >= 0) {
3652 kfree_const(core->parents[i].name);
3653 kfree_const(core->parents[i].fw_name);
3654 }
3655
3656 kfree(core->parents);
3657 }
3658
3659 static struct clk *
3660 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3661 {
3662 int ret;
3663 struct clk_core *core;
3664 const struct clk_init_data *init = hw->init;
3665
3666 /*
3667  * The init data is not supposed to be used outside of registration path.
3668  * Set it to NULL so that provider drivers can't use it either and so that
3669  * we catch use of hw->init early on in the core.
3670  */
3671 hw->init = NULL;
3672
3673 core = kzalloc(sizeof(*core), GFP_KERNEL);
3674 if (!core) {
3675 ret = -ENOMEM;
3676 goto fail_out;
3677 }
3678
3679 core->name = kstrdup_const(init->name, GFP_KERNEL);
3680 if (!core->name) {
3681 ret = -ENOMEM;
3682 goto fail_name;
3683 }
3684
3685 if (WARN_ON(!init->ops)) {
3686 ret = -EINVAL;
3687 goto fail_ops;
3688 }
3689 core->ops = init->ops;
3690
3691 if (dev && pm_runtime_enabled(dev))
3692 core->rpm_enabled = true;
3693 core->dev = dev;
3694 core->of_node = np;
3695 if (dev && dev->driver)
3696 core->owner = dev->driver->owner;
3697 core->hw = hw;
3698 core->flags = init->flags;
3699 core->num_parents = init->num_parents;
3700 core->min_rate = 0;
3701 core->max_rate = ULONG_MAX;
3702 hw->core = core;
3703
3704 ret = clk_core_populate_parent_map(core, init);
3705 if (ret)
3706 goto fail_parents;
3707
3708 INIT_HLIST_HEAD(&core->clks);
3709
3710 /*
3711  * Don't call clk_hw_create_clk() here because that would pin the
3712  * provider module to itself and prevent it from ever being removed.
3713  */
3714 hw->clk = alloc_clk(core, NULL, NULL);
3715 if (IS_ERR(hw->clk)) {
3716 ret = PTR_ERR(hw->clk);
3717 goto fail_create_clk;
3718 }
3719
3720 clk_core_link_consumer(hw->core, hw->clk);
3721
3722 ret = __clk_core_init(core);
3723 if (!ret)
3724 return hw->clk;
3725
3726 clk_prepare_lock();
3727 clk_core_unlink_consumer(hw->clk);
3728 clk_prepare_unlock();
3729
3730 free_clk(hw->clk);
3731 hw->clk = NULL;
3732
3733 fail_create_clk:
3734 clk_core_free_parent_map(core);
3735 fail_parents:
3736 fail_ops:
3737 kfree_const(core->name);
3738 fail_name:
3739 kfree(core);
3740 fail_out:
3741 return ERR_PTR(ret);
3742 }
3743
3744 /**
3745  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
3746  * @dev: Device to get device node of
3747  *
3748  * Return: device node pointer of @dev, or the device node pointer of
3749  * @dev->parent if dev doesn't have a device node, or NULL if neither
3750  * @dev or @dev->parent have a device node.
3751  */
3752 static struct device_node *dev_or_parent_of_node(struct device *dev)
3753 {
3754 struct device_node *np;
3755
3756 if (!dev)
3757 return NULL;
3758
3759 np = dev_of_node(dev);
3760 if (!np)
3761 np = dev_of_node(dev->parent);
3762
3763 return np;
3764 }
3765
3766 /**
3767  * clk_register - allocate a new clock, register it and return an opaque cookie
3768  * @dev: device that is registering this clock
3769  * @hw: link to hardware-specific clock data
3770  *
3771  * clk_register is the *deprecated* interface for populating the clock tree with
3772  * new clock nodes. Use clk_hw_register() instead.
3773  *
3774  * Returns: a pointer to the newly allocated struct clk which
3775  * cannot be dereferenced by driver code but may be used in conjunction with the
3776  * rest of the clock API.  In the event of an error clk_register will return an
3777  * error code; drivers must test for an error code after calling clk_register.
3778  */
3779 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3780 {
3781 return __clk_register(dev, dev_or_parent_of_node(dev), hw);
3782 }
3783 EXPORT_SYMBOL_GPL(clk_register);
3784
3785 /**
3786  * clk_hw_register - register a clk_hw and return an error code
3787  * @dev: device that is registering this clock
3788  * @hw: link to hardware-specific clock data
3789  *
3790  * clk_hw_register is the primary interface for populating the clock tree with
3791  * new clock nodes. It returns an integer equal to zero indicating success or
3792  * less than zero indicating failure. Drivers must test for an error code after
3793  * calling clk_hw_register().
3794  */
3795 int clk_hw_register(struct device *dev, struct clk_hw *hw)
3796 {
3797 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
3798 hw));
3799 }
3800 EXPORT_SYMBOL_GPL(clk_hw_register);
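/*
 * Provider-side sketch (hypothetical ops and names; real drivers normally use
 * the basic clock types instead of open-coding a gate). Note that hw->init is
 * consumed and cleared by __clk_register(), so it may point at stack data.
 */
#if 0
static int example_register(struct device *dev, struct example_gate *gate)
{
	struct clk_init_data init = {
		.name = "example_gate",
		.ops = &example_gate_ops,
		.parent_names = (const char *[]){ "osc24m" },
		.num_parents = 1,
	};

	gate->hw.init = &init;
	return clk_hw_register(dev, &gate->hw);
}
#endif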
3801
3802 /*
3803  * of_clk_hw_register - register a clk_hw and return an error code
3804  * @node: device_node of device that is registering this clock
3805  * @hw: link to hardware-specific clock data
3806  *
3807  * of_clk_hw_register() is the primary interface for populating the clock tree
3808  * with new clock nodes when a struct device is not available, but a struct
3809  * device_node is. It returns an integer equal to zero indicating success or
3810  * less than zero indicating failure. Drivers must test for an error code
3811  * after calling of_clk_hw_register().
3812  */
3813 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
3814 {
3815 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
3816 }
3817 EXPORT_SYMBOL_GPL(of_clk_hw_register);
3818
3819 /* Free memory allocated for a clock. */
3820 static void __clk_release(struct kref *ref)
3821 {
3822 struct clk_core *core = container_of(ref, struct clk_core, ref);
3823
3824 lockdep_assert_held(&prepare_lock);
3825
3826 clk_core_free_parent_map(core);
3827 kfree_const(core->name);
3828 kfree(core);
3829 }
3830
3831 /*
3832  * Empty clk_ops for unregistered clocks. These are used temporarily
3833  * after clk_unregister() was called on a clock and until last clock
3834  * consumer calls clk_put() and the struct clk object is freed.
3835  */
3836 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3837 {
3838 return -ENXIO;
3839 }
3840
3841 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3842 {
3843 WARN_ON_ONCE(1);
3844 }
3845
3846 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3847 unsigned long parent_rate)
3848 {
3849 return -ENXIO;
3850 }
3851
3852 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3853 {
3854 return -ENXIO;
3855 }
3856
3857 static const struct clk_ops clk_nodrv_ops = {
3858 .enable = clk_nodrv_prepare_enable,
3859 .disable = clk_nodrv_disable_unprepare,
3860 .prepare = clk_nodrv_prepare_enable,
3861 .unprepare = clk_nodrv_disable_unprepare,
3862 .set_rate = clk_nodrv_set_rate,
3863 .set_parent = clk_nodrv_set_parent,
3864 };
3865
3866 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
3867 struct clk_core *target)
3868 {
3869 int i;
3870 struct clk_core *child;
3871
3872 for (i = 0; i < root->num_parents; i++)
3873 if (root->parents[i].core == target)
3874 root->parents[i].core = NULL;
3875
3876 hlist_for_each_entry(child, &root->children, child_node)
3877 clk_core_evict_parent_cache_subtree(child, target);
3878 }
3879
3880 /* Remove this clk from all parent caches */
3881 static void clk_core_evict_parent_cache(struct clk_core *core)
3882 {
3883 struct hlist_head **lists;
3884 struct clk_core *root;
3885
3886 lockdep_assert_held(&prepare_lock);
3887
3888 for (lists = all_lists; *lists; lists++)
3889 hlist_for_each_entry(root, *lists, child_node)
3890 clk_core_evict_parent_cache_subtree(root, core);
3891
3892 }
3893
3894 /**
3895  * clk_unregister - unregister a currently registered clock
3896  * @clk: clock to unregister
3897  */
3898 void clk_unregister(struct clk *clk)
3899 {
3900 unsigned long flags;
3901
3902 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3903 return;
3904
3905 clk_debug_unregister(clk->core);
3906
3907 clk_prepare_lock();
3908
3909 if (clk->core->ops == &clk_nodrv_ops) {
3910 pr_err("%s: unregistered clock: %s\n", __func__,
3911 clk->core->name);
3912 goto unlock;
3913 }
3914
3915 /*
3916  * Assign empty clock ops for consumers that might still hold a reference.
3917  */
3918 flags = clk_enable_lock();
3919 clk->core->ops = &clk_nodrv_ops;
3920 clk_enable_unlock(flags);
3921
3922 if (!hlist_empty(&clk->core->children)) {
3923 struct clk_core *child;
3924 struct hlist_node *t;
3925
3926 /* Reparent all children to the orphan list. */
3927 hlist_for_each_entry_safe(child, t, &clk->core->children,
3928 child_node)
3929 clk_core_set_parent_nolock(child, NULL);
3930 }
3931
3932 clk_core_evict_parent_cache(clk->core);
3933
3934 hlist_del_init(&clk->core->child_node);
3935
3936 if (clk->core->prepare_count)
3937 pr_warn("%s: unregistering prepared clock: %s\n",
3938 __func__, clk->core->name);
3939
3940 if (clk->core->protect_count)
3941 pr_warn("%s: unregistering protected clock: %s\n",
3942 __func__, clk->core->name);
3943
3944 kref_put(&clk->core->ref, __clk_release);
3945 free_clk(clk);
3946 unlock:
3947 clk_prepare_unlock();
3948 }
3949 EXPORT_SYMBOL_GPL(clk_unregister);
3950
3951 /**
3952  * clk_hw_unregister - unregister a currently registered clk_hw
3953  * @hw: hardware-specific clock data to unregister
3954  */
3955 void clk_hw_unregister(struct clk_hw *hw)
3956 {
3957 clk_unregister(hw->clk);
3958 }
3959 EXPORT_SYMBOL_GPL(clk_hw_unregister);
3960
3961 static void devm_clk_release(struct device *dev, void *res)
3962 {
3963 clk_unregister(*(struct clk **)res);
3964 }
3965
3966 static void devm_clk_hw_release(struct device *dev, void *res)
3967 {
3968 clk_hw_unregister(*(struct clk_hw **)res);
3969 }
3970
3971 /**
3972  * devm_clk_register - resource managed clk_register()
3973  * @dev: device that is registering this clock
3974  * @hw: link to hardware-specific clock data
3975  *
3976  * Managed clk_register(). This function is *deprecated*; use
3977  * devm_clk_hw_register() instead. Clocks returned from this function are
3978  * automatically clk_unregister()ed on driver detach. See clk_register() for
3979  * more information.
3980  */
3981 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
3982 {
3983 struct clk *clk;
3984 struct clk **clkp;
3985
3986 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
3987 if (!clkp)
3988 return ERR_PTR(-ENOMEM);
3989
3990 clk = clk_register(dev, hw);
3991 if (!IS_ERR(clk)) {
3992 *clkp = clk;
3993 devres_add(dev, clkp);
3994 } else {
3995 devres_free(clkp);
3996 }
3997
3998 return clk;
3999 }
4000 EXPORT_SYMBOL_GPL(devm_clk_register);
4001
4002 /**
4003  * devm_clk_hw_register - resource managed clk_hw_register()
4004  * @dev: device that is registering this clock
4005  * @hw: link to hardware-specific clock data
4006  *
4007  * Managed clk_hw_register(). Clocks registered by this function are
4008  * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4009  * for more information.
4010  */
4011 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4012 {
4013 struct clk_hw **hwp;
4014 int ret;
4015
4016 hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
4017 if (!hwp)
4018 return -ENOMEM;
4019
4020 ret = clk_hw_register(dev, hw);
4021 if (!ret) {
4022 *hwp = hw;
4023 devres_add(dev, hwp);
4024 } else {
4025 devres_free(hwp);
4026 }
4027
4028 return ret;
4029 }
4030 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
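/*
 * Probe sketch (hypothetical driver and init data): the devres variant ties
 * the clock's lifetime to the device, so the error path and remove() need no
 * matching clk_hw_unregister().
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct example_gate *gate;
	int ret;

	gate = devm_kzalloc(&pdev->dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return -ENOMEM;

	gate->hw.init = &example_init_data;
	ret = devm_clk_hw_register(&pdev->dev, &gate->hw);
	if (ret)
		return ret;	/* nothing to unwind */

	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
					   &gate->hw);
}
#endif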
4031
4032 static int devm_clk_match(struct device *dev, void *res, void *data)
4033 {
4034 struct clk *c = res;
4035 if (WARN_ON(!c))
4036 return 0;
4037 return c == data;
4038 }
4039
4040 static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4041 {
4042 struct clk_hw *hw = res;
4043
4044 if (WARN_ON(!hw))
4045 return 0;
4046 return hw == data;
4047 }
4048
4049 /**
4050  * devm_clk_unregister - resource managed clk_unregister()
4051  * @dev: device that is unregistering the clock data
4052  * @clk: clock to unregister
4053  *
4054  * Deallocate a clock allocated with devm_clk_register(). Normally this
4055  * function will not need to be called; the resource management code frees it.
4056  */
4057 void devm_clk_unregister(struct device *dev, struct clk *clk)
4058 {
4059 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4060 }
4061 EXPORT_SYMBOL_GPL(devm_clk_unregister);
4062
4063 /**
4064  * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4065  * @dev: device that is unregistering the hardware-specific clock data
4066  * @hw: link to hardware-specific clock data
4067  *
4068  * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4069  * this function will not need to be called and the resource management
4070  * code will ensure that the resource is freed.
4071  */
4072 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4073 {
4074 WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
4075 hw));
4076 }
4077 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4078
4079
4080 /*
4081  * clkdev helpers
4082  */
4083 void __clk_put(struct clk *clk)
4084 {
4085 struct module *owner;
4086
4087 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4088 return;
4089
4090 clk_prepare_lock();
4091
4092 /*
4093  * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4094  * given user must be balanced with calls to clk_rate_exclusive_put()
4095  * and by that same consumer
4096  */
4097 if (WARN_ON(clk->exclusive_count)) {
4098 /* We voiced our concern, let's sanitize the situation */
4099 clk->core->protect_count -= (clk->exclusive_count - 1);
4100 clk_core_rate_unprotect(clk->core);
4101 clk->exclusive_count = 0;
4102 }
4103
4104 hlist_del(&clk->clks_node);
4105 if (clk->min_rate > clk->core->req_rate ||
4106 clk->max_rate < clk->core->req_rate)
4107 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4108
4109 owner = clk->core->owner;
4110 kref_put(&clk->core->ref, __clk_release);
4111
4112 clk_prepare_unlock();
4113
4114 module_put(owner);
4115
4116 free_clk(clk);
4117 }
4118
4119 /**
4120  * clk_notifier_register - add a clk rate change notifier
4121  * @clk: struct clk * to watch
4122  * @nb: struct notifier_block * with callback info
4123  *
4124  * Request notification when clk's rate changes.  This uses an SRCU
4125  * notifier because we want it to block and notifier unregistrations are
4126  * uncommon.  The callbacks associated with the notifier must not
4127  * re-enter into the clk framework by calling any top-level clk APIs;
4128  * this will cause a nested prepare_lock mutex.
4129  *
4130  * In all notification cases (pre, post and abort rate change) the original
4131  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4132  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4133  * A callback may additionally stop a PRE_RATE_CHANGE from going ahead by
4134  * returning NOTIFY_STOP, which aborts the rate change.
4135  *
4136  * clk_notifier_register() must be called from non-atomic context.
4137  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4138  * allocation failure; otherwise, passes along the return value of
4139  * srcu_notifier_chain_register().
4140  */
4141 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4142 {
4143 struct clk_notifier *cn;
4144 int ret = -ENOMEM;
4145
4146 if (!clk || !nb)
4147 return -EINVAL;
4148
4149 clk_prepare_lock();
4150
4151 /* search the list of notifiers for this clk */
4152 list_for_each_entry(cn, &clk_notifier_list, node)
4153 if (cn->clk == clk)
4154 break;
4155
4156 /* if clk wasn't in the notifier list, allocate new clk_notifier */
4157 if (cn->clk != clk) {
4158 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4159 if (!cn)
4160 goto out;
4161
4162 cn->clk = clk;
4163 srcu_init_notifier_head(&cn->notifier_head);
4164
4165 list_add(&cn->node, &clk_notifier_list);
4166 }
4167
4168 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4169
4170 clk->core->notifier_count++;
4171
4172 out:
4173 clk_prepare_unlock();
4174
4175 return ret;
4176 }
4177 EXPORT_SYMBOL_GPL(clk_notifier_register);
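/*
 * Consumer sketch (hypothetical callback): react to rate changes, e.g. to
 * retune a peripheral divider. The notifier runs with the prepare lock held,
 * so the callback must not call back into top-level clk APIs.
 */
#if 0
static int example_clk_notify(struct notifier_block *nb, unsigned long event,
			      void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* ndata->old_rate is about to become ndata->new_rate */
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		example_retune(ndata->new_rate);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}
#endif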
4178
4179 /**
4180  * clk_notifier_unregister - remove a clk rate change notifier
4181  * @clk: struct clk *
4182  * @nb: struct notifier_block * with callback info
4183  *
4184  * Request no further notification for changes to 'clk' and frees memory
4185  * allocated in clk_notifier_register.
4186  *
4187  * Returns -EINVAL if called with null arguments; otherwise, passes
4188  * along the return value of srcu_notifier_chain_unregister().
4189  */
4190 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4191 {
4192 struct clk_notifier *cn = NULL;
4193 int ret = -EINVAL;
4194
4195 if (!clk || !nb)
4196 return -EINVAL;
4197
4198 clk_prepare_lock();
4199
4200 list_for_each_entry(cn, &clk_notifier_list, node)
4201 if (cn->clk == clk)
4202 break;
4203
4204 if (cn->clk == clk) {
4205 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4206
4207 clk->core->notifier_count--;
4208
4209 /* XXX the notifier code should handle this better */
4210 if (!cn->notifier_head.head) {
4211 srcu_cleanup_notifier_head(&cn->notifier_head);
4212 list_del(&cn->node);
4213 kfree(cn);
4214 }
4215
4216 } else {
4217 ret = -ENOENT;
4218 }
4219
4220 clk_prepare_unlock();
4221
4222 return ret;
4223 }
4224 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4225
4226 #ifdef CONFIG_OF
4227 static void clk_core_reparent_orphans(void)
4228 {
4229 clk_prepare_lock();
4230 clk_core_reparent_orphans_nolock();
4231 clk_prepare_unlock();
4232 }
4233
4234 /**
4235  * struct of_clk_provider - Clock provider registration structure
4236  * @link: Entry in global list of clock providers
4237  * @node: Pointer to device tree node of clock provider
4238  * @get: Get clock callback.  Returns NULL or a struct clk
4239  * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a struct clk_hw
4240  * @data: context pointer to be passed into @get callback
4241  */
4242 struct of_clk_provider {
4243 struct list_head link;
4244
4245 struct device_node *node;
4246 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4247 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4248 void *data;
4249 };
4250
4251 extern struct of_device_id __clk_of_table;
4252 static const struct of_device_id __clk_of_table_sentinel
4253 __used __section(__clk_of_table_end);
4254
4255 static LIST_HEAD(of_clk_providers);
4256 static DEFINE_MUTEX(of_clk_mutex);
4257
4258 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4259 void *data)
4260 {
4261 return data;
4262 }
4263 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4264
4265 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4266 {
4267 return data;
4268 }
4269 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4270
4271 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4272 {
4273 struct clk_onecell_data *clk_data = data;
4274 unsigned int idx = clkspec->args[0];
4275
4276 if (idx >= clk_data->clk_num) {
4277 pr_err("%s: invalid clock index %u\n", __func__, idx);
4278 return ERR_PTR(-EINVAL);
4279 }
4280
4281 return clk_data->clks[idx];
4282 }
4283 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4284
4285 struct clk_hw *
4286 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4287 {
4288 struct clk_hw_onecell_data *hw_data = data;
4289 unsigned int idx = clkspec->args[0];
4290
4291 if (idx >= hw_data->num) {
4292 pr_err("%s: invalid index %u\n", __func__, idx);
4293 return ERR_PTR(-EINVAL);
4294 }
4295
4296 return hw_data->hws[idx];
4297 }
4298 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4299
4300 /**
4301  * of_clk_add_provider() - Register a clock provider for a node
4302  * @np: Device node pointer associated with clock provider
4303  * @clk_src_get: callback for decoding clock
4304  * @data: context pointer for @clk_src_get callback.
4305  *
4306  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4307  */
4308 int of_clk_add_provider(struct device_node *np,
4309 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4310 void *data),
4311 void *data)
4312 {
4313 struct of_clk_provider *cp;
4314 int ret;
4315
4316 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4317 if (!cp)
4318 return -ENOMEM;
4319
4320 cp->node = of_node_get(np);
4321 cp->data = data;
4322 cp->get = clk_src_get;
4323
4324 mutex_lock(&of_clk_mutex);
4325 list_add(&cp->link, &of_clk_providers);
4326 mutex_unlock(&of_clk_mutex);
4327 pr_debug("Added clock from %pOF\n", np);
4328
4329 clk_core_reparent_orphans();
4330
4331 ret = of_clk_set_defaults(np, true);
4332 if (ret < 0)
4333 of_clk_del_provider(np);
4334
4335 return ret;
4336 }
4337 EXPORT_SYMBOL_GPL(of_clk_add_provider);
4338
4339 /**
4340  * of_clk_add_hw_provider() - Register a clock provider for a node
4341  * @np: Device node pointer associated with clock provider
4342  * @get: callback for decoding clk_hw
4343  * @data: context pointer for @get callback.
4344  */
4345 int of_clk_add_hw_provider(struct device_node *np,
4346 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4347 void *data),
4348 void *data)
4349 {
4350 struct of_clk_provider *cp;
4351 int ret;
4352
4353 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4354 if (!cp)
4355 return -ENOMEM;
4356
4357 cp->node = of_node_get(np);
4358 cp->data = data;
4359 cp->get_hw = get;
4360
4361 mutex_lock(&of_clk_mutex);
4362 list_add(&cp->link, &of_clk_providers);
4363 mutex_unlock(&of_clk_mutex);
4364 pr_debug("Added clk_hw provider from %pOF\n", np);
4365
4366 clk_core_reparent_orphans();
4367
4368 ret = of_clk_set_defaults(np, true);
4369 if (ret < 0)
4370 of_clk_del_provider(np);
4371
4372 return ret;
4373 }
4374 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
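/*
 * Provider sketch (hypothetical names): expose several clk_hws through the
 * onecell helper so that "clocks = <&myclks EXAMPLE_CLK_UART>;" in DT
 * resolves to hw_data->hws[EXAMPLE_CLK_UART].
 */
#if 0
	hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, EXAMPLE_NR_CLKS),
			       GFP_KERNEL);
	if (!hw_data)
		return -ENOMEM;

	hw_data->num = EXAMPLE_NR_CLKS;
	hw_data->hws[EXAMPLE_CLK_BUS] = &bus_gate.hw;
	hw_data->hws[EXAMPLE_CLK_UART] = &uart_div.hw;

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
#endif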
4375
4376 static void devm_of_clk_release_provider(struct device *dev, void *res)
4377 {
4378 of_clk_del_provider(*(struct device_node **)res);
4379 }
4380
4381 /*
4382  * We allow a child device to use its parent device as the clock provider node
4383  * for cases like MFD sub-devices where the child device driver wants to use
4384  * parent's device tree node.
4385  */
4386 static struct device_node *get_clk_provider_node(struct device *dev)
4387 {
4388 struct device_node *np, *parent_np;
4389
4390 np = dev->of_node;
4391 parent_np = dev->parent ? dev->parent->of_node : NULL;
4392
4393 if (!of_find_property(np, "#clock-cells", NULL))
4394 if (of_find_property(parent_np, "#clock-cells", NULL))
4395 np = parent_np;
4396
4397 return np;
4398 }
4399
4400 /**
4401  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4402  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4403  * @get: callback for decoding clk_hw
4404  * @data: context pointer for @get callback
4405  *
4406  * Registers clock provider for given device's node. If the device has no DT
4407  * node or if the device node lacks of clock provider information (#clock-cells)
4408  * then the parent device's node is scanned for this information. If parent node
4409  * has the #clock-cells then it is used in registration. Provider is
4410  * automatically released at device exit.
4411  *
4412  * Return: 0 on success or an errno on failure.
4413  */
4414 int devm_of_clk_add_hw_provider(struct device *dev,
4415 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4416 void *data),
4417 void *data)
4418 {
4419 struct device_node **ptr, *np;
4420 int ret;
4421
4422 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4423 GFP_KERNEL);
4424 if (!ptr)
4425 return -ENOMEM;
4426
4427 np = get_clk_provider_node(dev);
4428 ret = of_clk_add_hw_provider(np, get, data);
4429 if (!ret) {
4430 *ptr = np;
4431 devres_add(dev, ptr);
4432 } else {
4433 devres_free(ptr);
4434 }
4435
4436 return ret;
4437 }
4438 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4439
4440 /**
4441  * of_clk_del_provider() - Remove a previously registered clock provider
4442  * @np: Device node pointer associated with clock provider
4443  */
4444 void of_clk_del_provider(struct device_node *np)
4445 {
4446 struct of_clk_provider *cp;
4447
4448 mutex_lock(&of_clk_mutex);
4449 list_for_each_entry(cp, &of_clk_providers, link) {
4450 if (cp->node == np) {
4451 list_del(&cp->link);
4452 of_node_put(cp->node);
4453 kfree(cp);
4454 break;
4455 }
4456 }
4457 mutex_unlock(&of_clk_mutex);
4458 }
4459 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4460
4461 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4462 {
4463 struct device_node **np = res;
4464
4465 if (WARN_ON(!np || !*np))
4466 return 0;
4467
4468 return *np == data;
4469 }
4470
4471 /**
4472  * devm_of_clk_del_provider() - Remove clock provider registered using devm
4473  * @dev: Device to whose lifetime the clock provider was bound
4474  */
4475 void devm_of_clk_del_provider(struct device *dev)
4476 {
4477 int ret;
4478 struct device_node *np = get_clk_provider_node(dev);
4479
4480 ret = devres_release(dev, devm_of_clk_release_provider,
4481 devm_clk_provider_match, np);
4482
4483 WARN_ON(ret);
4484 }
4485 EXPORT_SYMBOL(devm_of_clk_del_provider);
4486
4487 /**
4488  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4489  * @np: device node to parse clock specifier from
4490  * @index: index of phandle to parse clock out of. If index < 0, @name is used
4491  * @name: clock name to find and parse. If name is NULL, the index is used
4492  * @out_args: Result of parsing the clock specifier
4493  *
4494  * Parses a device node's "clocks" and "clock-names" properties to find the
4495  * phandle and cells for the index or name that is desired. The resulting
4496  * clock specifier is placed into @out_args, or an errno is returned when
4497  * there's a parsing error. The @index argument is ignored if @name is
4498  * non-NULL.
4499  *
4500  * Example:
4501  *
4502  * phandle1: clock-controller@1 {
4503  *	#clock-cells = <2>;
4504  * }
4505  *
4506  * phandle2: clock-controller@2 {
4507  *	#clock-cells = <1>;
4508  * }
4509  *
4510  * clock-consumer@3 {
4511  *	clocks = <&phandle1 1 2 &phandle2 3>;
4512  *	clock-names = "name1", "name2";
4513  * }
4514  *
4515  * To get a device_node for `clock-controller@2' node you may call this
4516  * function a few different ways:
4517  *
4518  *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4519  *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4520  *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4521  *
4522  * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4523  * or another errno when the specifier cannot be parsed or resolved.
4524  */
4525 static int of_parse_clkspec(const struct device_node *np, int index,
4526 const char *name, struct of_phandle_args *out_args)
4527 {
4528 int ret = -ENOENT;
4529
4530 /* Walk up the tree of devices looking for a clock property that matches */
4531 while (np) {
4532 /*
4533  * For named clocks, first look up the name in the
4534  * "clock-names" property.  If it cannot be found, then index
4535  * will be an error code and of_parse_phandle_with_args() will
4536  * return -EINVAL.
4537  */
4538 if (name)
4539 index = of_property_match_string(np, "clock-names", name);
4540 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4541 index, out_args);
4542 if (!ret)
4543 break;
4544 if (name && index >= 0)
4545 break;
4546
4547 /*
4548  * No matching clock found on this node.  If the parent node
4549  * has a "clock-ranges" property, then we can try one of its
4550  * clocks.
4551  */
4552 np = np->parent;
4553 if (np && !of_get_property(np, "clock-ranges", NULL))
4554 break;
4555 index = 0;
4556 }
4557
4558 return ret;
4559 }
4560
4561 static struct clk_hw *
4562 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4563 struct of_phandle_args *clkspec)
4564 {
4565 struct clk *clk;
4566
4567 if (provider->get_hw)
4568 return provider->get_hw(clkspec, provider->data);
4569
4570 clk = provider->get(clkspec, provider->data);
4571 if (IS_ERR(clk))
4572 return ERR_CAST(clk);
4573 return __clk_get_hw(clk);
4574 }
4575
4576 static struct clk_hw *
4577 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4578 {
4579 struct of_clk_provider *provider;
4580 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4581
4582 if (!clkspec)
4583 return ERR_PTR(-EINVAL);
4584
4585 mutex_lock(&of_clk_mutex);
4586 list_for_each_entry(provider, &of_clk_providers, link) {
4587 if (provider->node == clkspec->np) {
4588 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4589 if (!IS_ERR(hw))
4590 break;
4591 }
4592 }
4593 mutex_unlock(&of_clk_mutex);
4594
4595 return hw;
4596 }
4597
4598 /**
4599  * of_clk_get_from_provider() - Lookup a clock from a clock provider
4600  * @clkspec: pointer to a clock specifier data structure
4601  *
4602  * This function looks up a struct clk from the registered list of clock
4603  * providers, an input is a clock specifier data structure as returned
4604  * from the of_parse_phandle_with_args() function call.
4605  */
4606 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4607 {
4608 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4609
4610 return clk_hw_create_clk(NULL, hw, NULL, __func__);
4611 }
4612 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4613
4614 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4615 const char *con_id)
4616 {
4617 int ret;
4618 struct clk_hw *hw;
4619 struct of_phandle_args clkspec;
4620
4621 ret = of_parse_clkspec(np, index, con_id, &clkspec);
4622 if (ret)
4623 return ERR_PTR(ret);
4624
4625 hw = of_clk_get_hw_from_clkspec(&clkspec);
4626 of_node_put(clkspec.np);
4627
4628 return hw;
4629 }
4630
4631 static struct clk *__of_clk_get(struct device_node *np,
4632 int index, const char *dev_id,
4633 const char *con_id)
4634 {
4635 struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
4636
4637 return clk_hw_create_clk(NULL, hw, dev_id, con_id);
4638 }
4639
4640 struct clk *of_clk_get(struct device_node *np, int index)
4641 {
4642 return __of_clk_get(np, index, np->full_name, NULL);
4643 }
4644 EXPORT_SYMBOL(of_clk_get);
4645
4646 /**
4647  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4648  * @np: pointer to clock consumer node
4649  * @name: name of consumer's clock input, or NULL for the first clock reference
4650  *
4651  * This function parses the clocks and clock-names properties,
4652  * and uses them to look up the struct clk from the registered list of clk
4653  * providers.
4654  */
4655 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
4656 {
4657 if (!np)
4658 return ERR_PTR(-ENOENT);
4659
4660 return __of_clk_get(np, 0, np->full_name, name);
4661 }
4662 EXPORT_SYMBOL(of_clk_get_by_name);
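/*
 * Lookup sketch: given hypothetical consumer DT such as
 *
 *	clocks = <&cru 5>, <&cru 6>;
 *	clock-names = "bus", "uart";
 *
 * the two helpers above resolve the same entries by index or by name.
 */
#if 0
	struct clk *bus = of_clk_get(np, 0);			/* <&cru 5> */
	struct clk *uart = of_clk_get_by_name(np, "uart");	/* <&cru 6> */
#endif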
4663
4664 /**
4665  * of_clk_get_parent_count() - Count the number of clocks a device node has
4666  * @np: device node to count
4667  *
4668  * Returns: The number of clocks that are possible parents of this node
4669  */
4670 unsigned int of_clk_get_parent_count(struct device_node *np)
4671 {
4672 int count;
4673
4674 count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4675 if (count < 0)
4676 return 0;
4677
4678 return count;
4679 }
4680 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
4681
4682 const char *of_clk_get_parent_name(struct device_node *np, int index)
4683 {
4684 struct of_phandle_args clkspec;
4685 struct property *prop;
4686 const char *clk_name;
4687 const __be32 *vp;
4688 u32 pv;
4689 int rc;
4690 int count;
4691 struct clk *clk;
4692
4693 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4694 &clkspec);
4695 if (rc)
4696 return NULL;
4697
4698 index = clkspec.args_count ? clkspec.args[0] : 0;
4699 count = 0;
4700 /*
4701  * if there is an indices property, use it to transfer the index
4702  * specified into an array offset for the clock-output-names property.
4703  */
4704 of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4705 if (index == pv) {
4706 index = count;
4707 break;
4708 }
4709 count++;
4710 }
4711 /* We went off the end of 'clock-indices' without finding it */
4712 if (prop && !vp)
4713 return NULL;
4714
4715 if (of_property_read_string_index(clkspec.np, "clock-output-names",
4716 index,
4717 &clk_name) < 0) {
4718 /*
4719  * Best effort to get the name if the clock has been
4720  * registered with the framework. If the clock isn't
4721  * registered, we return the node name as the name of
4722  * the clock as long as #clock-cells = 0.
4723  */
4724 clk = of_clk_get_from_provider(&clkspec);
4725 if (IS_ERR(clk)) {
4726 if (clkspec.args_count == 0)
4727 clk_name = clkspec.np->name;
4728 else
4729 clk_name = NULL;
4730 } else {
4731 clk_name = __clk_get_name(clk);
4732 clk_put(clk);
4733 }
4734 }
4735
4736
4737 of_node_put(clkspec.np);
4738 return clk_name;
4739 }
4740 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
4741
4742 /**
4743  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4744  * number of parents
4745  * @np: Device node pointer associated with clock provider
4746  * @parents: pointer to char array that hold the parents' names
4747  * @size: size of the @parents array
4748  *
4749  * Return: number of parents for the clock node.
4750  */
4751 int of_clk_parent_fill(struct device_node *np, const char **parents,
4752 unsigned int size)
4753 {
4754 unsigned int i = 0;
4755
4756 while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
4757 i++;
4758
4759 return i;
4760 }
4761 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
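/*
 * Provider sketch (hypothetical mux driver): collect the DT parent names
 * before registration; the helper stops at the first missing entry and
 * returns how many names it filled in.
 */
#if 0
	const char *parents[4];
	unsigned int num_parents = of_clk_parent_fill(np, parents,
						      ARRAY_SIZE(parents));
#endif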
4762
4763 struct clock_provider {
4764 void (*clk_init_cb)(struct device_node *);
4765 struct device_node *np;
4766 struct list_head node;
4767 };
4768
4769 /*
4770  * This function looks for a parent clock. If there is one, then it
4771  * checks that the provider for this parent clock was initialized, in
4772  * this case the parent clock will be ready.
4773  */
4774 static int parent_ready(struct device_node *np)
4775 {
4776 int i = 0;
4777
4778 while (true) {
4779 struct clk *clk = of_clk_get(np, i);
4780
4781 /* this parent is ready, we can check the next one */
4782 if (!IS_ERR(clk)) {
4783 clk_put(clk);
4784 i++;
4785 continue;
4786 }
4787
4788 /* at least one parent is not ready, we exit now */
4789 if (PTR_ERR(clk) == -EPROBE_DEFER)
4790 return 0;
4791
4792 /*
4793  * Here we make the assumption that the device tree is
4794  * written correctly. So an error means that there is
4795  * no more parent. As we didn't exit yet, the
4796  * previous parents are ready. If there is no clock
4797  * parent, no need to wait for them, so we can
4798  * consider their absence as being ready.
4799  */
4800 return 1;
4801 }
4802 }
4803
4804 /**
4805  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4806  * @np: Device node pointer associated with clock provider
4807  * @index: clock index
4808  * @flags: pointer to top-level framework flags
4809  *
4810  * Detects if the clock-critical property exists and, if so, sets the
4811  * corresponding CLK_IS_CRITICAL flag.
4812  *
4813  * Do not use this function. It exists only for legacy Device Tree
4814  * bindings, such as the one-clock-per-node style that are outdated.
4815  * Those bindings typically put all clock data into .dts and the Linux
4816  * driver has no clock data, thus making it impossible to set this flag
4817  * correctly from the driver. Only those drivers may call
4818  * of_clk_detect_critical from their setup functions.
4819  *
4820  * Return: error code or zero on success
4821  */
4822 int of_clk_detect_critical(struct device_node *np,
4823 int index, unsigned long *flags)
4824 {
4825 struct property *prop;
4826 const __be32 *cur;
4827 uint32_t idx;
4828
4829 if (!np || !flags)
4830 return -EINVAL;
4831
4832 of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
4833 if (index == idx)
4834 *flags |= CLK_IS_CRITICAL;
4835
4836 return 0;
4837 }
4838
4839 /**
4840  * of_clk_init() - Scan and init clock providers from the DT
4841  * @matches: array of compatible values and init functions for providers.
4842  *
4843  * This function scans the device tree for matching clock providers
4844  * and calls their initialization functions. It also does it by trying
4845  * to follow the dependencies.
4846  */
4847 void __init of_clk_init(const struct of_device_id *matches)
4848 {
4849 const struct of_device_id *match;
4850 struct device_node *np;
4851 struct clock_provider *clk_provider, *next;
4852 bool is_init_done;
4853 bool force = false;
4854 LIST_HEAD(clk_provider_list);
4855
4856 if (!matches)
4857 matches = &__clk_of_table;
4858
4859 /* First prepare the list of the clocks providers */
4860 for_each_matching_node_and_match(np, matches, &match) {
4861 struct clock_provider *parent;
4862
4863 if (!of_device_is_available(np))
4864 continue;
4865
4866 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4867 if (!parent) {
4868 list_for_each_entry_safe(clk_provider, next,
4869 &clk_provider_list, node) {
4870 list_del(&clk_provider->node);
4871 of_node_put(clk_provider->np);
4872 kfree(clk_provider);
4873 }
4874 of_node_put(np);
4875 return;
4876 }
4877
4878 parent->clk_init_cb = match->data;
4879 parent->np = of_node_get(np);
4880 list_add_tail(&parent->node, &clk_provider_list);
4881 }
4882
4883 while (!list_empty(&clk_provider_list)) {
4884 is_init_done = false;
4885 list_for_each_entry_safe(clk_provider, next,
4886 &clk_provider_list, node) {
4887 if (force || parent_ready(clk_provider->np)) {
4888
4889 /* Don't populate platform devices */
4890 of_node_set_flag(clk_provider->np,
4891 OF_POPULATED);
4892
4893 clk_provider->clk_init_cb(clk_provider->np);
4894 of_clk_set_defaults(clk_provider->np, true);
4895
4896 list_del(&clk_provider->node);
4897 of_node_put(clk_provider->np);
4898 kfree(clk_provider);
4899 is_init_done = true;
4900 }
4901 }
4902
4903 /*
4904  * We didn't manage to initialize any of the
4905  * remaining providers during the last loop, so now we
4906  * initialize all the remaining ones unconditionally
4907  * in case the clock parent was not mandatory
4908  */
4909 if (!is_init_done)
4910 force = true;
4911 }
4912 }
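/*
 * Early-provider sketch (hypothetical compatible string): CLK_OF_DECLARE()
 * places an entry in the __clk_of_table scanned by of_clk_init() above, so
 * the setup callback runs before the platform bus is available.
 */
#if 0
static void __init example_clk_setup(struct device_node *np)
{
	/* register fixed/early clocks needed by timers, interrupts, etc. */
}
CLK_OF_DECLARE(example_osc, "vendor,example-osc", example_clk_setup);
#endif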
4913 #endif