/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static long clk_core_get_accuracy(struct clk_core *clk);
static unsigned long clk_core_get_rate(struct clk_core *clk);
static int clk_core_get_phase(struct clk_core *clk);
static bool clk_core_is_prepared(struct clk_core *clk);
static bool clk_core_is_enabled(struct clk_core *clk);
static struct clk_core *clk_core_lookup(const char *name);

/***    private data structures    ***/

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct clk_core		*parent;
	const char		**parent_names;
	struct clk_core		**parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned long		accuracy;
	int			phase;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_node	debug_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}
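/*
 * Example (illustrative sketch, not an actual caller in this file): the
 * owner/refcount scheme above makes both locks reentrant from the same
 * context, so a path that already holds enable_lock may take it again
 * without deadlocking:
 *
 *	flags = clk_enable_lock();
 *	...
 *	nested_flags = clk_enable_lock();	(refcount bumped, no spin)
 *	clk_enable_unlock(nested_flags);
 *	...
 *	clk_enable_unlock(flags);
 */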
static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
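/*
 * Reading /sys/kernel/debug/clk/clk_summary produces one row per clk,
 * indented by depth in the tree.  A sample (hypothetical clk names and
 * values, for illustration only):
 *
 *    clock                         enable_cnt  prepare_cnt        rate   accuracy   phase
 * ----------------------------------------------------------------------------------------
 *    osc                                    1            1    24000000          0   0
 *       pll1                                1            1   792000000          0   0
 */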
static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_printf(s, "{");

	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_puts(s, ",");
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			       (u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
			       (u32 *)&clk->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
			       (u32 *)&clk->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			       (u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	if (clk->ops->debug_init) {
		ret = clk->ops->debug_init(clk->hw, clk->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
out:
	return ret;
}
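/*
 * On success the clk ends up with a per-clk debugfs directory (paths
 * assume the default debugfs mount point):
 *
 *	/sys/kernel/debug/clk/<name>/clk_rate
 *	/sys/kernel/debug/clk/<name>/clk_accuracy
 *	/sys/kernel/debug/clk/<name>/clk_phase
 *	/sys/kernel/debug/clk/<name>/clk_flags
 *	/sys/kernel/debug/clk/<name>/clk_prepare_count
 *	/sys/kernel/debug/clk/<name>/clk_enable_count
 *	/sys/kernel/debug/clk/<name>/clk_notifier_count
 */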
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static int clk_debug_register(struct clk_core *clk)
{
	int ret = 0;

	mutex_lock(&clk_debug_lock);
	hlist_add_head(&clk->debug_node, &clk_debug_list);

	if (!inited)
		goto unlock;

	ret = clk_debug_create_one(clk, rootdir);
unlock:
	mutex_unlock(&clk_debug_lock);

	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk tree
 * @clk: the clk being removed from the debugfs clk tree
 *
 * Dynamically removes a clk and all its child clk nodes from the
 * debugfs clk tree if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_init.
 */
static void clk_debug_unregister(struct clk_core *clk)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&clk->debug_node);
	debugfs_remove_recursive(clk->dentry);
	clk->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
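/*
 * Example (hypothetical provider, sketch): a driver that wants extra
 * per-clk debugfs entries can implement the optional .debug_init
 * callback, which clk_debug_create_one() invokes with the clk's dentry.
 * foo_debug_init, to_clk_foo and the foo_div field are assumed names:
 *
 *	static int foo_debug_init(struct clk_hw *hw, struct dentry *dentry)
 *	{
 *		struct clk_foo *foo = to_clk_foo(hw);
 *
 *		if (!debugfs_create_u32("foo_div", S_IRUGO, dentry,
 *					&foo->div))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */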
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(clk, &clk_debug_list, debug_node)
		clk_debug_create_one(clk, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk_core *clk) { return 0; }
static inline void clk_debug_reparent(struct clk_core *clk,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *clk)
{
}
#endif

/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk_core *clk)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(clk)) {
		trace_clk_unprepare(clk);
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
		trace_clk_unprepare_complete(clk);
	}
}
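/*
 * Note: the recursion above visits children before acting on the clk
 * itself, so leaves are unprepared before their parents.  The disable
 * pass below uses the same bottom-up order, and clk_disable_unused()
 * runs the disable pass over the whole tree before the unprepare pass,
 * mirroring the clk_disable()-before-clk_unprepare() rule consumers
 * must follow.
 */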
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk_core *clk)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(clk)) {
		trace_clk_disable(clk);
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
		trace_clk_disable_complete(clk);
	}

unlock_out:
	clk_enable_unlock(flags);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *clk;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

/***    helper functions   ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->core->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);

struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;

	/* TODO: Create a per-user clk and change callers to call clk_put */
	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
						     u8 index)
{
	if (!clk || index >= clk->num_parents)
		return NULL;
	else if (!clk->parents)
		return clk_core_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		return clk->parents[index] =
			clk_core_lookup(clk->parent_names[index]);
	else
		return clk->parents[index];
}

struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
	struct clk_core *parent;

	if (!clk)
		return NULL;

	parent = clk_core_get_parent_by_index(clk->core, index);

	return !parent ? NULL : parent->hw->clk;
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate_nolock(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *clk)
{
	if (!clk)
		return 0;

	return clk->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

static bool clk_core_is_prepared(struct clk_core *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_prepared(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_prepared(clk->core);
}

static bool clk_core_is_enabled(struct clk_core *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *clk)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}
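/*
 * Worked example: with a target rate of 100 and a current best of 90, a
 * candidate of 96 wins under both policies (96 <= 100 and 96 > 90;
 * |96 - 100| < |90 - 100|), while a candidate of 104 wins only with
 * CLK_MUX_ROUND_CLOSEST, since the default policy never rounds up.
 */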
static long
clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
			     unsigned long min_rate,
			     unsigned long max_rate,
			     unsigned long *best_parent_rate,
			     struct clk_hw **best_parent_p,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents;
	unsigned long parent_rate, best = 0;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT)
			best = __clk_determine_rate(parent ? parent->hw : NULL,
						    rate, min_rate, max_rate);
		else if (parent)
			best = clk_core_get_rate_nolock(parent);
		else
			best = clk_core_get_rate_nolock(core);
		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;
		if (core->flags & CLK_SET_RATE_PARENT)
			parent_rate = __clk_determine_rate(parent->hw, rate,
							   min_rate,
							   max_rate);
		else
			parent_rate = clk_core_get_rate_nolock(parent);
		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
			best_parent = parent;
			best = parent_rate;
		}
	}

out:
	if (best_parent)
		*best_parent_p = best_parent->hw;
	*best_parent_rate = best;

	return best;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *clk,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = 0;
	*max_rate = ULONG_MAX;

	hlist_for_each_entry(clk_user, &clk->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &clk->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

/*
 * Helper for finding best parent to provide a given frequency.  This can be
 * used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long min_rate,
			      unsigned long max_rate,
			      unsigned long *best_parent_rate,
			      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
					    best_parent_p, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
				      unsigned long min_rate,
				      unsigned long max_rate,
				      unsigned long *best_parent_rate,
				      struct clk_hw **best_parent_p)
{
	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
					    best_parent_rate,
					    best_parent_p,
					    CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
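/*
 * Example (hypothetical driver, sketch): a plain mux can plug the helper
 * above straight into its clk_ops; foo_mux_get_parent and
 * foo_mux_set_parent are assumed driver callbacks:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */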
/***        clk api        ***/

static void clk_core_unprepare(struct clk_core *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	trace_clk_unprepare(clk);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	trace_clk_unprepare_complete(clk);
	clk_core_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_prepare_lock();
	clk_core_unprepare(clk->core);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = clk_core_prepare(clk->parent);
		if (ret)
			return ret;

		trace_clk_prepare(clk);

		if (clk->ops->prepare)
			ret = clk->ops->prepare(clk->hw);

		trace_clk_prepare_complete(clk);

		if (ret) {
			clk_core_unprepare(clk->parent);
			return ret;
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_prepare(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
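/*
 * Typical consumer sequence (sketch, error handling trimmed):
 *
 *	ret = clk_prepare(clk);		(may sleep)
 *	ret = clk_enable(clk);		(atomic, usable from IRQ context)
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 *
 * The clk_prepare_enable()/clk_disable_unprepare() helpers declared in
 * linux/clk.h combine the two steps for callers in sleepable context.
 */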
static void clk_core_disable(struct clk_core *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	trace_clk_disable(clk);

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	trace_clk_disable_complete(clk);

	clk_core_disable(clk->parent);
}

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	clk_core_disable(clk->core);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR_OR_NULL(clk))
		return;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = clk_core_enable(clk->parent);

		if (ret)
			return ret;

		trace_clk_enable(clk);

		if (clk->ops->enable)
			ret = clk->ops->enable(clk->hw);

		trace_clk_enable_complete(clk);

		if (ret) {
			clk_core_disable(clk->parent);
			return ret;
		}
	}

	clk->enable_count++;
	return 0;
}

static int __clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable(clk->core);
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
						unsigned long rate,
						unsigned long min_rate,
						unsigned long max_rate)
{
	unsigned long parent_rate = 0;
	struct clk_core *parent;
	struct clk_hw *parent_hw;

	lockdep_assert_held(&prepare_lock);

	if (!clk)
		return 0;

	parent = clk->parent;
	if (parent)
		parent_rate = parent->rate;

	if (clk->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		return clk->ops->determine_rate(clk->hw, rate,
						min_rate, max_rate,
						&parent_rate, &parent_hw);
	} else if (clk->ops->round_rate)
		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
	else if (clk->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
						  max_rate);
	else
		return clk->rate;
}
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @rate: target rate
 * @min_rate: returned rate must be greater than this rate
 * @max_rate: returned rate must be less than this rate
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
 * .determine_rate.
 */
unsigned long __clk_determine_rate(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate)
{
	if (!hw)
		return 0;

	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long min_rate;
	unsigned long max_rate;

	if (!clk)
		return 0;

	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);

	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
}
EXPORT_SYMBOL_GPL(__clk_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *clk, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == clk) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
		}
	}

	return ret;
}
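/*
 * Consumers subscribe to these messages with clk_notifier_register()
 * (sketch; foo_clk_cb, foo_nb and FOO_MAX_RATE are assumed names, error
 * handling omitted):
 *
 *	static int foo_clk_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;	(vetoes the rate change)
 *		return NOTIFY_OK;
 *	}
 *	...
 *	clk_notifier_register(clk, &foo_nb);
 */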
/**
 * __clk_recalc_accuracies
 * @clk: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of
 * its parent.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_accuracies(struct clk_core *clk)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (clk->parent)
		parent_accuracy = clk->parent->accuracy;

	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
							  parent_accuracy);
	else
		clk->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *clk)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(clk);

	accuracy = __clk_get_accuracy(clk);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will
 * be issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *clk,
				unsigned long parent_rate)
{
	if (clk->ops->recalc_rate)
		return clk->ops->recalc_rate(clk->hw, parent_rate);
	return parent_rate;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	clk->rate = clk_recalc(clk, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = clk_core_get_rate_nolock(clk);
	clk_prepare_unlock();

	return rate;
}
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *clk,
				  struct clk_core *parent)
{
	int i;

	if (!clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
				       sizeof(struct clk *), GFP_KERNEL);
		if (!clk->parents)
			return -ENOMEM;
	}

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to clk_core_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents[i] == parent)
			return i;

		if (clk->parents[i])
			continue;

		if (!strcmp(clk->parent_names[i], parent->name)) {
			clk->parents[i] = clk_core_lookup(parent->name);
			return i;
		}
	}

	return -EINVAL;
}

static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent) {
		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == clk)
			new_parent->new_child = NULL;

		hlist_add_head(&clk->child_node, &new_parent->children);
	} else {
		hlist_add_head(&clk->child_node, &clk_orphan_list);
	}

	clk->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = clk->parent;

	/*
	 * Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */
	if (clk->prepare_count) {
		clk_core_prepare(parent);
		flags = clk_enable_lock();
		clk_core_enable(parent);
		clk_core_enable(clk);
		clk_enable_unlock(flags);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(clk, parent);
	clk_enable_unlock(flags);

	return old_parent;
}
static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	unsigned long flags;

	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_core_disable(old_parent);
		clk_enable_unlock(flags);
		clk_core_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(clk, parent);

	trace_clk_set_parent(clk, parent);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	trace_clk_set_parent_complete(clk, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(clk, old_parent);
		clk_enable_unlock(flags);

		if (clk->prepare_count) {
			flags = clk_enable_lock();
			clk_core_disable(clk);
			clk_core_disable(parent);
			clk_enable_unlock(flags);
			clk_core_unprepare(parent);
		}
		return ret;
	}

	__clk_set_parent_after(clk, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk_core *clk,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(clk, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, clk->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	clk->new_rate = new_rate;
	clk->new_parent = new_parent;
	clk->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	clk->new_child = NULL;
	if (new_parent && new_parent != clk->parent)
		new_parent->new_child = clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
					   unsigned long rate)
{
	struct clk_core *top = clk;
	struct clk_core *old_parent, *parent;
	struct clk_hw *parent_hw;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = clk->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(clk, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk->ops->determine_rate) {
		parent_hw = parent ? parent->hw : NULL;
		ret = clk->ops->determine_rate(clk->hw, rate,
					       min_rate,
					       max_rate,
					       &best_parent_rate,
					       &parent_hw);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		parent = parent_hw ? parent_hw->core : NULL;
	} else if (clk->ops->round_rate) {
		ret = clk->ops->round_rate(clk->hw, rate,
					   &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		clk->new_rate = clk->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, clk->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && clk->num_parents > 1) {
		p_index = clk_fetch_parent_index(clk, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			return NULL;
		}
	}

	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(clk, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child) {
		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *clk)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;

	old_rate = clk->rate;

	if (clk->new_parent)
		best_parent_rate = clk->new_parent->rate;
	else if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->new_parent && clk->new_parent != clk->parent) {
		old_parent = __clk_set_parent_before(clk, clk->new_parent);
		trace_clk_set_parent(clk, clk->new_parent);

		if (clk->ops->set_rate_and_parent) {
			skip_set_rate = true;
			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
						      best_parent_rate,
						      clk->new_parent_index);
		} else if (clk->ops->set_parent) {
			clk->ops->set_parent(clk->hw, clk->new_parent_index);
		}

		trace_clk_set_parent_complete(clk, clk->new_parent);
		__clk_set_parent_after(clk, clk->new_parent, old_parent);
	}

	trace_clk_set_rate(clk, clk->new_rate);

	if (!skip_set_rate && clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(clk, clk->new_rate);

	clk->rate = clk_recalc(clk, best_parent_rate);

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != clk)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in clk->children yet */
	if (clk->new_child)
		clk_change_rate(clk->new_child);
}

static int clk_core_set_rate_nolock(struct clk_core *clk,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;
	int ret = 0;

	if (!clk)
		return 0;

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(clk))
		return 0;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

	clk->req_rate = req_rate;

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	ret = clk_core_set_rate_nolock(clk->core, rate);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
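/*
 * Example (sketch): a consumer that needs a specific frequency will
 * usually round first, then set, so it knows what the hardware can do;
 * the 48 MHz value is illustrative only:
 *
 *	rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */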
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
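/*
 * Example (sketch): per-user boundaries combine across all consumers of
 * the same clk via clk_core_get_boundaries() above, so a driver can
 * express a constraint instead of an exact rate and let the framework
 * clamp future requests (the 100-200 MHz range is illustrative only):
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 */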
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree.
 */
static struct clk_core *__clk_init_parent(struct clk_core *clk)
{
	struct clk_core *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = clk_core_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
		     "%s: multi-parent clocks must implement .get_parent\n",
		     __func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive lookups.  We don't set clk->parent here;
	 * that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kcalloc(clk->num_parents, sizeof(struct clk *),
				GFP_KERNEL);

	ret = clk_core_get_parent_by_index(clk, index);

out:
	return ret;
}

static void clk_core_reparent(struct clk_core *clk,
			      struct clk_core *new_parent)
{
	clk_reparent(clk, new_parent);
	__clk_recalc_accuracies(clk);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}
static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index < 0) {
			pr_debug("%s: clk %s cannot be parent of clk %s\n",
				 __func__, parent->name, clk->name);
			ret = p_index;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(clk, POST_RATE_CHANGE);
		__clk_recalc_accuracies(clk);
	}

out:
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, a negative errno otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example,
 * with phase-locked-loop clock signal generators we may shift phase
 * with respect to the feedback clock signal input, but for other cases
 * the clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *clk)
{
	int ret = 0;

	if (!clk)
		goto out;

	clk_prepare_lock();
	ret = clk->phase;
	clk_prepare_unlock();

out:
	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);
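
/*
 * Illustrative sketch only (not part of this file): switching a mux input
 * and applying a phase offset to a sampling clock. The clock handles and the
 * 90-degree shift are hypothetical.
 */
#if 0
static int example_retime(struct clk *mux, struct clk *pll, struct clk *sample)
{
	int ret;

	ret = clk_set_parent(mux, pll);
	if (ret)
		return ret;

	/* shift the sampling clock by a quarter period */
	return clk_set_phase(sample, 90);
}
#endif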
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
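
/*
 * Illustrative sketch only (not part of this file): two consumer handles
 * obtained separately may still refer to the same hardware clock. The device
 * and the "tx"/"rx" clock names are hypothetical.
 */
#if 0
static bool example_shares_source(struct device *dev)
{
	struct clk *a = devm_clk_get(dev, "tx");
	struct clk *b = devm_clk_get(dev, "rx");

	/* distinct struct clk pointers, possibly the same clk_core */
	return !IS_ERR(a) && !IS_ERR(b) && clk_is_match(a, b);
}
#endif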
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk_user: clk being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_init(struct device *dev, struct clk *clk_user)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	struct clk_core *clk;
	unsigned long rate;

	if (!clk_user)
		return -EINVAL;

	clk = clk_user->core;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
			 __func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane. See Documentation/clk.txt */
	if (clk->ops->set_rate &&
	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
	      clk->ops->recalc_rate)) {
		pr_warn("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
			__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warn("%s: %s must implement .get_parent & .set_parent\n",
			__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_rate_and_parent &&
	    !(clk->ops->set_parent && clk->ops->set_rate)) {
		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
			__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
		     "%s: invalid NULL in %s's .parent_names\n",
		     __func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents. This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block. This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kcalloc(clk->num_parents,
				       sizeof(*clk->parents), GFP_KERNEL);
		/*
		 * clk_core_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer. We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					clk_core_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd. If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
			       &clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's accuracy. The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy. If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (clk->ops->recalc_accuracy)
		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
				__clk_get_accuracy(clk->parent));
	else if (clk->parent)
		clk->accuracy = clk->parent->accuracy;
	else
		clk->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (clk->ops->get_phase)
		clk->phase = clk->ops->get_phase(clk->hw);
	else
		clk->phase = 0;

	/*
	 * Set clk's rate. The preferred method is to use .recalc_rate. For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate. If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		rate = clk->ops->recalc_rate(clk->hw,
				clk_core_get_rate_nolock(clk->parent));
	else if (clk->parent)
		rate = clk->parent->rate;
	else
		rate = 0;
	clk->rate = clk->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		if (orphan->num_parents && orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				clk_core_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				clk_core_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	kref_init(&clk->ref);
out:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(clk);

	return ret;
}
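
/*
 * Illustrative sketch only (not part of this file): a minimal clk_ops that
 * passes the sanity checks in __clk_init() above. A clock providing
 * .set_rate must also provide .recalc_rate plus .round_rate (or
 * .determine_rate). All names here are hypothetical; the hardware is a
 * fixed divide-by-two.
 */
#if 0
static unsigned long example_div_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	return parent_rate / 2;		/* fixed /2 divider */
}

static long example_div_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{
	return *parent_rate / 2;	/* only one achievable rate */
}

static int example_div_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return 0;			/* nothing to program for a fixed /2 */
}

static const struct clk_ops example_div_ops = {
	.recalc_rate	= example_div_recalc_rate,
	.round_rate	= example_div_round_rate,
	.set_rate	= example_div_set_rate,
};
#endif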
struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (!hw || IS_ERR(hw))
		return (struct clk *) hw;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = con_id;
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API. In the event of an error clk_register will return
 * an error code; drivers must test for an error code after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	if (dev && dev->driver)
		clk->owner = dev->driver->owner;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->core = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
				    GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						     GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	INIT_HLIST_HEAD(&clk->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		pr_err("%s: could not allocate per-user clk\n", __func__);
		ret = PTR_ERR(hw->clk);
		goto fail_parent_names_copy;
	}

	ret = __clk_init(dev, hw->clk);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree_const(clk->name);
fail_name:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
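
/*
 * Illustrative sketch only (not part of this file): registering a root
 * oscillator through clk_register(). The init data and ops are hypothetical;
 * real drivers typically use the basic clock type helpers instead.
 */
#if 0
static struct clk *example_register(struct device *dev)
{
	static struct clk_init_data init = {
		.name = "example_osc",
		.ops = &example_osc_ops,	/* hypothetical clk_ops */
		.num_parents = 0,
		.flags = CLK_IS_ROOT,		/* no parent clock */
	};
	static struct clk_hw hw = { .init = &init };

	return clk_register(dev, &hw);
}
#endif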
/*
 * Free memory allocated for a clock.
 * Caller must hold prepare_lock.
 */
static void __clk_release(struct kref *ref)
{
	struct clk_core *clk = container_of(ref, struct clk_core, ref);
	int i = clk->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(clk->parents);
	while (--i >= 0)
		kfree_const(clk->parent_names[i]);

	kfree(clk->parent_names);
	kfree_const(clk->name);
	kfree(clk);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;	/* don't leak the prepare_lock */
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);

unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
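
/*
 * Illustrative sketch only (not part of this file): the unmanaged teardown
 * path in a hypothetical driver. After clk_unregister(), any consumer that
 * still holds a reference sees the clk_nodrv_ops stubs above.
 */
#if 0
static void example_teardown(struct clk *clk)
{
	clk_unregister(clk);
	/* a stale consumer calling clk_set_rate() now gets -ENXIO */
}
#endif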
static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	/* res holds a struct clk *, as stored by devm_clk_register() */
	return *c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device the clock was registered for
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
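
/*
 * Illustrative sketch only (not part of this file): with the managed variant
 * the clock is unregistered automatically on driver detach, so no remove-path
 * code is needed. The clk_hw setup is hypothetical.
 */
#if 0
static struct clk_hw example_hw;	/* .init filled in elsewhere */

static int example_probe(struct device *dev)
{
	struct clk *clk = devm_clk_register(dev, &example_hw);

	return PTR_ERR_OR_ZERO(clk);	/* no matching unregister required */
}
#endif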
/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would nest the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the
 * original clock rate is passed to the callback via struct
 * clk_notifier_data.old_rate and the new frequency is passed via struct
 * clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			goto found;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
	if (!cn)
		goto out;

	cn->clk = clk;
	srcu_init_notifier_head(&cn->notifier_head);

	list_add(&cn->node, &clk_notifier_list);

found:
	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOENT;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk != clk)
			continue;

		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
		break;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
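
/*
 * Illustrative sketch only (not part of this file): a rate-change notifier
 * callback. The consumer names and the 200 MHz limit are hypothetical;
 * clk_notifier_data carries old_rate and new_rate as documented above.
 */
#if 0
static int example_rate_cb(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	struct clk_notifier_data *ndata = data;

	if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
		return NOTIFY_BAD;	/* veto rates we cannot sustain */

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rate_cb,
};

/* ... clk_notifier_register(clk, &example_nb); ... */
#endif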
#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback. Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %s\n", np->full_name);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our list */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk)) {
			clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
					       con_id);

			if (!IS_ERR(clk) && !__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}
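
/*
 * Illustrative sketch only (not part of this file): exposing several clocks
 * to DT consumers through the onecell helper above. The array contents and
 * the node pointer are hypothetical.
 */
#if 0
static struct clk *example_clks[2];
static struct clk_onecell_data example_clk_data = {
	.clks = example_clks,
	.clk_num = ARRAY_SIZE(example_clks),
};

static void example_provider_setup(struct device_node *np)
{
	/* consumers reference these as <&phandle 0> and <&phandle 1> */
	of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
}
#endif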
/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}

int of_clk_get_parent_count(struct device_node *np)
{
	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * If there is a clock-indices property, use it to translate the index
	 * specified here into an array offset for the clock-output-names
	 * property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

static LIST_HEAD(clk_provider_list);

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we assume that the device tree is written correctly,
		 * so any other error means that there are no more parents.
		 * Since we did not bail out earlier, all previously checked
		 * parents are ready. A node without any clock parents has
		 * nothing to wait for and also counts as ready.
		 */
		return 1;
	}
}
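
/*
 * Illustrative sketch only (not part of this file): resolving all parent
 * clock names for a node with of_clk_get_parent_name() above, e.g. when
 * filling a mux's parent_names array. The node and array are hypothetical.
 */
#if 0
static void example_list_parents(struct device_node *np)
{
	const char *parents[8];
	int i, n = of_clk_get_parent_count(np);

	for (i = 0; i < n && i < (int)ARRAY_SIZE(parents); i++)
		parents[i] = of_clk_get_parent_name(np, i);
}
#endif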
/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, following inter-provider
 * dependencies where possible.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent =
			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);

		/* minimal guard: skip a provider we cannot track */
		if (!parent)
			continue;

		parent->clk_init_cb = match->data;
		parent->np = np;
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {
				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * If we didn't manage to initialize any of the remaining
		 * providers during the last pass, initialize all of them
		 * unconditionally on the next pass, in case a clock parent
		 * was not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif
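
/*
 * Illustrative sketch only (not part of this file): a DT clock provider init
 * function as consumed by of_clk_init() via the __clk_of_table. The
 * compatible string and the setup body are hypothetical.
 */
#if 0
static void __init example_clk_setup(struct device_node *np)
{
	/* register clocks, then call of_clk_add_provider(np, ...) */
}
CLK_OF_DECLARE(example_clk, "vendor,example-clk", example_clk_setup);
#endif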