drivers/clk/clk.c

DEFINITIONS

This source file includes the following definitions.
  1. clk_pm_runtime_get
  2. clk_pm_runtime_put
  3. clk_prepare_lock
  4. clk_prepare_unlock
  5. clk_enable_lock
  6. clk_enable_unlock
  7. clk_core_rate_is_protected
  8. clk_core_is_prepared
  9. clk_core_is_enabled
  10. __clk_get_name
  11. clk_hw_get_name
  12. __clk_get_hw
  13. clk_hw_get_num_parents
  14. clk_hw_get_parent
  15. __clk_lookup_subtree
  16. clk_core_lookup
  17. of_parse_clkspec
  18. of_clk_get_hw_from_clkspec
  19. clk_core_get
  20. clk_core_fill_parent_index
  21. clk_core_get_parent_by_index
  22. clk_hw_get_parent_by_index
  23. __clk_get_enable_count
  24. clk_core_get_rate_nolock
  25. clk_hw_get_rate
  26. __clk_get_accuracy
  27. __clk_get_flags
  28. clk_hw_get_flags
  29. clk_hw_is_prepared
  30. clk_hw_rate_is_protected
  31. clk_hw_is_enabled
  32. __clk_is_enabled
  33. mux_is_better_rate
  34. clk_mux_determine_rate_flags
  35. __clk_lookup
  36. clk_core_get_boundaries
  37. clk_hw_set_rate_range
  38. __clk_mux_determine_rate
  39. __clk_mux_determine_rate_closest
  40. clk_core_rate_unprotect
  41. clk_core_rate_nuke_protect
  42. clk_rate_exclusive_put
  43. clk_core_rate_protect
  44. clk_core_rate_restore_protect
  45. clk_rate_exclusive_get
  46. clk_core_unprepare
  47. clk_core_unprepare_lock
  48. clk_unprepare
  49. clk_core_prepare
  50. clk_core_prepare_lock
  51. clk_prepare
  52. clk_core_disable
  53. clk_core_disable_lock
  54. clk_disable
  55. clk_core_enable
  56. clk_core_enable_lock
  57. clk_gate_restore_context
  58. clk_core_save_context
  59. clk_core_restore_context
  60. clk_save_context
  61. clk_restore_context
  62. clk_enable
  63. clk_core_prepare_enable
  64. clk_core_disable_unprepare
  65. clk_unprepare_unused_subtree
  66. clk_disable_unused_subtree
  67. clk_ignore_unused_setup
  68. clk_disable_unused
  69. clk_core_determine_round_nolock
  70. clk_core_init_rate_req
  71. clk_core_can_round
  72. clk_core_round_rate_nolock
  73. __clk_determine_rate
  74. clk_hw_round_rate
  75. clk_round_rate
  76. __clk_notify
  77. __clk_recalc_accuracies
  78. clk_core_get_accuracy
  79. clk_get_accuracy
  80. clk_recalc
  81. __clk_recalc_rates
  82. clk_core_get_rate
  83. clk_get_rate
  84. clk_fetch_parent_index
  85. clk_core_update_orphan_status
  86. clk_reparent
  87. __clk_set_parent_before
  88. __clk_set_parent_after
  89. __clk_set_parent
  90. __clk_speculate_rates
  91. clk_calc_subtree
  92. clk_calc_new_rates
  93. clk_propagate_rate_change
  94. clk_change_rate
  95. clk_core_req_round_rate_nolock
  96. clk_core_set_rate_nolock
  97. clk_set_rate
  98. clk_set_rate_exclusive
  99. clk_set_rate_range
  100. clk_set_min_rate
  101. clk_set_max_rate
  102. clk_get_parent
  103. __clk_init_parent
  104. clk_core_reparent
  105. clk_hw_reparent
  106. clk_has_parent
  107. clk_core_set_parent_nolock
  108. clk_hw_set_parent
  109. clk_set_parent
  110. clk_core_set_phase_nolock
  111. clk_set_phase
  112. clk_core_get_phase
  113. clk_get_phase
  114. clk_core_reset_duty_cycle_nolock
  115. clk_core_update_duty_cycle_nolock
  116. clk_core_update_duty_cycle_parent_nolock
  117. clk_core_set_duty_cycle_nolock
  118. clk_core_set_duty_cycle_parent_nolock
  119. clk_set_duty_cycle
  120. clk_core_get_scaled_duty_cycle
  121. clk_get_scaled_duty_cycle
  122. clk_is_match
  123. clk_summary_show_one
  124. clk_summary_show_subtree
  125. clk_summary_show
  126. clk_dump_one
  127. clk_dump_subtree
  128. clk_dump_show
  129. clk_flags_show
  130. possible_parent_show
  131. possible_parents_show
  132. current_parent_show
  133. clk_duty_cycle_show
  134. clk_min_rate_show
  135. clk_max_rate_show
  136. clk_debug_create_one
  137. clk_debug_register
  138. clk_debug_unregister
  139. clk_debug_init
  140. clk_debug_register
  141. clk_debug_reparent
  142. clk_debug_unregister
  143. clk_core_reparent_orphans_nolock
  144. __clk_core_init
  145. clk_core_link_consumer
  146. clk_core_unlink_consumer
  147. alloc_clk
  148. free_clk
  149. clk_hw_create_clk
  150. clk_cpy_name
  151. clk_core_populate_parent_map
  152. clk_core_free_parent_map
  153. __clk_register
  154. dev_or_parent_of_node
  155. clk_register
  156. clk_hw_register
  157. of_clk_hw_register
  158. __clk_release
  159. clk_nodrv_prepare_enable
  160. clk_nodrv_disable_unprepare
  161. clk_nodrv_set_rate
  162. clk_nodrv_set_parent
  163. clk_core_evict_parent_cache_subtree
  164. clk_core_evict_parent_cache
  165. clk_unregister
  166. clk_hw_unregister
  167. devm_clk_release
  168. devm_clk_hw_release
  169. devm_clk_register
  170. devm_clk_hw_register
  171. devm_clk_match
  172. devm_clk_hw_match
  173. devm_clk_unregister
  174. devm_clk_hw_unregister
  175. __clk_put
  176. clk_notifier_register
  177. clk_notifier_unregister
  178. clk_core_reparent_orphans
  179. of_clk_src_simple_get
  180. of_clk_hw_simple_get
  181. of_clk_src_onecell_get
  182. of_clk_hw_onecell_get
  183. of_clk_add_provider
  184. of_clk_add_hw_provider
  185. devm_of_clk_release_provider
  186. get_clk_provider_node
  187. devm_of_clk_add_hw_provider
  188. of_clk_del_provider
  189. devm_clk_provider_match
  190. devm_of_clk_del_provider
  191. of_parse_clkspec
  192. __of_clk_get_hw_from_provider
  193. of_clk_get_hw_from_clkspec
  194. of_clk_get_from_provider
  195. of_clk_get_hw
  196. __of_clk_get
  197. of_clk_get
  198. of_clk_get_by_name
  199. of_clk_get_parent_count
  200. of_clk_get_parent_name
  201. of_clk_parent_fill
  202. parent_ready
  203. of_clk_detect_critical
  204. of_clk_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
   4  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
   5  *
   6  * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
   7  */
   8 
   9 #include <linux/clk.h>
  10 #include <linux/clk-provider.h>
  11 #include <linux/clk/clk-conf.h>
  12 #include <linux/module.h>
  13 #include <linux/mutex.h>
  14 #include <linux/spinlock.h>
  15 #include <linux/err.h>
  16 #include <linux/list.h>
  17 #include <linux/slab.h>
  18 #include <linux/of.h>
  19 #include <linux/device.h>
  20 #include <linux/init.h>
  21 #include <linux/pm_runtime.h>
  22 #include <linux/sched.h>
  23 #include <linux/clkdev.h>
  24 
  25 #include "clk.h"
  26 
  27 static DEFINE_SPINLOCK(enable_lock);
  28 static DEFINE_MUTEX(prepare_lock);
  29 
  30 static struct task_struct *prepare_owner;
  31 static struct task_struct *enable_owner;
  32 
  33 static int prepare_refcnt;
  34 static int enable_refcnt;
  35 
  36 static HLIST_HEAD(clk_root_list);
  37 static HLIST_HEAD(clk_orphan_list);
  38 static LIST_HEAD(clk_notifier_list);
  39 
  40 static struct hlist_head *all_lists[] = {
  41         &clk_root_list,
  42         &clk_orphan_list,
  43         NULL,
  44 };
  45 
  46 /***    private data structures    ***/
  47 
  48 struct clk_parent_map {
  49         const struct clk_hw     *hw;
  50         struct clk_core         *core;
  51         const char              *fw_name;
  52         const char              *name;
  53         int                     index;
  54 };
  55 
  56 struct clk_core {
  57         const char              *name;
  58         const struct clk_ops    *ops;
  59         struct clk_hw           *hw;
  60         struct module           *owner;
  61         struct device           *dev;
  62         struct device_node      *of_node;
  63         struct clk_core         *parent;
  64         struct clk_parent_map   *parents;
  65         u8                      num_parents;
  66         u8                      new_parent_index;
  67         unsigned long           rate;
  68         unsigned long           req_rate;
  69         unsigned long           new_rate;
  70         struct clk_core         *new_parent;
  71         struct clk_core         *new_child;
  72         unsigned long           flags;
  73         bool                    orphan;
  74         bool                    rpm_enabled;
  75         unsigned int            enable_count;
  76         unsigned int            prepare_count;
  77         unsigned int            protect_count;
  78         unsigned long           min_rate;
  79         unsigned long           max_rate;
  80         unsigned long           accuracy;
  81         int                     phase;
  82         struct clk_duty         duty;
  83         struct hlist_head       children;
  84         struct hlist_node       child_node;
  85         struct hlist_head       clks;
  86         unsigned int            notifier_count;
  87 #ifdef CONFIG_DEBUG_FS
  88         struct dentry           *dentry;
  89         struct hlist_node       debug_node;
  90 #endif
  91         struct kref             ref;
  92 };
  93 
  94 #define CREATE_TRACE_POINTS
  95 #include <trace/events/clk.h>
  96 
  97 struct clk {
  98         struct clk_core *core;
  99         struct device *dev;
 100         const char *dev_id;
 101         const char *con_id;
 102         unsigned long min_rate;
 103         unsigned long max_rate;
 104         unsigned int exclusive_count;
 105         struct hlist_node clks_node;
 106 };
 107 
 108 /***           runtime pm          ***/
 109 static int clk_pm_runtime_get(struct clk_core *core)
 110 {
 111         int ret;
 112 
 113         if (!core->rpm_enabled)
 114                 return 0;
 115 
 116         ret = pm_runtime_get_sync(core->dev);
 117         if (ret < 0) {
 118                 pm_runtime_put_noidle(core->dev);
 119                 return ret;
 120         }
 121         return 0;
 122 }
 123 
 124 static void clk_pm_runtime_put(struct clk_core *core)
 125 {
 126         if (!core->rpm_enabled)
 127                 return;
 128 
 129         pm_runtime_put_sync(core->dev);
 130 }
 131 
 132 /***           locking             ***/
 133 static void clk_prepare_lock(void)
 134 {
 135         if (!mutex_trylock(&prepare_lock)) {
 136                 if (prepare_owner == current) {
 137                         prepare_refcnt++;
 138                         return;
 139                 }
 140                 mutex_lock(&prepare_lock);
 141         }
 142         WARN_ON_ONCE(prepare_owner != NULL);
 143         WARN_ON_ONCE(prepare_refcnt != 0);
 144         prepare_owner = current;
 145         prepare_refcnt = 1;
 146 }
 147 
 148 static void clk_prepare_unlock(void)
 149 {
 150         WARN_ON_ONCE(prepare_owner != current);
 151         WARN_ON_ONCE(prepare_refcnt == 0);
 152 
 153         if (--prepare_refcnt)
 154                 return;
 155         prepare_owner = NULL;
 156         mutex_unlock(&prepare_lock);
 157 }
 158 
 159 static unsigned long clk_enable_lock(void)
 160         __acquires(enable_lock)
 161 {
 162         unsigned long flags;
 163 
 164         /*
 165          * On UP systems, spin_trylock_irqsave() always returns true, even if
 166          * we already hold the lock. So, in that case, we rely only on
 167          * reference counting.
 168          */
 169         if (!IS_ENABLED(CONFIG_SMP) ||
 170             !spin_trylock_irqsave(&enable_lock, flags)) {
 171                 if (enable_owner == current) {
 172                         enable_refcnt++;
 173                         __acquire(enable_lock);
 174                         if (!IS_ENABLED(CONFIG_SMP))
 175                                 local_save_flags(flags);
 176                         return flags;
 177                 }
 178                 spin_lock_irqsave(&enable_lock, flags);
 179         }
 180         WARN_ON_ONCE(enable_owner != NULL);
 181         WARN_ON_ONCE(enable_refcnt != 0);
 182         enable_owner = current;
 183         enable_refcnt = 1;
 184         return flags;
 185 }
 186 
 187 static void clk_enable_unlock(unsigned long flags)
 188         __releases(enable_lock)
 189 {
 190         WARN_ON_ONCE(enable_owner != current);
 191         WARN_ON_ONCE(enable_refcnt == 0);
 192 
 193         if (--enable_refcnt) {
 194                 __release(enable_lock);
 195                 return;
 196         }
 197         enable_owner = NULL;
 198         spin_unlock_irqrestore(&enable_lock, flags);
 199 }
 200 
 201 static bool clk_core_rate_is_protected(struct clk_core *core)
 202 {
 203         return core->protect_count;
 204 }
 205 
 206 static bool clk_core_is_prepared(struct clk_core *core)
 207 {
 208         bool ret = false;
 209 
 210         /*
  211          * .is_prepared is optional for clocks that can prepare;
  212          * fall back to the software usage counter if it is missing.
 213          */
 214         if (!core->ops->is_prepared)
 215                 return core->prepare_count;
 216 
 217         if (!clk_pm_runtime_get(core)) {
 218                 ret = core->ops->is_prepared(core->hw);
 219                 clk_pm_runtime_put(core);
 220         }
 221 
 222         return ret;
 223 }
 224 
 225 static bool clk_core_is_enabled(struct clk_core *core)
 226 {
 227         bool ret = false;
 228 
 229         /*
  230          * .is_enabled is only mandatory for clocks that gate;
  231          * fall back to the software usage counter if .is_enabled is missing.
 232          */
 233         if (!core->ops->is_enabled)
 234                 return core->enable_count;
 235 
 236         /*
 237          * Check if clock controller's device is runtime active before
 238          * calling .is_enabled callback. If not, assume that clock is
 239          * disabled, because we might be called from atomic context, from
 240          * which pm_runtime_get() is not allowed.
 241          * This function is called mainly from clk_disable_unused_subtree,
 242          * which ensures proper runtime pm activation of controller before
 243          * taking enable spinlock, but the below check is needed if one tries
 244          * to call it from other places.
 245          */
 246         if (core->rpm_enabled) {
 247                 pm_runtime_get_noresume(core->dev);
 248                 if (!pm_runtime_active(core->dev)) {
 249                         ret = false;
 250                         goto done;
 251                 }
 252         }
 253 
 254         ret = core->ops->is_enabled(core->hw);
 255 done:
 256         if (core->rpm_enabled)
 257                 pm_runtime_put(core->dev);
 258 
 259         return ret;
 260 }
 261 
 262 /***    helper functions   ***/
 263 
 264 const char *__clk_get_name(const struct clk *clk)
 265 {
 266         return !clk ? NULL : clk->core->name;
 267 }
 268 EXPORT_SYMBOL_GPL(__clk_get_name);
 269 
 270 const char *clk_hw_get_name(const struct clk_hw *hw)
 271 {
 272         return hw->core->name;
 273 }
 274 EXPORT_SYMBOL_GPL(clk_hw_get_name);
 275 
 276 struct clk_hw *__clk_get_hw(struct clk *clk)
 277 {
 278         return !clk ? NULL : clk->core->hw;
 279 }
 280 EXPORT_SYMBOL_GPL(__clk_get_hw);
 281 
 282 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
 283 {
 284         return hw->core->num_parents;
 285 }
 286 EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
 287 
 288 struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
 289 {
 290         return hw->core->parent ? hw->core->parent->hw : NULL;
 291 }
 292 EXPORT_SYMBOL_GPL(clk_hw_get_parent);
 293 
 294 static struct clk_core *__clk_lookup_subtree(const char *name,
 295                                              struct clk_core *core)
 296 {
 297         struct clk_core *child;
 298         struct clk_core *ret;
 299 
 300         if (!strcmp(core->name, name))
 301                 return core;
 302 
 303         hlist_for_each_entry(child, &core->children, child_node) {
 304                 ret = __clk_lookup_subtree(name, child);
 305                 if (ret)
 306                         return ret;
 307         }
 308 
 309         return NULL;
 310 }
 311 
 312 static struct clk_core *clk_core_lookup(const char *name)
 313 {
 314         struct clk_core *root_clk;
 315         struct clk_core *ret;
 316 
 317         if (!name)
 318                 return NULL;
 319 
 320         /* search the 'proper' clk tree first */
 321         hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
 322                 ret = __clk_lookup_subtree(name, root_clk);
 323                 if (ret)
 324                         return ret;
 325         }
 326 
 327         /* if not found, then search the orphan tree */
 328         hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
 329                 ret = __clk_lookup_subtree(name, root_clk);
 330                 if (ret)
 331                         return ret;
 332         }
 333 
 334         return NULL;
 335 }
 336 
 337 #ifdef CONFIG_OF
 338 static int of_parse_clkspec(const struct device_node *np, int index,
 339                             const char *name, struct of_phandle_args *out_args);
 340 static struct clk_hw *
 341 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
 342 #else
 343 static inline int of_parse_clkspec(const struct device_node *np, int index,
 344                                    const char *name,
 345                                    struct of_phandle_args *out_args)
 346 {
 347         return -ENOENT;
 348 }
 349 static inline struct clk_hw *
 350 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
 351 {
 352         return ERR_PTR(-ENOENT);
 353 }
 354 #endif
 355 
 356 /**
 357  * clk_core_get - Find the clk_core parent of a clk
 358  * @core: clk to find parent of
 359  * @p_index: parent index to search for
 360  *
 361  * This is the preferred method for clk providers to find the parent of a
 362  * clk when that parent is external to the clk controller. The parent_names
 363  * array is indexed and treated as a local name matching a string in the device
 364  * node's 'clock-names' property or as the 'con_id' matching the device's
 365  * dev_name() in a clk_lookup. This allows clk providers to use their own
 366  * namespace instead of looking for a globally unique parent string.
 367  *
 368  * For example the following DT snippet would allow a clock registered by the
 369  * clock-controller@c001 that has a clk_init_data::parent_data array
 370  * with 'xtal' in the 'name' member to find the clock provided by the
 371  * clock-controller@f00abcd without needing to get the globally unique name of
 372  * the xtal clk.
 373  *
 374  *      parent: clock-controller@f00abcd {
 375  *              reg = <0xf00abcd 0xabcd>;
 376  *              #clock-cells = <0>;
 377  *      };
 378  *
 379  *      clock-controller@c001 {
 380  *              reg = <0xc001 0xf00d>;
 381  *              clocks = <&parent>;
 382  *              clock-names = "xtal";
 383  *              #clock-cells = <1>;
 384  *      };
 385  *
 386  * Returns: -ENOENT when the provider can't be found or the clk doesn't
 387  * exist in the provider or the name can't be found in the DT node or
 388  * in a clkdev lookup. NULL when the provider knows about the clk but it
 389  * isn't provided on this system.
 390  * A valid clk_core pointer when the clk can be found in the provider.
 391  */
 392 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
 393 {
 394         const char *name = core->parents[p_index].fw_name;
 395         int index = core->parents[p_index].index;
 396         struct clk_hw *hw = ERR_PTR(-ENOENT);
 397         struct device *dev = core->dev;
 398         const char *dev_id = dev ? dev_name(dev) : NULL;
 399         struct device_node *np = core->of_node;
 400         struct of_phandle_args clkspec;
 401 
 402         if (np && (name || index >= 0) &&
 403             !of_parse_clkspec(np, index, name, &clkspec)) {
 404                 hw = of_clk_get_hw_from_clkspec(&clkspec);
 405                 of_node_put(clkspec.np);
 406         } else if (name) {
 407                 /*
 408                  * If the DT search above couldn't find the provider fallback to
 409                  * looking up via clkdev based clk_lookups.
 410                  */
 411                 hw = clk_find_hw(dev_id, name);
 412         }
 413 
 414         if (IS_ERR(hw))
 415                 return ERR_CAST(hw);
 416 
 417         return hw->core;
 418 }
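
/*
 * A minimal provider-side sketch matching the DT example above. The names
 * below are hypothetical; clk_parent_data and clk_init_data are declared in
 * include/linux/clk-provider.h. With 'xtal' in .fw_name, this lookup
 * resolves the parent through the node's 'clock-names' property:
 *
 *	static const struct clk_parent_data c001_parents[] = {
 *		{ .fw_name = "xtal", .name = "xtal" },
 *	};
 *
 *	static const struct clk_init_data c001_init = {
 *		.name = "c001_clk",
 *		.parent_data = c001_parents,
 *		.num_parents = ARRAY_SIZE(c001_parents),
 *		.ops = &c001_clk_ops,
 *	};
 */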
 419 
 420 static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
 421 {
 422         struct clk_parent_map *entry = &core->parents[index];
 423         struct clk_core *parent = ERR_PTR(-ENOENT);
 424 
 425         if (entry->hw) {
 426                 parent = entry->hw->core;
 427                 /*
 428                  * We have a direct reference but it isn't registered yet?
 429                  * Orphan it and let clk_reparent() update the orphan status
 430                  * when the parent is registered.
 431                  */
 432                 if (!parent)
 433                         parent = ERR_PTR(-EPROBE_DEFER);
 434         } else {
 435                 parent = clk_core_get(core, index);
 436                 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
 437                         parent = clk_core_lookup(entry->name);
 438         }
 439 
 440         /* Only cache it if it's not an error */
 441         if (!IS_ERR(parent))
 442                 entry->core = parent;
 443 }
 444 
 445 static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
 446                                                          u8 index)
 447 {
 448         if (!core || index >= core->num_parents || !core->parents)
 449                 return NULL;
 450 
 451         if (!core->parents[index].core)
 452                 clk_core_fill_parent_index(core, index);
 453 
 454         return core->parents[index].core;
 455 }
 456 
 457 struct clk_hw *
 458 clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
 459 {
 460         struct clk_core *parent;
 461 
 462         parent = clk_core_get_parent_by_index(hw->core, index);
 463 
 464         return !parent ? NULL : parent->hw;
 465 }
 466 EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
 467 
 468 unsigned int __clk_get_enable_count(struct clk *clk)
 469 {
 470         return !clk ? 0 : clk->core->enable_count;
 471 }
 472 
 473 static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
 474 {
 475         if (!core)
 476                 return 0;
 477 
 478         if (!core->num_parents || core->parent)
 479                 return core->rate;
 480 
 481         /*
 482          * Clk must have a parent because num_parents > 0 but the parent isn't
 483          * known yet. Best to return 0 as the rate of this clk until we can
 484          * properly recalc the rate based on the parent's rate.
 485          */
 486         return 0;
 487 }
 488 
 489 unsigned long clk_hw_get_rate(const struct clk_hw *hw)
 490 {
 491         return clk_core_get_rate_nolock(hw->core);
 492 }
 493 EXPORT_SYMBOL_GPL(clk_hw_get_rate);
 494 
 495 static unsigned long __clk_get_accuracy(struct clk_core *core)
 496 {
 497         if (!core)
 498                 return 0;
 499 
 500         return core->accuracy;
 501 }
 502 
 503 unsigned long __clk_get_flags(struct clk *clk)
 504 {
 505         return !clk ? 0 : clk->core->flags;
 506 }
 507 EXPORT_SYMBOL_GPL(__clk_get_flags);
 508 
 509 unsigned long clk_hw_get_flags(const struct clk_hw *hw)
 510 {
 511         return hw->core->flags;
 512 }
 513 EXPORT_SYMBOL_GPL(clk_hw_get_flags);
 514 
 515 bool clk_hw_is_prepared(const struct clk_hw *hw)
 516 {
 517         return clk_core_is_prepared(hw->core);
 518 }
 519 EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
 520 
 521 bool clk_hw_rate_is_protected(const struct clk_hw *hw)
 522 {
 523         return clk_core_rate_is_protected(hw->core);
 524 }
 525 EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
 526 
 527 bool clk_hw_is_enabled(const struct clk_hw *hw)
 528 {
 529         return clk_core_is_enabled(hw->core);
 530 }
 531 EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
 532 
 533 bool __clk_is_enabled(struct clk *clk)
 534 {
 535         if (!clk)
 536                 return false;
 537 
 538         return clk_core_is_enabled(clk->core);
 539 }
 540 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 541 
 542 static bool mux_is_better_rate(unsigned long rate, unsigned long now,
 543                            unsigned long best, unsigned long flags)
 544 {
 545         if (flags & CLK_MUX_ROUND_CLOSEST)
 546                 return abs(now - rate) < abs(best - rate);
 547 
 548         return now <= rate && now > best;
 549 }
 550 
 551 int clk_mux_determine_rate_flags(struct clk_hw *hw,
 552                                  struct clk_rate_request *req,
 553                                  unsigned long flags)
 554 {
 555         struct clk_core *core = hw->core, *parent, *best_parent = NULL;
 556         int i, num_parents, ret;
 557         unsigned long best = 0;
 558         struct clk_rate_request parent_req = *req;
 559 
 560         /* if NO_REPARENT flag set, pass through to current parent */
 561         if (core->flags & CLK_SET_RATE_NO_REPARENT) {
 562                 parent = core->parent;
 563                 if (core->flags & CLK_SET_RATE_PARENT) {
 564                         ret = __clk_determine_rate(parent ? parent->hw : NULL,
 565                                                    &parent_req);
 566                         if (ret)
 567                                 return ret;
 568 
 569                         best = parent_req.rate;
 570                 } else if (parent) {
 571                         best = clk_core_get_rate_nolock(parent);
 572                 } else {
 573                         best = clk_core_get_rate_nolock(core);
 574                 }
 575 
 576                 goto out;
 577         }
 578 
 579         /* find the parent that can provide the fastest rate <= rate */
 580         num_parents = core->num_parents;
 581         for (i = 0; i < num_parents; i++) {
 582                 parent = clk_core_get_parent_by_index(core, i);
 583                 if (!parent)
 584                         continue;
 585 
 586                 if (core->flags & CLK_SET_RATE_PARENT) {
 587                         parent_req = *req;
 588                         ret = __clk_determine_rate(parent->hw, &parent_req);
 589                         if (ret)
 590                                 continue;
 591                 } else {
 592                         parent_req.rate = clk_core_get_rate_nolock(parent);
 593                 }
 594 
 595                 if (mux_is_better_rate(req->rate, parent_req.rate,
 596                                        best, flags)) {
 597                         best_parent = parent;
 598                         best = parent_req.rate;
 599                 }
 600         }
 601 
 602         if (!best_parent)
 603                 return -EINVAL;
 604 
 605 out:
 606         if (best_parent)
 607                 req->best_parent_hw = best_parent->hw;
 608         req->best_parent_rate = best;
 609         req->rate = best;
 610 
 611         return 0;
 612 }
 613 EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
 614 
 615 struct clk *__clk_lookup(const char *name)
 616 {
 617         struct clk_core *core = clk_core_lookup(name);
 618 
 619         return !core ? NULL : core->hw->clk;
 620 }
 621 
 622 static void clk_core_get_boundaries(struct clk_core *core,
 623                                     unsigned long *min_rate,
 624                                     unsigned long *max_rate)
 625 {
 626         struct clk *clk_user;
 627 
 628         lockdep_assert_held(&prepare_lock);
 629 
 630         *min_rate = core->min_rate;
 631         *max_rate = core->max_rate;
 632 
 633         hlist_for_each_entry(clk_user, &core->clks, clks_node)
 634                 *min_rate = max(*min_rate, clk_user->min_rate);
 635 
 636         hlist_for_each_entry(clk_user, &core->clks, clks_node)
 637                 *max_rate = min(*max_rate, clk_user->max_rate);
 638 }
 639 
 640 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
 641                            unsigned long max_rate)
 642 {
 643         hw->core->min_rate = min_rate;
 644         hw->core->max_rate = max_rate;
 645 }
 646 EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
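
/*
 * Provider-side sketch (hypothetical PLL driver): constrain the achievable
 * range right after registering the hw clock, so later consumer rate
 * requests are bounded accordingly:
 *
 *	ret = devm_clk_hw_register(dev, &my_pll->hw);
 *	if (!ret)
 *		clk_hw_set_rate_range(&my_pll->hw, 600000000, 1200000000);
 */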
 647 
  648 /**
 649  * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 650  * @hw: mux type clk to determine rate on
 651  * @req: rate request, also used to return preferred parent and frequencies
 652  *
 653  * Helper for finding best parent to provide a given frequency. This can be used
 654  * directly as a determine_rate callback (e.g. for a mux), or from a more
 655  * complex clock that may combine a mux with other operations.
 656  *
 657  * Returns: 0 on success, -EERROR value on error
 658  */
 659 int __clk_mux_determine_rate(struct clk_hw *hw,
 660                              struct clk_rate_request *req)
 661 {
 662         return clk_mux_determine_rate_flags(hw, req, 0);
 663 }
 664 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
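
/*
 * A sketch of using this helper directly as a mux's determine_rate
 * callback. The ops table and the .get_parent/.set_parent handlers are
 * hypothetical driver code:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.determine_rate = __clk_mux_determine_rate,
 *		.get_parent = my_mux_get_parent,
 *		.set_parent = my_mux_set_parent,
 *	};
 */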
 665 
 666 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
 667                                      struct clk_rate_request *req)
 668 {
 669         return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
 670 }
 671 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 672 
 673 /***        clk api        ***/
 674 
 675 static void clk_core_rate_unprotect(struct clk_core *core)
 676 {
 677         lockdep_assert_held(&prepare_lock);
 678 
 679         if (!core)
 680                 return;
 681 
 682         if (WARN(core->protect_count == 0,
 683             "%s already unprotected\n", core->name))
 684                 return;
 685 
 686         if (--core->protect_count > 0)
 687                 return;
 688 
 689         clk_core_rate_unprotect(core->parent);
 690 }
 691 
 692 static int clk_core_rate_nuke_protect(struct clk_core *core)
 693 {
 694         int ret;
 695 
 696         lockdep_assert_held(&prepare_lock);
 697 
 698         if (!core)
 699                 return -EINVAL;
 700 
 701         if (core->protect_count == 0)
 702                 return 0;
 703 
 704         ret = core->protect_count;
 705         core->protect_count = 1;
 706         clk_core_rate_unprotect(core);
 707 
 708         return ret;
 709 }
 710 
 711 /**
 712  * clk_rate_exclusive_put - release exclusivity over clock rate control
 713  * @clk: the clk over which the exclusivity is released
 714  *
 715  * clk_rate_exclusive_put() completes a critical section during which a clock
 716  * consumer cannot tolerate any other consumer making any operation on the
 717  * clock which could result in a rate change or rate glitch. Exclusive clocks
 718  * cannot have their rate changed, either directly or indirectly due to changes
  719  * further up the parent chain of clocks. As a result, clocks up the parent chain
  720  * also come under the exclusive control of the calling consumer.
 721  *
  722  * If exclusivity is claimed more than once on a clock, even by the same consumer,
 723  * the rate effectively gets locked as exclusivity can't be preempted.
 724  *
 725  * Calls to clk_rate_exclusive_put() must be balanced with calls to
 726  * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 727  * error status.
 728  */
 729 void clk_rate_exclusive_put(struct clk *clk)
 730 {
 731         if (!clk)
 732                 return;
 733 
 734         clk_prepare_lock();
 735 
 736         /*
 737          * if there is something wrong with this consumer protect count, stop
 738          * here before messing with the provider
 739          */
 740         if (WARN_ON(clk->exclusive_count <= 0))
 741                 goto out;
 742 
 743         clk_core_rate_unprotect(clk->core);
 744         clk->exclusive_count--;
 745 out:
 746         clk_prepare_unlock();
 747 }
 748 EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
 749 
 750 static void clk_core_rate_protect(struct clk_core *core)
 751 {
 752         lockdep_assert_held(&prepare_lock);
 753 
 754         if (!core)
 755                 return;
 756 
 757         if (core->protect_count == 0)
 758                 clk_core_rate_protect(core->parent);
 759 
 760         core->protect_count++;
 761 }
 762 
 763 static void clk_core_rate_restore_protect(struct clk_core *core, int count)
 764 {
 765         lockdep_assert_held(&prepare_lock);
 766 
 767         if (!core)
 768                 return;
 769 
 770         if (count == 0)
 771                 return;
 772 
 773         clk_core_rate_protect(core);
 774         core->protect_count = count;
 775 }
 776 
 777 /**
 778  * clk_rate_exclusive_get - get exclusivity over the clk rate control
  779  * @clk: the clk over which the exclusivity of rate control is requested
 780  *
  781  * clk_rate_exclusive_get() begins a critical section during which a clock
 782  * consumer cannot tolerate any other consumer making any operation on the
 783  * clock which could result in a rate change or rate glitch. Exclusive clocks
 784  * cannot have their rate changed, either directly or indirectly due to changes
  785  * further up the parent chain of clocks. As a result, clocks up the parent chain
  786  * also come under the exclusive control of the calling consumer.
 787  *
  788  * If exclusivity is claimed more than once on a clock, even by the same consumer,
 789  * the rate effectively gets locked as exclusivity can't be preempted.
 790  *
 791  * Calls to clk_rate_exclusive_get() should be balanced with calls to
 792  * clk_rate_exclusive_put(). Calls to this function may sleep.
 793  * Returns 0 on success, -EERROR otherwise
 794  */
 795 int clk_rate_exclusive_get(struct clk *clk)
 796 {
 797         if (!clk)
 798                 return 0;
 799 
 800         clk_prepare_lock();
 801         clk_core_rate_protect(clk->core);
 802         clk->exclusive_count++;
 803         clk_prepare_unlock();
 804 
 805         return 0;
 806 }
 807 EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
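
/*
 * Consumer sketch (assumes a valid clk): hold the rate steady across a
 * glitch-sensitive operation, balancing the get with a put. The worker
 * function name is hypothetical:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	do_glitch_sensitive_work();
 *	clk_rate_exclusive_put(clk);
 */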
 808 
 809 static void clk_core_unprepare(struct clk_core *core)
 810 {
 811         lockdep_assert_held(&prepare_lock);
 812 
 813         if (!core)
 814                 return;
 815 
 816         if (WARN(core->prepare_count == 0,
 817             "%s already unprepared\n", core->name))
 818                 return;
 819 
 820         if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
 821             "Unpreparing critical %s\n", core->name))
 822                 return;
 823 
 824         if (core->flags & CLK_SET_RATE_GATE)
 825                 clk_core_rate_unprotect(core);
 826 
 827         if (--core->prepare_count > 0)
 828                 return;
 829 
 830         WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
 831 
 832         trace_clk_unprepare(core);
 833 
 834         if (core->ops->unprepare)
 835                 core->ops->unprepare(core->hw);
 836 
 837         clk_pm_runtime_put(core);
 838 
 839         trace_clk_unprepare_complete(core);
 840         clk_core_unprepare(core->parent);
 841 }
 842 
 843 static void clk_core_unprepare_lock(struct clk_core *core)
 844 {
 845         clk_prepare_lock();
 846         clk_core_unprepare(core);
 847         clk_prepare_unlock();
 848 }
 849 
 850 /**
 851  * clk_unprepare - undo preparation of a clock source
 852  * @clk: the clk being unprepared
 853  *
 854  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 855  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 856  * if the operation may sleep.  One example is a clk which is accessed over
 857  * I2c.  In the complex case a clk gate operation may require a fast and a slow
  858  * I2C.  In the complex case a clk gate operation may require a fast and a slow
  859  * part.  It is for this reason that clk_unprepare and clk_disable are not mutually
 860  */
 861 void clk_unprepare(struct clk *clk)
 862 {
 863         if (IS_ERR_OR_NULL(clk))
 864                 return;
 865 
 866         clk_core_unprepare_lock(clk->core);
 867 }
 868 EXPORT_SYMBOL_GPL(clk_unprepare);
 869 
 870 static int clk_core_prepare(struct clk_core *core)
 871 {
 872         int ret = 0;
 873 
 874         lockdep_assert_held(&prepare_lock);
 875 
 876         if (!core)
 877                 return 0;
 878 
 879         if (core->prepare_count == 0) {
 880                 ret = clk_pm_runtime_get(core);
 881                 if (ret)
 882                         return ret;
 883 
 884                 ret = clk_core_prepare(core->parent);
 885                 if (ret)
 886                         goto runtime_put;
 887 
 888                 trace_clk_prepare(core);
 889 
 890                 if (core->ops->prepare)
 891                         ret = core->ops->prepare(core->hw);
 892 
 893                 trace_clk_prepare_complete(core);
 894 
 895                 if (ret)
 896                         goto unprepare;
 897         }
 898 
 899         core->prepare_count++;
 900 
 901         /*
  902          * CLK_SET_RATE_GATE is a special case of clock protection.
 903          * Instead of a consumer claiming exclusive rate control, it is
 904          * actually the provider which prevents any consumer from making any
 905          * operation which could result in a rate change or rate glitch while
 906          * the clock is prepared.
 907          */
 908         if (core->flags & CLK_SET_RATE_GATE)
 909                 clk_core_rate_protect(core);
 910 
 911         return 0;
 912 unprepare:
 913         clk_core_unprepare(core->parent);
 914 runtime_put:
 915         clk_pm_runtime_put(core);
 916         return ret;
 917 }
 918 
 919 static int clk_core_prepare_lock(struct clk_core *core)
 920 {
 921         int ret;
 922 
 923         clk_prepare_lock();
 924         ret = clk_core_prepare(core);
 925         clk_prepare_unlock();
 926 
 927         return ret;
 928 }
 929 
 930 /**
 931  * clk_prepare - prepare a clock source
 932  * @clk: the clk being prepared
 933  *
 934  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 935  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
  936  * operation may sleep.  One example is a clk which is accessed over I2C.  In
  937  * the complex case a clk ungate operation may require a fast and a slow part.
  938  * It is for this reason that clk_prepare and clk_enable are not mutually
 939  * exclusive.  In fact clk_prepare must be called before clk_enable.
 940  * Returns 0 on success, -EERROR otherwise.
 941  */
 942 int clk_prepare(struct clk *clk)
 943 {
 944         if (!clk)
 945                 return 0;
 946 
 947         return clk_core_prepare_lock(clk->core);
 948 }
 949 EXPORT_SYMBOL_GPL(clk_prepare);
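
/*
 * Consumer sketch (assumes a valid clk obtained from clk_get()): prepare
 * in sleepable context before enabling, per the ordering rule above:
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 */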
 950 
 951 static void clk_core_disable(struct clk_core *core)
 952 {
 953         lockdep_assert_held(&enable_lock);
 954 
 955         if (!core)
 956                 return;
 957 
 958         if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
 959                 return;
 960 
 961         if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
 962             "Disabling critical %s\n", core->name))
 963                 return;
 964 
 965         if (--core->enable_count > 0)
 966                 return;
 967 
 968         trace_clk_disable_rcuidle(core);
 969 
 970         if (core->ops->disable)
 971                 core->ops->disable(core->hw);
 972 
 973         trace_clk_disable_complete_rcuidle(core);
 974 
 975         clk_core_disable(core->parent);
 976 }
 977 
 978 static void clk_core_disable_lock(struct clk_core *core)
 979 {
 980         unsigned long flags;
 981 
 982         flags = clk_enable_lock();
 983         clk_core_disable(core);
 984         clk_enable_unlock(flags);
 985 }
 986 
 987 /**
 988  * clk_disable - gate a clock
 989  * @clk: the clk being gated
 990  *
 991  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 992  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 993  * clk if the operation is fast and will never sleep.  One example is a
 994  * SoC-internal clk which is controlled via simple register writes.  In the
  995  * complex case a clk gate operation may require a fast and a slow part.  It is
  996  * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
 997  * In fact clk_disable must be called before clk_unprepare.
 998  */
 999 void clk_disable(struct clk *clk)
1000 {
1001         if (IS_ERR_OR_NULL(clk))
1002                 return;
1003 
1004         clk_core_disable_lock(clk->core);
1005 }
1006 EXPORT_SYMBOL_GPL(clk_disable);
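
/*
 * Teardown sketch mirroring the ordering rule above: disable first, then
 * unprepare (assumes the consumer holds a prepared, enabled clk):
 *
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */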
1007 
1008 static int clk_core_enable(struct clk_core *core)
1009 {
1010         int ret = 0;
1011 
1012         lockdep_assert_held(&enable_lock);
1013 
1014         if (!core)
1015                 return 0;
1016 
1017         if (WARN(core->prepare_count == 0,
1018             "Enabling unprepared %s\n", core->name))
1019                 return -ESHUTDOWN;
1020 
1021         if (core->enable_count == 0) {
1022                 ret = clk_core_enable(core->parent);
1023 
1024                 if (ret)
1025                         return ret;
1026 
1027                 trace_clk_enable_rcuidle(core);
1028 
1029                 if (core->ops->enable)
1030                         ret = core->ops->enable(core->hw);
1031 
1032                 trace_clk_enable_complete_rcuidle(core);
1033 
1034                 if (ret) {
1035                         clk_core_disable(core->parent);
1036                         return ret;
1037                 }
1038         }
1039 
1040         core->enable_count++;
1041         return 0;
1042 }
1043 
1044 static int clk_core_enable_lock(struct clk_core *core)
1045 {
1046         unsigned long flags;
1047         int ret;
1048 
1049         flags = clk_enable_lock();
1050         ret = clk_core_enable(core);
1051         clk_enable_unlock(flags);
1052 
1053         return ret;
1054 }
1055 
1056 /**
1057  * clk_gate_restore_context - restore context for poweroff
1058  * @hw: the clk_hw pointer of clock whose state is to be restored
1059  *
 1060  * The clock gate restore context function enables or disables the
 1061  * gate clock based on its enable_count. It is meant for cases where
 1062  * the clock context is lost on power-down, so that on restore the
 1063  * gate's hardware state can be brought back in line with the software
 1064  * enable count.
1065  */
1066 void clk_gate_restore_context(struct clk_hw *hw)
1067 {
1068         struct clk_core *core = hw->core;
1069 
1070         if (core->enable_count)
1071                 core->ops->enable(hw);
1072         else
1073                 core->ops->disable(hw);
1074 }
1075 EXPORT_SYMBOL_GPL(clk_gate_restore_context);
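
/*
 * A sketch of wiring this helper into a provider's ops as the
 * restore_context callback (hypothetical gate driver; the other handlers
 * are the driver's own):
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable = my_gate_enable,
 *		.disable = my_gate_disable,
 *		.is_enabled = my_gate_is_enabled,
 *		.restore_context = clk_gate_restore_context,
 *	};
 */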
1076 
1077 static int clk_core_save_context(struct clk_core *core)
1078 {
1079         struct clk_core *child;
1080         int ret = 0;
1081 
1082         hlist_for_each_entry(child, &core->children, child_node) {
1083                 ret = clk_core_save_context(child);
1084                 if (ret < 0)
1085                         return ret;
1086         }
1087 
1088         if (core->ops && core->ops->save_context)
1089                 ret = core->ops->save_context(core->hw);
1090 
1091         return ret;
1092 }
1093 
1094 static void clk_core_restore_context(struct clk_core *core)
1095 {
1096         struct clk_core *child;
1097 
1098         if (core->ops && core->ops->restore_context)
1099                 core->ops->restore_context(core->hw);
1100 
1101         hlist_for_each_entry(child, &core->children, child_node)
1102                 clk_core_restore_context(child);
1103 }
1104 
1105 /**
1106  * clk_save_context - save clock context for poweroff
1107  *
 1108  * Saves the context of the clock registers for power states in which the
1109  * contents of the registers will be lost. Occurs deep within the suspend
1110  * code.  Returns 0 on success.
1111  */
1112 int clk_save_context(void)
1113 {
1114         struct clk_core *clk;
1115         int ret;
1116 
1117         hlist_for_each_entry(clk, &clk_root_list, child_node) {
1118                 ret = clk_core_save_context(clk);
1119                 if (ret < 0)
1120                         return ret;
1121         }
1122 
1123         hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1124                 ret = clk_core_save_context(clk);
1125                 if (ret < 0)
1126                         return ret;
1127         }
1128 
1129         return 0;
1130 }
1131 EXPORT_SYMBOL_GPL(clk_save_context);
1132 
1133 /**
1134  * clk_restore_context - restore clock context after poweroff
1135  *
1136  * Restore the saved clock context upon resume.
1137  *
1138  */
1139 void clk_restore_context(void)
1140 {
1141         struct clk_core *core;
1142 
1143         hlist_for_each_entry(core, &clk_root_list, child_node)
1144                 clk_core_restore_context(core);
1145 
1146         hlist_for_each_entry(core, &clk_orphan_list, child_node)
1147                 clk_core_restore_context(core);
1148 }
1149 EXPORT_SYMBOL_GPL(clk_restore_context);
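
/*
 * Platform sleep-code sketch for the save/restore pair above. syscore_ops
 * is shown as one plausible hook; the names are hypothetical:
 *
 *	static int my_pm_suspend(void)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void my_pm_resume(void)
 *	{
 *		clk_restore_context();
 *	}
 *
 *	static struct syscore_ops my_pm_syscore_ops = {
 *		.suspend = my_pm_suspend,
 *		.resume = my_pm_resume,
 *	};
 */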
1150 
1151 /**
1152  * clk_enable - ungate a clock
1153  * @clk: the clk being ungated
1154  *
1155  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
1156  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1157  * if the operation will never sleep.  One example is a SoC-internal clk which
1158  * is controlled via simple register writes.  In the complex case a clk ungate
1159  * operation may require a fast and a slow part.  It is this reason that
 1160  * operation may require a fast and a slow part.  It is for this reason that
1161  * must be called before clk_enable.  Returns 0 on success, -EERROR
1162  * otherwise.
1163  */
1164 int clk_enable(struct clk *clk)
1165 {
1166         if (!clk)
1167                 return 0;
1168 
1169         return clk_core_enable_lock(clk->core);
1170 }
1171 EXPORT_SYMBOL_GPL(clk_enable);
1172 
1173 static int clk_core_prepare_enable(struct clk_core *core)
1174 {
1175         int ret;
1176 
1177         ret = clk_core_prepare_lock(core);
1178         if (ret)
1179                 return ret;
1180 
1181         ret = clk_core_enable_lock(core);
1182         if (ret)
1183                 clk_core_unprepare_lock(core);
1184 
1185         return ret;
1186 }
1187 
1188 static void clk_core_disable_unprepare(struct clk_core *core)
1189 {
1190         clk_core_disable_lock(core);
1191         clk_core_unprepare_lock(core);
1192 }
1193 
1194 static void clk_unprepare_unused_subtree(struct clk_core *core)
1195 {
1196         struct clk_core *child;
1197 
1198         lockdep_assert_held(&prepare_lock);
1199 
1200         hlist_for_each_entry(child, &core->children, child_node)
1201                 clk_unprepare_unused_subtree(child);
1202 
1203         if (core->prepare_count)
1204                 return;
1205 
1206         if (core->flags & CLK_IGNORE_UNUSED)
1207                 return;
1208 
1209         if (clk_pm_runtime_get(core))
1210                 return;
1211 
1212         if (clk_core_is_prepared(core)) {
1213                 trace_clk_unprepare(core);
1214                 if (core->ops->unprepare_unused)
1215                         core->ops->unprepare_unused(core->hw);
1216                 else if (core->ops->unprepare)
1217                         core->ops->unprepare(core->hw);
1218                 trace_clk_unprepare_complete(core);
1219         }
1220 
1221         clk_pm_runtime_put(core);
1222 }
1223 
1224 static void clk_disable_unused_subtree(struct clk_core *core)
1225 {
1226         struct clk_core *child;
1227         unsigned long flags;
1228 
1229         lockdep_assert_held(&prepare_lock);
1230 
1231         hlist_for_each_entry(child, &core->children, child_node)
1232                 clk_disable_unused_subtree(child);
1233 
1234         if (core->flags & CLK_OPS_PARENT_ENABLE)
1235                 clk_core_prepare_enable(core->parent);
1236 
1237         if (clk_pm_runtime_get(core))
1238                 goto unprepare_out;
1239 
1240         flags = clk_enable_lock();
1241 
1242         if (core->enable_count)
1243                 goto unlock_out;
1244 
1245         if (core->flags & CLK_IGNORE_UNUSED)
1246                 goto unlock_out;
1247 
1248         /*
 1249          * Some gate clocks have special needs during the disable-unused
 1250          * sequence.  Call .disable_unused if available, otherwise fall
 1251          * back to .disable.
1252          */
1253         if (clk_core_is_enabled(core)) {
1254                 trace_clk_disable(core);
1255                 if (core->ops->disable_unused)
1256                         core->ops->disable_unused(core->hw);
1257                 else if (core->ops->disable)
1258                         core->ops->disable(core->hw);
1259                 trace_clk_disable_complete(core);
1260         }
1261 
1262 unlock_out:
1263         clk_enable_unlock(flags);
1264         clk_pm_runtime_put(core);
1265 unprepare_out:
1266         if (core->flags & CLK_OPS_PARENT_ENABLE)
1267                 clk_core_disable_unprepare(core->parent);
1268 }
1269 
1270 static bool clk_ignore_unused;
1271 static int __init clk_ignore_unused_setup(char *__unused)
1272 {
1273         clk_ignore_unused = true;
1274         return 1;
1275 }
1276 __setup("clk_ignore_unused", clk_ignore_unused_setup);
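
/*
 * Usage note: booting with "clk_ignore_unused" on the kernel command line
 * skips the late-init sweep below that gates unused clocks, which can help
 * when bringing up a driver that does not yet claim all of its clocks.
 */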
1277 
1278 static int clk_disable_unused(void)
1279 {
1280         struct clk_core *core;
1281 
1282         if (clk_ignore_unused) {
1283                 pr_warn("clk: Not disabling unused clocks\n");
1284                 return 0;
1285         }
1286 
1287         clk_prepare_lock();
1288 
1289         hlist_for_each_entry(core, &clk_root_list, child_node)
1290                 clk_disable_unused_subtree(core);
1291 
1292         hlist_for_each_entry(core, &clk_orphan_list, child_node)
1293                 clk_disable_unused_subtree(core);
1294 
1295         hlist_for_each_entry(core, &clk_root_list, child_node)
1296                 clk_unprepare_unused_subtree(core);
1297 
1298         hlist_for_each_entry(core, &clk_orphan_list, child_node)
1299                 clk_unprepare_unused_subtree(core);
1300 
1301         clk_prepare_unlock();
1302 
1303         return 0;
1304 }
1305 late_initcall_sync(clk_disable_unused);
1306 
1307 static int clk_core_determine_round_nolock(struct clk_core *core,
1308                                            struct clk_rate_request *req)
1309 {
1310         long rate;
1311 
1312         lockdep_assert_held(&prepare_lock);
1313 
1314         if (!core)
1315                 return 0;
1316 
1317         /*
 1318          * At this point, core protection will be disabled if:
 1319          * - the provider is not protected at all, or
 1320          * - the calling consumer is the only one which has exclusivity
 1321          *   over the provider
1322          */
1323         if (clk_core_rate_is_protected(core)) {
1324                 req->rate = core->rate;
1325         } else if (core->ops->determine_rate) {
1326                 return core->ops->determine_rate(core->hw, req);
1327         } else if (core->ops->round_rate) {
1328                 rate = core->ops->round_rate(core->hw, req->rate,
1329                                              &req->best_parent_rate);
1330                 if (rate < 0)
1331                         return rate;
1332 
1333                 req->rate = rate;
1334         } else {
1335                 return -EINVAL;
1336         }
1337 
1338         return 0;
1339 }
1340 
1341 static void clk_core_init_rate_req(struct clk_core * const core,
1342                                    struct clk_rate_request *req)
1343 {
1344         struct clk_core *parent;
1345 
1346         if (WARN_ON(!core || !req))
1347                 return;
1348 
1349         parent = core->parent;
1350         if (parent) {
1351                 req->best_parent_hw = parent->hw;
1352                 req->best_parent_rate = parent->rate;
1353         } else {
1354                 req->best_parent_hw = NULL;
1355                 req->best_parent_rate = 0;
1356         }
1357 }
1358 
1359 static bool clk_core_can_round(struct clk_core * const core)
1360 {
1361         return core->ops->determine_rate || core->ops->round_rate;
1362 }
1363 
1364 static int clk_core_round_rate_nolock(struct clk_core *core,
1365                                       struct clk_rate_request *req)
1366 {
1367         lockdep_assert_held(&prepare_lock);
1368 
1369         if (!core) {
1370                 req->rate = 0;
1371                 return 0;
1372         }
1373 
1374         clk_core_init_rate_req(core, req);
1375 
1376         if (clk_core_can_round(core))
1377                 return clk_core_determine_round_nolock(core, req);
1378         else if (core->flags & CLK_SET_RATE_PARENT)
1379                 return clk_core_round_rate_nolock(core->parent, req);
1380 
1381         req->rate = core->rate;
1382         return 0;
1383 }
1384 
1385 /**
1386  * __clk_determine_rate - get the closest rate actually supported by a clock
1387  * @hw: determine the rate of this clock
1388  * @req: target rate request
1389  *
1390  * Useful for clk_ops such as .set_rate and .determine_rate.
1391  */
1392 int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1393 {
1394         if (!hw) {
1395                 req->rate = 0;
1396                 return 0;
1397         }
1398 
1399         return clk_core_round_rate_nolock(hw->core, req);
1400 }
1401 EXPORT_SYMBOL_GPL(__clk_determine_rate);
1402 
1403 unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1404 {
1405         int ret;
1406         struct clk_rate_request req;
1407 
1408         clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
1409         req.rate = rate;
1410 
1411         ret = clk_core_round_rate_nolock(hw->core, &req);
1412         if (ret)
1413                 return 0;
1414 
1415         return req.rate;
1416 }
1417 EXPORT_SYMBOL_GPL(clk_hw_round_rate);
1418 
1419 /**
1420  * clk_round_rate - round the given rate for a clk
1421  * @clk: the clk for which we are rounding a rate
1422  * @rate: the rate which is to be rounded
1423  *
1424  * Takes in a rate as input and rounds it to a rate that the clk can actually
 1425  * use, which is then returned.  If the clk doesn't support the round_rate
 1426  * operation then the parent rate is returned.
1427  */
1428 long clk_round_rate(struct clk *clk, unsigned long rate)
1429 {
1430         struct clk_rate_request req;
1431         int ret;
1432 
1433         if (!clk)
1434                 return 0;
1435 
1436         clk_prepare_lock();
1437 
1438         if (clk->exclusive_count)
1439                 clk_core_rate_unprotect(clk->core);
1440 
1441         clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
1442         req.rate = rate;
1443 
1444         ret = clk_core_round_rate_nolock(clk->core, &req);
1445 
1446         if (clk->exclusive_count)
1447                 clk_core_rate_protect(clk->core);
1448 
1449         clk_prepare_unlock();
1450 
1451         if (ret)
1452                 return ret;
1453 
1454         return req.rate;
1455 }
1456 EXPORT_SYMBOL_GPL(clk_round_rate);
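
/*
 * Consumer sketch (assumes a valid clk): probe the achievable rate before
 * committing to it with clk_set_rate():
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */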
1457 
1458 /**
1459  * __clk_notify - call clk notifier chain
1460  * @core: clk that is changing rate
1461  * @msg: clk notifier type (see include/linux/clk.h)
1462  * @old_rate: old clk rate
1463  * @new_rate: new clk rate
1464  *
1465  * Triggers a notifier call chain on the clk rate-change notification
1466  * for 'clk'.  Passes a pointer to the struct clk and the previous
1467  * and current rates to the notifier callback.  Intended to be called by
1468  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1469  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1470  * a driver returns that.
1471  */
1472 static int __clk_notify(struct clk_core *core, unsigned long msg,
1473                 unsigned long old_rate, unsigned long new_rate)
1474 {
1475         struct clk_notifier *cn;
1476         struct clk_notifier_data cnd;
1477         int ret = NOTIFY_DONE;
1478 
1479         cnd.old_rate = old_rate;
1480         cnd.new_rate = new_rate;
1481 
1482         list_for_each_entry(cn, &clk_notifier_list, node) {
1483                 if (cn->clk->core == core) {
1484                         cnd.clk = cn->clk;
1485                         ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1486                                         &cnd);
1487                         if (ret & NOTIFY_STOP_MASK)
1488                                 return ret;
1489                 }
1490         }
1491 
1492         return ret;
1493 }
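/*
 * A sketch of the consumer side that this chain ultimately reaches: a
 * callback registered with clk_notifier_register() receives the struct
 * clk_notifier_data built above and may veto a PRE_RATE_CHANGE. The
 * callback name and the 100 MHz limit are hypothetical.
 */
static int example_clk_notifier_cb(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	/* Refuse rates this (hypothetical) consumer cannot follow */
	if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
		return notifier_from_errno(-EINVAL);

	return NOTIFY_OK;
}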
1494 
1495 /**
1496  * __clk_recalc_accuracies - recalculate accuracies of a subtree of clks
1497  * @core: first clk in the subtree
1498  *
1499  * Walks the subtree of clks starting with clk and recalculates accuracies as
1500  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1501  * callback then it is assumed that the clock will take on the accuracy of its
1502  * parent.
1503  */
1504 static void __clk_recalc_accuracies(struct clk_core *core)
1505 {
1506         unsigned long parent_accuracy = 0;
1507         struct clk_core *child;
1508 
1509         lockdep_assert_held(&prepare_lock);
1510 
1511         if (core->parent)
1512                 parent_accuracy = core->parent->accuracy;
1513 
1514         if (core->ops->recalc_accuracy)
1515                 core->accuracy = core->ops->recalc_accuracy(core->hw,
1516                                                           parent_accuracy);
1517         else
1518                 core->accuracy = parent_accuracy;
1519 
1520         hlist_for_each_entry(child, &core->children, child_node)
1521                 __clk_recalc_accuracies(child);
1522 }
1523 
1524 static long clk_core_get_accuracy(struct clk_core *core)
1525 {
1526         unsigned long accuracy;
1527 
1528         clk_prepare_lock();
1529         if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1530                 __clk_recalc_accuracies(core);
1531 
1532         accuracy = __clk_get_accuracy(core);
1533         clk_prepare_unlock();
1534 
1535         return accuracy;
1536 }
1537 
1538 /**
1539  * clk_get_accuracy - return the accuracy of clk
1540  * @clk: the clk whose accuracy is being returned
1541  *
1542  * Simply returns the cached accuracy of the clk, unless the
1543  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will
1544  * be issued.
1545  * If clk is NULL then returns 0.
1546  */
1547 long clk_get_accuracy(struct clk *clk)
1548 {
1549         if (!clk)
1550                 return 0;
1551 
1552         return clk_core_get_accuracy(clk->core);
1553 }
1554 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1555 
1556 static unsigned long clk_recalc(struct clk_core *core,
1557                                 unsigned long parent_rate)
1558 {
1559         unsigned long rate = parent_rate;
1560 
1561         if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1562                 rate = core->ops->recalc_rate(core->hw, parent_rate);
1563                 clk_pm_runtime_put(core);
1564         }
1565         return rate;
1566 }
1567 
1568 /**
1569  * __clk_recalc_rates - recalculate rates of a subtree of clks
1570  * @core: first clk in the subtree
1571  * @msg: notification type (see include/linux/clk.h)
1572  *
1573  * Walks the subtree of clks starting with clk and recalculates rates as it
1574  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1575  * it is assumed that the clock will take on the rate of its parent.
1576  *
1577  * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1578  * if necessary.
1579  */
1580 static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1581 {
1582         unsigned long old_rate;
1583         unsigned long parent_rate = 0;
1584         struct clk_core *child;
1585 
1586         lockdep_assert_held(&prepare_lock);
1587 
1588         old_rate = core->rate;
1589 
1590         if (core->parent)
1591                 parent_rate = core->parent->rate;
1592 
1593         core->rate = clk_recalc(core, parent_rate);
1594 
1595         /*
1596          * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1597          * & ABORT_RATE_CHANGE notifiers
1598          */
1599         if (core->notifier_count && msg)
1600                 __clk_notify(core, msg, old_rate, core->rate);
1601 
1602         hlist_for_each_entry(child, &core->children, child_node)
1603                 __clk_recalc_rates(child, msg);
1604 }
1605 
1606 static unsigned long clk_core_get_rate(struct clk_core *core)
1607 {
1608         unsigned long rate;
1609 
1610         clk_prepare_lock();
1611 
1612         if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1613                 __clk_recalc_rates(core, 0);
1614 
1615         rate = clk_core_get_rate_nolock(core);
1616         clk_prepare_unlock();
1617 
1618         return rate;
1619 }
1620 
1621 /**
1622  * clk_get_rate - return the rate of clk
1623  * @clk: the clk whose rate is being returned
1624  *
1625  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1626  * is set, which means a recalc_rate will be issued.
1627  * If clk is NULL then returns 0.
1628  */
1629 unsigned long clk_get_rate(struct clk *clk)
1630 {
1631         if (!clk)
1632                 return 0;
1633 
1634         return clk_core_get_rate(clk->core);
1635 }
1636 EXPORT_SYMBOL_GPL(clk_get_rate);
1637 
1638 static int clk_fetch_parent_index(struct clk_core *core,
1639                                   struct clk_core *parent)
1640 {
1641         int i;
1642 
1643         if (!parent)
1644                 return -EINVAL;
1645 
1646         for (i = 0; i < core->num_parents; i++) {
1647                 /* Found it first try! */
1648                 if (core->parents[i].core == parent)
1649                         return i;
1650 
1651                 /* Something else is here, so keep looking */
1652                 if (core->parents[i].core)
1653                         continue;
1654 
1655                 /* Maybe core hasn't been cached but the hw is all we know? */
1656                 if (core->parents[i].hw) {
1657                         if (core->parents[i].hw == parent->hw)
1658                                 break;
1659 
1660                         /* Didn't match, but we're expecting a clk_hw */
1661                         continue;
1662                 }
1663 
1664                 /* Maybe it hasn't been cached (clk_set_parent() path) */
1665                 if (parent == clk_core_get(core, i))
1666                         break;
1667 
1668                 /* Fallback to comparing globally unique names */
1669                 if (core->parents[i].name &&
1670                     !strcmp(parent->name, core->parents[i].name))
1671                         break;
1672         }
1673 
1674         if (i == core->num_parents)
1675                 return -EINVAL;
1676 
1677         core->parents[i].core = parent;
1678         return i;
1679 }
1680 
1681 /*
1682  * Update the orphan status of @core and all its children.
1683  */
1684 static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1685 {
1686         struct clk_core *child;
1687 
1688         core->orphan = is_orphan;
1689 
1690         hlist_for_each_entry(child, &core->children, child_node)
1691                 clk_core_update_orphan_status(child, is_orphan);
1692 }
1693 
1694 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1695 {
1696         bool was_orphan = core->orphan;
1697 
1698         hlist_del(&core->child_node);
1699 
1700         if (new_parent) {
1701                 bool becomes_orphan = new_parent->orphan;
1702 
1703                 /* avoid duplicate POST_RATE_CHANGE notifications */
1704                 if (new_parent->new_child == core)
1705                         new_parent->new_child = NULL;
1706 
1707                 hlist_add_head(&core->child_node, &new_parent->children);
1708 
1709                 if (was_orphan != becomes_orphan)
1710                         clk_core_update_orphan_status(core, becomes_orphan);
1711         } else {
1712                 hlist_add_head(&core->child_node, &clk_orphan_list);
1713                 if (!was_orphan)
1714                         clk_core_update_orphan_status(core, true);
1715         }
1716 
1717         core->parent = new_parent;
1718 }
1719 
1720 static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1721                                            struct clk_core *parent)
1722 {
1723         unsigned long flags;
1724         struct clk_core *old_parent = core->parent;
1725 
1726         /*
1727          * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
1728          *
1729          * 2. Migrate prepare state between parents and prevent race with
1730          * clk_enable().
1731          *
1732          * If the clock is not prepared, then a race with
1733          * clk_enable/disable() is impossible since we already have the
1734          * prepare lock (future calls to clk_enable() need to be preceded by
1735          * a clk_prepare()).
1736          *
1737          * If the clock is prepared, migrate the prepared state to the new
1738          * parent and also protect against a race with clk_enable() by
1739          * forcing the clock and the new parent on.  This ensures that all
1740          * future calls to clk_enable() are practically NOPs with respect to
1741          * hardware and software states.
1742          *
1743          * See also: Comment for clk_set_parent() below.
1744          */
1745 
1746         /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1747         if (core->flags & CLK_OPS_PARENT_ENABLE) {
1748                 clk_core_prepare_enable(old_parent);
1749                 clk_core_prepare_enable(parent);
1750         }
1751 
1752         /* migrate prepare count if > 0 */
1753         if (core->prepare_count) {
1754                 clk_core_prepare_enable(parent);
1755                 clk_core_enable_lock(core);
1756         }
1757 
1758         /* update the clk tree topology */
1759         flags = clk_enable_lock();
1760         clk_reparent(core, parent);
1761         clk_enable_unlock(flags);
1762 
1763         return old_parent;
1764 }
1765 
1766 static void __clk_set_parent_after(struct clk_core *core,
1767                                    struct clk_core *parent,
1768                                    struct clk_core *old_parent)
1769 {
1770         /*
1771          * Finish the migration of prepare state and undo the changes done
1772          * for preventing a race with clk_enable().
1773          */
1774         if (core->prepare_count) {
1775                 clk_core_disable_lock(core);
1776                 clk_core_disable_unprepare(old_parent);
1777         }
1778 
1779         /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1780         if (core->flags & CLK_OPS_PARENT_ENABLE) {
1781                 clk_core_disable_unprepare(parent);
1782                 clk_core_disable_unprepare(old_parent);
1783         }
1784 }
1785 
1786 static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1787                             u8 p_index)
1788 {
1789         unsigned long flags;
1790         int ret = 0;
1791         struct clk_core *old_parent;
1792 
1793         old_parent = __clk_set_parent_before(core, parent);
1794 
1795         trace_clk_set_parent(core, parent);
1796 
1797         /* change clock input source */
1798         if (parent && core->ops->set_parent)
1799                 ret = core->ops->set_parent(core->hw, p_index);
1800 
1801         trace_clk_set_parent_complete(core, parent);
1802 
1803         if (ret) {
1804                 flags = clk_enable_lock();
1805                 clk_reparent(core, old_parent);
1806                 clk_enable_unlock(flags);
1807                 __clk_set_parent_after(core, old_parent, parent);
1808 
1809                 return ret;
1810         }
1811 
1812         __clk_set_parent_after(core, parent, old_parent);
1813 
1814         return 0;
1815 }
1816 
1817 /**
1818  * __clk_speculate_rates - speculate rates of a subtree of clks
1819  * @core: first clk in the subtree
1820  * @parent_rate: the "future" rate of clk's parent
1821  *
1822  * Walks the subtree of clks starting with clk, speculating rates as it
1823  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1824  *
1825  * Unlike __clk_recalc_rates, __clk_speculate_rates exists only for sending
1826  * pre-rate change notifications and returns early if no clks in the
1827  * subtree have subscribed to the notifications.  Note that if a clk does not
1828  * implement the .recalc_rate callback then it is assumed that the clock will
1829  * take on the rate of its parent.
1830  */
1831 static int __clk_speculate_rates(struct clk_core *core,
1832                                  unsigned long parent_rate)
1833 {
1834         struct clk_core *child;
1835         unsigned long new_rate;
1836         int ret = NOTIFY_DONE;
1837 
1838         lockdep_assert_held(&prepare_lock);
1839 
1840         new_rate = clk_recalc(core, parent_rate);
1841 
1842         /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1843         if (core->notifier_count)
1844                 ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1845 
1846         if (ret & NOTIFY_STOP_MASK) {
1847                 pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1848                                 __func__, core->name, ret);
1849                 goto out;
1850         }
1851 
1852         hlist_for_each_entry(child, &core->children, child_node) {
1853                 ret = __clk_speculate_rates(child, new_rate);
1854                 if (ret & NOTIFY_STOP_MASK)
1855                         break;
1856         }
1857 
1858 out:
1859         return ret;
1860 }
1861 
1862 static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1863                              struct clk_core *new_parent, u8 p_index)
1864 {
1865         struct clk_core *child;
1866 
1867         core->new_rate = new_rate;
1868         core->new_parent = new_parent;
1869         core->new_parent_index = p_index;
1870         /* include clk in new parent's PRE_RATE_CHANGE notifications */
1871         core->new_child = NULL;
1872         if (new_parent && new_parent != core->parent)
1873                 new_parent->new_child = core;
1874 
1875         hlist_for_each_entry(child, &core->children, child_node) {
1876                 child->new_rate = clk_recalc(child, new_rate);
1877                 clk_calc_subtree(child, child->new_rate, NULL, 0);
1878         }
1879 }
1880 
1881 /*
1882  * calculate the new rates, returning the topmost clock that has to be
1883  * changed.
1884  */
1885 static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1886                                            unsigned long rate)
1887 {
1888         struct clk_core *top = core;
1889         struct clk_core *old_parent, *parent;
1890         unsigned long best_parent_rate = 0;
1891         unsigned long new_rate;
1892         unsigned long min_rate;
1893         unsigned long max_rate;
1894         int p_index = 0;
1895         long ret;
1896 
1897         /* sanity */
1898         if (IS_ERR_OR_NULL(core))
1899                 return NULL;
1900 
1901         /* save parent rate, if it exists */
1902         parent = old_parent = core->parent;
1903         if (parent)
1904                 best_parent_rate = parent->rate;
1905 
1906         clk_core_get_boundaries(core, &min_rate, &max_rate);
1907 
1908         /* find the closest rate and parent clk/rate */
1909         if (clk_core_can_round(core)) {
1910                 struct clk_rate_request req;
1911 
1912                 req.rate = rate;
1913                 req.min_rate = min_rate;
1914                 req.max_rate = max_rate;
1915 
1916                 clk_core_init_rate_req(core, &req);
1917 
1918                 ret = clk_core_determine_round_nolock(core, &req);
1919                 if (ret < 0)
1920                         return NULL;
1921 
1922                 best_parent_rate = req.best_parent_rate;
1923                 new_rate = req.rate;
1924                 parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1925 
1926                 if (new_rate < min_rate || new_rate > max_rate)
1927                         return NULL;
1928         } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1929                 /* pass-through clock without adjustable parent */
1930                 core->new_rate = core->rate;
1931                 return NULL;
1932         } else {
1933                 /* pass-through clock with adjustable parent */
1934                 top = clk_calc_new_rates(parent, rate);
1935                 new_rate = parent->new_rate;
1936                 goto out;
1937         }
1938 
1939         /* some clocks must be gated to change parent */
1940         if (parent != old_parent &&
1941             (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1942                 pr_debug("%s: %s not gated but wants to reparent\n",
1943                          __func__, core->name);
1944                 return NULL;
1945         }
1946 
1947         /* try finding the new parent index */
1948         if (parent && core->num_parents > 1) {
1949                 p_index = clk_fetch_parent_index(core, parent);
1950                 if (p_index < 0) {
1951                         pr_debug("%s: clk %s can not be parent of clk %s\n",
1952                                  __func__, parent->name, core->name);
1953                         return NULL;
1954                 }
1955         }
1956 
1957         if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1958             best_parent_rate != parent->rate)
1959                 top = clk_calc_new_rates(parent, best_parent_rate);
1960 
1961 out:
1962         clk_calc_subtree(core, new_rate, parent, p_index);
1963 
1964         return top;
1965 }
1966 
1967 /*
1968  * Notify about rate changes in a subtree. Always walk down the whole tree
1969  * so that in case of an error we can walk down the whole tree again and
1970  * abort the change.
1971  */
1972 static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1973                                                   unsigned long event)
1974 {
1975         struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1976         int ret = NOTIFY_DONE;
1977 
1978         if (core->rate == core->new_rate)
1979                 return NULL;
1980 
1981         if (core->notifier_count) {
1982                 ret = __clk_notify(core, event, core->rate, core->new_rate);
1983                 if (ret & NOTIFY_STOP_MASK)
1984                         fail_clk = core;
1985         }
1986 
1987         hlist_for_each_entry(child, &core->children, child_node) {
1988                 /* Skip children who will be reparented to another clock */
1989                 if (child->new_parent && child->new_parent != core)
1990                         continue;
1991                 tmp_clk = clk_propagate_rate_change(child, event);
1992                 if (tmp_clk)
1993                         fail_clk = tmp_clk;
1994         }
1995 
1996         /* handle the new child who might not be in core->children yet */
1997         if (core->new_child) {
1998                 tmp_clk = clk_propagate_rate_change(core->new_child, event);
1999                 if (tmp_clk)
2000                         fail_clk = tmp_clk;
2001         }
2002 
2003         return fail_clk;
2004 }
2005 
2006 /*
2007  * walk down a subtree and set the new rates, notifying the rate
2008  * change on the way
2009  */
2010 static void clk_change_rate(struct clk_core *core)
2011 {
2012         struct clk_core *child;
2013         struct hlist_node *tmp;
2014         unsigned long old_rate;
2015         unsigned long best_parent_rate = 0;
2016         bool skip_set_rate = false;
2017         struct clk_core *old_parent;
2018         struct clk_core *parent = NULL;
2019 
2020         old_rate = core->rate;
2021 
2022         if (core->new_parent) {
2023                 parent = core->new_parent;
2024                 best_parent_rate = core->new_parent->rate;
2025         } else if (core->parent) {
2026                 parent = core->parent;
2027                 best_parent_rate = core->parent->rate;
2028         }
2029 
2030         if (clk_pm_runtime_get(core))
2031                 return;
2032 
2033         if (core->flags & CLK_SET_RATE_UNGATE) {
2034                 unsigned long flags;
2035 
2036                 clk_core_prepare(core);
2037                 flags = clk_enable_lock();
2038                 clk_core_enable(core);
2039                 clk_enable_unlock(flags);
2040         }
2041 
2042         if (core->new_parent && core->new_parent != core->parent) {
2043                 old_parent = __clk_set_parent_before(core, core->new_parent);
2044                 trace_clk_set_parent(core, core->new_parent);
2045 
2046                 if (core->ops->set_rate_and_parent) {
2047                         skip_set_rate = true;
2048                         core->ops->set_rate_and_parent(core->hw, core->new_rate,
2049                                         best_parent_rate,
2050                                         core->new_parent_index);
2051                 } else if (core->ops->set_parent) {
2052                         core->ops->set_parent(core->hw, core->new_parent_index);
2053                 }
2054 
2055                 trace_clk_set_parent_complete(core, core->new_parent);
2056                 __clk_set_parent_after(core, core->new_parent, old_parent);
2057         }
2058 
2059         if (core->flags & CLK_OPS_PARENT_ENABLE)
2060                 clk_core_prepare_enable(parent);
2061 
2062         trace_clk_set_rate(core, core->new_rate);
2063 
2064         if (!skip_set_rate && core->ops->set_rate)
2065                 core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2066 
2067         trace_clk_set_rate_complete(core, core->new_rate);
2068 
2069         core->rate = clk_recalc(core, best_parent_rate);
2070 
2071         if (core->flags & CLK_SET_RATE_UNGATE) {
2072                 unsigned long flags;
2073 
2074                 flags = clk_enable_lock();
2075                 clk_core_disable(core);
2076                 clk_enable_unlock(flags);
2077                 clk_core_unprepare(core);
2078         }
2079 
2080         if (core->flags & CLK_OPS_PARENT_ENABLE)
2081                 clk_core_disable_unprepare(parent);
2082 
2083         if (core->notifier_count && old_rate != core->rate)
2084                 __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2085 
2086         if (core->flags & CLK_RECALC_NEW_RATES)
2087                 (void)clk_calc_new_rates(core, core->new_rate);
2088 
2089         /*
2090          * Use safe iteration, as change_rate can actually swap parents
2091          * for certain clock types.
2092          */
2093         hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2094                 /* Skip children who will be reparented to another clock */
2095                 if (child->new_parent && child->new_parent != core)
2096                         continue;
2097                 clk_change_rate(child);
2098         }
2099 
2100         /* handle the new child who might not be in core->children yet */
2101         if (core->new_child)
2102                 clk_change_rate(core->new_child);
2103 
2104         clk_pm_runtime_put(core);
2105 }
2106 
2107 static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2108                                                      unsigned long req_rate)
2109 {
2110         int ret, cnt;
2111         struct clk_rate_request req;
2112 
2113         lockdep_assert_held(&prepare_lock);
2114 
2115         if (!core)
2116                 return 0;
2117 
2118         /* simulate what the rate would be if it could be freely set */
2119         cnt = clk_core_rate_nuke_protect(core);
2120         if (cnt < 0)
2121                 return cnt;
2122 
2123         clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2124         req.rate = req_rate;
2125 
2126         ret = clk_core_round_rate_nolock(core, &req);
2127 
2128         /* restore the protection */
2129         clk_core_rate_restore_protect(core, cnt);
2130 
2131         return ret ? 0 : req.rate;
2132 }
2133 
2134 static int clk_core_set_rate_nolock(struct clk_core *core,
2135                                     unsigned long req_rate)
2136 {
2137         struct clk_core *top, *fail_clk;
2138         unsigned long rate;
2139         int ret = 0;
2140 
2141         if (!core)
2142                 return 0;
2143 
2144         rate = clk_core_req_round_rate_nolock(core, req_rate);
2145 
2146         /* bail early if nothing to do */
2147         if (rate == clk_core_get_rate_nolock(core))
2148                 return 0;
2149 
2150         /* fail on a direct rate set of a protected provider */
2151         if (clk_core_rate_is_protected(core))
2152                 return -EBUSY;
2153 
2154         /* calculate new rates and get the topmost changed clock */
2155         top = clk_calc_new_rates(core, req_rate);
2156         if (!top)
2157                 return -EINVAL;
2158 
2159         ret = clk_pm_runtime_get(core);
2160         if (ret)
2161                 return ret;
2162 
2163         /* notify that we are about to change rates */
2164         fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2165         if (fail_clk) {
2166                 pr_debug("%s: failed to set %s rate\n", __func__,
2167                                 fail_clk->name);
2168                 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2169                 ret = -EBUSY;
2170                 goto err;
2171         }
2172 
2173         /* change the rates */
2174         clk_change_rate(top);
2175 
2176         core->req_rate = req_rate;
2177 err:
2178         clk_pm_runtime_put(core);
2179 
2180         return ret;
2181 }
2182 
2183 /**
2184  * clk_set_rate - specify a new rate for clk
2185  * @clk: the clk whose rate is being changed
2186  * @rate: the new rate for clk
2187  *
2188  * In the simplest case clk_set_rate will only adjust the rate of clk.
2189  *
2190  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2191  * propagate up to clk's parent; whether or not this happens depends on the
2192  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
2193  * after calling .round_rate then upstream parent propagation is ignored.  If
2194  * *parent_rate comes back with a new rate for clk's parent then we propagate
2195  * up to clk's parent and set its rate.  Upward propagation will continue
2196  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2197  * .round_rate stops requesting changes to clk's parent_rate.
2198  *
2199  * Rate changes are accomplished via tree traversal that also recalculates the
2200  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2201  *
2202  * Returns 0 on success, a negative errno otherwise.
2203  */
2204 int clk_set_rate(struct clk *clk, unsigned long rate)
2205 {
2206         int ret;
2207 
2208         if (!clk)
2209                 return 0;
2210 
2211         /* prevent racing with updates to the clock topology */
2212         clk_prepare_lock();
2213 
2214         if (clk->exclusive_count)
2215                 clk_core_rate_unprotect(clk->core);
2216 
2217         ret = clk_core_set_rate_nolock(clk->core, rate);
2218 
2219         if (clk->exclusive_count)
2220                 clk_core_rate_protect(clk->core);
2221 
2222         clk_prepare_unlock();
2223 
2224         return ret;
2225 }
2226 EXPORT_SYMBOL_GPL(clk_set_rate);
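/*
 * A minimal consumer-side sketch of the usual clk_set_rate() flow,
 * assuming a driver that already obtained the handle via devm_clk_get();
 * the 48 MHz rate is an arbitrary example value.
 */
static int example_start_device_clock(struct clk *clk)
{
	int ret;

	/* Set the rate before enabling: some clocks only change rate while gated */
	ret = clk_set_rate(clk, 48000000);
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}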
2227 
2228 /**
2229  * clk_set_rate_exclusive - specify a new rate and get exclusive control
2230  * @clk: the clk whose rate is being changed
2231  * @rate: the new rate for clk
2232  *
2233  * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2234  * within a critical section.
2235  *
2236  * This can be used initially to ensure that at least 1 consumer is
2237  * satisfied when several consumers are competing for exclusivity over the
2238  * same clock provider.
2239  *
2240  * The exclusivity is not applied if setting the rate failed.
2241  *
2242  * Calls to clk_rate_exclusive_get() should be balanced with calls to
2243  * clk_rate_exclusive_put().
2244  *
2245  * Returns 0 on success, a negative errno otherwise.
2246  */
2247 int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2248 {
2249         int ret;
2250 
2251         if (!clk)
2252                 return 0;
2253 
2254         /* prevent racing with updates to the clock topology */
2255         clk_prepare_lock();
2256 
2257         /*
2258          * The temporary protection removal is omitted here on purpose:
2259          * this function is meant to be used instead of clk_rate_protect(),
2260          * before the consumer code path has protected the clock provider.
2261          */
2262 
2263         ret = clk_core_set_rate_nolock(clk->core, rate);
2264         if (!ret) {
2265                 clk_core_rate_protect(clk->core);
2266                 clk->exclusive_count++;
2267         }
2268 
2269         clk_prepare_unlock();
2270 
2271         return ret;
2272 }
2273 EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
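/*
 * A sketch of balancing the exclusivity taken by this call, assuming
 * the consumer only needs the exact rate for a bounded critical
 * section; the function name and 104 MHz rate are hypothetical.
 */
static int example_exclusive_section(struct clk *clk)
{
	int ret;

	ret = clk_set_rate_exclusive(clk, 104000000);
	if (ret)
		return ret;

	/* ... work that relies on the rate staying exactly as set ... */

	/* Drop the exclusivity so other consumers may change the rate again */
	clk_rate_exclusive_put(clk);

	return 0;
}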
2274 
2275 /**
2276  * clk_set_rate_range - set a rate range for a clock source
2277  * @clk: clock source
2278  * @min: desired minimum clock rate in Hz, inclusive
2279  * @max: desired maximum clock rate in Hz, inclusive
2280  *
2281  * Returns success (0) or negative errno.
2282  */
2283 int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2284 {
2285         int ret = 0;
2286         unsigned long old_min, old_max, rate;
2287 
2288         if (!clk)
2289                 return 0;
2290 
2291         if (min > max) {
2292                 pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2293                        __func__, clk->core->name, clk->dev_id, clk->con_id,
2294                        min, max);
2295                 return -EINVAL;
2296         }
2297 
2298         clk_prepare_lock();
2299 
2300         if (clk->exclusive_count)
2301                 clk_core_rate_unprotect(clk->core);
2302 
2303         /* Save the current values in case we need to rollback the change */
2304         old_min = clk->min_rate;
2305         old_max = clk->max_rate;
2306         clk->min_rate = min;
2307         clk->max_rate = max;
2308 
2309         rate = clk_core_get_rate_nolock(clk->core);
2310         if (rate < min || rate > max) {
2311                 /*
2312                  * FIXME:
2313                  * We are in a bit of trouble here: the current rate is
2314                  * outside the requested range. We will try to request the
2315                  * appropriate range boundary, but there is a catch. It may
2316                  * fail for the usual reasons (clock broken, clock protected,
2317                  * etc) but also because:
2318                  * - round_rate() was not favorable and fell on the wrong
2319                  *   side of the boundary
2320                  * - the determine_rate() callback does not really check for
2321                  *   this corner case when determining the rate
2322                  */
2323 
2324                 if (rate < min)
2325                         rate = min;
2326                 else
2327                         rate = max;
2328 
2329                 ret = clk_core_set_rate_nolock(clk->core, rate);
2330                 if (ret) {
2331                         /* rollback the changes */
2332                         clk->min_rate = old_min;
2333                         clk->max_rate = old_max;
2334                 }
2335         }
2336 
2337         if (clk->exclusive_count)
2338                 clk_core_rate_protect(clk->core);
2339 
2340         clk_prepare_unlock();
2341 
2342         return ret;
2343 }
2344 EXPORT_SYMBOL_GPL(clk_set_rate_range);
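/*
 * A minimal sketch of constraining rather than pinning a rate, assuming
 * a consumer that works anywhere between 100 and 200 MHz (hypothetical
 * bounds): the framework then keeps the clock inside [min, max] and
 * only nudges the rate if it currently falls outside the range.
 */
static int example_constrain_rate(struct clk *clk)
{
	return clk_set_rate_range(clk, 100000000, 200000000);
}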
2345 
2346 /**
2347  * clk_set_min_rate - set a minimum clock rate for a clock source
2348  * @clk: clock source
2349  * @rate: desired minimum clock rate in Hz, inclusive
2350  *
2351  * Returns success (0) or negative errno.
2352  */
2353 int clk_set_min_rate(struct clk *clk, unsigned long rate)
2354 {
2355         if (!clk)
2356                 return 0;
2357 
2358         return clk_set_rate_range(clk, rate, clk->max_rate);
2359 }
2360 EXPORT_SYMBOL_GPL(clk_set_min_rate);
2361 
2362 /**
2363  * clk_set_max_rate - set a maximum clock rate for a clock source
2364  * @clk: clock source
2365  * @rate: desired maximum clock rate in Hz, inclusive
2366  *
2367  * Returns success (0) or negative errno.
2368  */
2369 int clk_set_max_rate(struct clk *clk, unsigned long rate)
2370 {
2371         if (!clk)
2372                 return 0;
2373 
2374         return clk_set_rate_range(clk, clk->min_rate, rate);
2375 }
2376 EXPORT_SYMBOL_GPL(clk_set_max_rate);
2377 
2378 /**
2379  * clk_get_parent - return the parent of a clk
2380  * @clk: the clk whose parent gets returned
2381  *
2382  * Simply returns clk->parent.  Returns NULL if clk is NULL.
2383  */
2384 struct clk *clk_get_parent(struct clk *clk)
2385 {
2386         struct clk *parent;
2387 
2388         if (!clk)
2389                 return NULL;
2390 
2391         clk_prepare_lock();
2392         /* TODO: Create a per-user clk and change callers to call clk_put */
2393         parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2394         clk_prepare_unlock();
2395 
2396         return parent;
2397 }
2398 EXPORT_SYMBOL_GPL(clk_get_parent);
2399 
2400 static struct clk_core *__clk_init_parent(struct clk_core *core)
2401 {
2402         u8 index = 0;
2403 
2404         if (core->num_parents > 1 && core->ops->get_parent)
2405                 index = core->ops->get_parent(core->hw);
2406 
2407         return clk_core_get_parent_by_index(core, index);
2408 }
2409 
2410 static void clk_core_reparent(struct clk_core *core,
2411                                   struct clk_core *new_parent)
2412 {
2413         clk_reparent(core, new_parent);
2414         __clk_recalc_accuracies(core);
2415         __clk_recalc_rates(core, POST_RATE_CHANGE);
2416 }
2417 
2418 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2419 {
2420         if (!hw)
2421                 return;
2422 
2423         clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2424 }
2425 
2426 /**
2427  * clk_has_parent - check if a clock is a possible parent for another
2428  * @clk: clock source
2429  * @parent: parent clock source
2430  *
2431  * This function can be used in drivers that need to check that a clock can be
2432  * the parent of another without actually changing the parent.
2433  *
2434  * Returns true if @parent is a possible parent for @clk, false otherwise.
2435  */
2436 bool clk_has_parent(struct clk *clk, struct clk *parent)
2437 {
2438         struct clk_core *core, *parent_core;
2439         int i;
2440 
2441         /* NULL clocks should be nops, so return success if either is NULL. */
2442         if (!clk || !parent)
2443                 return true;
2444 
2445         core = clk->core;
2446         parent_core = parent->core;
2447 
2448         /* Optimize for the case where the parent is already the parent. */
2449         if (core->parent == parent_core)
2450                 return true;
2451 
2452         for (i = 0; i < core->num_parents; i++)
2453                 if (!strcmp(core->parents[i].name, parent_core->name))
2454                         return true;
2455 
2456         return false;
2457 }
2458 EXPORT_SYMBOL_GPL(clk_has_parent);
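/*
 * A sketch of the check-before-switch pattern this helper enables; both
 * handles are assumed to come from clk_get() and the names are
 * hypothetical.
 */
static int example_try_reparent(struct clk *mux, struct clk *pll)
{
	/* Bail out early instead of letting clk_set_parent() fail */
	if (!clk_has_parent(mux, pll))
		return -EINVAL;

	return clk_set_parent(mux, pll);
}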
2459 
2460 static int clk_core_set_parent_nolock(struct clk_core *core,
2461                                       struct clk_core *parent)
2462 {
2463         int ret = 0;
2464         int p_index = 0;
2465         unsigned long p_rate = 0;
2466 
2467         lockdep_assert_held(&prepare_lock);
2468 
2469         if (!core)
2470                 return 0;
2471 
2472         if (core->parent == parent)
2473                 return 0;
2474 
2475         /* verify ops for multi-parent clks */
2476         if (core->num_parents > 1 && !core->ops->set_parent)
2477                 return -EPERM;
2478 
2479         /* check that we are allowed to re-parent if the clock is in use */
2480         if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2481                 return -EBUSY;
2482 
2483         if (clk_core_rate_is_protected(core))
2484                 return -EBUSY;
2485 
2486         /* try finding the new parent index */
2487         if (parent) {
2488                 p_index = clk_fetch_parent_index(core, parent);
2489                 if (p_index < 0) {
2490                         pr_debug("%s: clk %s can not be parent of clk %s\n",
2491                                         __func__, parent->name, core->name);
2492                         return p_index;
2493                 }
2494                 p_rate = parent->rate;
2495         }
2496 
2497         ret = clk_pm_runtime_get(core);
2498         if (ret)
2499                 return ret;
2500 
2501         /* propagate PRE_RATE_CHANGE notifications */
2502         ret = __clk_speculate_rates(core, p_rate);
2503 
2504         /* abort if a driver objects */
2505         if (ret & NOTIFY_STOP_MASK)
2506                 goto runtime_put;
2507 
2508         /* do the re-parent */
2509         ret = __clk_set_parent(core, parent, p_index);
2510 
2511         /* propagate rate and accuracy recalculation accordingly */
2512         if (ret) {
2513                 __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2514         } else {
2515                 __clk_recalc_rates(core, POST_RATE_CHANGE);
2516                 __clk_recalc_accuracies(core);
2517         }
2518 
2519 runtime_put:
2520         clk_pm_runtime_put(core);
2521 
2522         return ret;
2523 }
2524 
2525 int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2526 {
2527         return clk_core_set_parent_nolock(hw->core, parent->core);
2528 }
2529 EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2530 
2531 /**
2532  * clk_set_parent - switch the parent of a mux clk
2533  * @clk: the mux clk whose input we are switching
2534  * @parent: the new input to clk
2535  *
2536  * Re-parent clk to use parent as its new input source.  If clk is in
2537  * prepared state, the clk will get enabled for the duration of this call. If
2538  * that's not acceptable for a specific clk (e.g. the consumer can't handle
2539  * that, the reparenting is glitchy in hardware, etc), use the
2540  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2541  *
2542  * After successfully changing clk's parent clk_set_parent will update the
2543  * clk topology, sysfs topology and propagate rate recalculation via
2544  * __clk_recalc_rates.
2545  *
2546  * Returns 0 on success, a negative errno otherwise.
2547  */
2548 int clk_set_parent(struct clk *clk, struct clk *parent)
2549 {
2550         int ret;
2551 
2552         if (!clk)
2553                 return 0;
2554 
2555         clk_prepare_lock();
2556 
2557         if (clk->exclusive_count)
2558                 clk_core_rate_unprotect(clk->core);
2559 
2560         ret = clk_core_set_parent_nolock(clk->core,
2561                                          parent ? parent->core : NULL);
2562 
2563         if (clk->exclusive_count)
2564                 clk_core_rate_protect(clk->core);
2565 
2566         clk_prepare_unlock();
2567 
2568         return ret;
2569 }
2570 EXPORT_SYMBOL_GPL(clk_set_parent);
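/*
 * A sketch of a consumer switching a mux input and observing the rate
 * that was recalculated down the subtree during reparenting; the
 * identifiers are hypothetical.
 */
static int example_switch_input(struct clk *mux, struct clk *osc)
{
	int ret = clk_set_parent(mux, osc);

	if (ret)
		return ret;

	pr_debug("mux now runs at %lu Hz\n", clk_get_rate(mux));

	return 0;
}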
2571 
2572 static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2573 {
2574         int ret = -EINVAL;
2575 
2576         lockdep_assert_held(&prepare_lock);
2577 
2578         if (!core)
2579                 return 0;
2580 
2581         if (clk_core_rate_is_protected(core))
2582                 return -EBUSY;
2583 
2584         trace_clk_set_phase(core, degrees);
2585 
2586         if (core->ops->set_phase) {
2587                 ret = core->ops->set_phase(core->hw, degrees);
2588                 if (!ret)
2589                         core->phase = degrees;
2590         }
2591 
2592         trace_clk_set_phase_complete(core, degrees);
2593 
2594         return ret;
2595 }
2596 
2597 /**
2598  * clk_set_phase - adjust the phase shift of a clock signal
2599  * @clk: clock signal source
2600  * @degrees: number of degrees the signal is shifted
2601  *
2602  * Shifts the phase of a clock signal by the specified
2603  * degrees. Returns 0 on success, a negative errno otherwise.
2604  *
2605  * This function makes no distinction about the input or reference
2606  * signal that we adjust the clock signal phase against. For example,
2607  * with phase-locked-loop clock signal generators we may shift phase
2608  * with respect to the feedback clock signal input, but in other cases
2609  * the clock phase may be shifted with respect to some other, unspecified
2610  * signal.
2611  *
2612  * Additionally the concept of phase shift does not propagate through
2613  * the clock tree hierarchy, which sets it apart from clock rates and
2614  * clock accuracy. A parent clock phase attribute does not have an
2615  * impact on the phase attribute of a child clock.
2616  */
2617 int clk_set_phase(struct clk *clk, int degrees)
2618 {
2619         int ret;
2620 
2621         if (!clk)
2622                 return 0;
2623 
2624         /* sanity check degrees */
2625         degrees %= 360;
2626         if (degrees < 0)
2627                 degrees += 360;
2628 
2629         clk_prepare_lock();
2630 
2631         if (clk->exclusive_count)
2632                 clk_core_rate_unprotect(clk->core);
2633 
2634         ret = clk_core_set_phase_nolock(clk->core, degrees);
2635 
2636         if (clk->exclusive_count)
2637                 clk_core_rate_protect(clk->core);
2638 
2639         clk_prepare_unlock();
2640 
2641         return ret;
2642 }
2643 EXPORT_SYMBOL_GPL(clk_set_phase);
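/*
 * A minimal sketch of shifting a clock's phase, e.g. to sample data on
 * a quadrature clock; the 90 degree shift is an arbitrary example and
 * only works if the provider implements .set_phase.
 */
static int example_quadrature_phase(struct clk *clk)
{
	int ret = clk_set_phase(clk, 90);

	if (ret)
		return ret;

	/* Read back what the hardware actually reports */
	return clk_get_phase(clk);
}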
2644 
2645 static int clk_core_get_phase(struct clk_core *core)
2646 {
2647         int ret;
2648 
2649         lockdep_assert_held(&prepare_lock);
2650         if (!core->ops->get_phase)
2651                 return 0;
2652 
2653         /* Always try to update cached phase if possible */
2654         ret = core->ops->get_phase(core->hw);
2655         if (ret >= 0)
2656                 core->phase = ret;
2657 
2658         return ret;
2659 }
2660 
2661 /**
2662  * clk_get_phase - return the phase shift of a clock signal
2663  * @clk: clock signal source
2664  *
2665  * Returns the phase shift of a clock node in degrees, otherwise returns
2666  * a negative errno.
2667  */
2668 int clk_get_phase(struct clk *clk)
2669 {
2670         int ret;
2671 
2672         if (!clk)
2673                 return 0;
2674 
2675         clk_prepare_lock();
2676         ret = clk_core_get_phase(clk->core);
2677         clk_prepare_unlock();
2678 
2679         return ret;
2680 }
2681 EXPORT_SYMBOL_GPL(clk_get_phase);
2682 
2683 static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2684 {
2685         /* Assume a default value of 50% */
2686         core->duty.num = 1;
2687         core->duty.den = 2;
2688 }
2689 
2690 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2691 
2692 static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2693 {
2694         struct clk_duty *duty = &core->duty;
2695         int ret = 0;
2696 
2697         if (!core->ops->get_duty_cycle)
2698                 return clk_core_update_duty_cycle_parent_nolock(core);
2699 
2700         ret = core->ops->get_duty_cycle(core->hw, duty);
2701         if (ret)
2702                 goto reset;
2703 
2704         /* Don't trust the clock provider too much */
2705         if (duty->den == 0 || duty->num > duty->den) {
2706                 ret = -EINVAL;
2707                 goto reset;
2708         }
2709 
2710         return 0;
2711 
2712 reset:
2713         clk_core_reset_duty_cycle_nolock(core);
2714         return ret;
2715 }
2716 
2717 static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2718 {
2719         int ret = 0;
2720 
2721         if (core->parent &&
2722             core->flags & CLK_DUTY_CYCLE_PARENT) {
2723                 ret = clk_core_update_duty_cycle_nolock(core->parent);
2724                 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2725         } else {
2726                 clk_core_reset_duty_cycle_nolock(core);
2727         }
2728 
2729         return ret;
2730 }
2731 
2732 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2733                                                  struct clk_duty *duty);
2734 
2735 static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2736                                           struct clk_duty *duty)
2737 {
2738         int ret;
2739 
2740         lockdep_assert_held(&prepare_lock);
2741 
2742         if (clk_core_rate_is_protected(core))
2743                 return -EBUSY;
2744 
2745         trace_clk_set_duty_cycle(core, duty);
2746 
2747         if (!core->ops->set_duty_cycle)
2748                 return clk_core_set_duty_cycle_parent_nolock(core, duty);
2749 
2750         ret = core->ops->set_duty_cycle(core->hw, duty);
2751         if (!ret)
2752                 memcpy(&core->duty, duty, sizeof(*duty));
2753 
2754         trace_clk_set_duty_cycle_complete(core, duty);
2755 
2756         return ret;
2757 }
2758 
2759 static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2760                                                  struct clk_duty *duty)
2761 {
2762         int ret = 0;
2763 
2764         if (core->parent &&
2765             core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2766                 ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2767                 memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2768         }
2769 
2770         return ret;
2771 }
2772 
2773 /**
2774  * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2775  * @clk: clock signal source
2776  * @num: numerator of the duty cycle ratio to be applied
2777  * @den: denominator of the duty cycle ratio to be applied
2778  *
2779  * Apply the duty cycle ratio if the ratio is valid and the clock can
2780  * perform this operation.
2781  *
2782  * Returns 0 on success, a negative errno otherwise.
2783  */
2784 int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2785 {
2786         int ret;
2787         struct clk_duty duty;
2788 
2789         if (!clk)
2790                 return 0;
2791 
2792         /* sanity check the ratio */
2793         if (den == 0 || num > den)
2794                 return -EINVAL;
2795 
2796         duty.num = num;
2797         duty.den = den;
2798 
2799         clk_prepare_lock();
2800 
2801         if (clk->exclusive_count)
2802                 clk_core_rate_unprotect(clk->core);
2803 
2804         ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2805 
2806         if (clk->exclusive_count)
2807                 clk_core_rate_protect(clk->core);
2808 
2809         clk_prepare_unlock();
2810 
2811         return ret;
2812 }
2813 EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
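/*
 * A minimal sketch requesting a 1/4 (25%) duty cycle; this only
 * succeeds if the provider implements .set_duty_cycle or delegates to
 * its parent via CLK_DUTY_CYCLE_PARENT.
 */
static int example_quarter_duty(struct clk *clk)
{
	return clk_set_duty_cycle(clk, 1, 4);
}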
2814 
2815 static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2816                                           unsigned int scale)
2817 {
2818         struct clk_duty *duty = &core->duty;
2819         int ret;
2820 
2821         clk_prepare_lock();
2822 
2823         ret = clk_core_update_duty_cycle_nolock(core);
2824         if (!ret)
2825                 ret = mult_frac(scale, duty->num, duty->den);
2826 
2827         clk_prepare_unlock();
2828 
2829         return ret;
2830 }
2831 
2832 /**
2833  * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2834  * @clk: clock signal source
2835  * @scale: scaling factor to be applied to represent the ratio as an integer
2836  *
2837  * Returns the duty cycle ratio of a clock node multiplied by the provided
2838  * scaling factor, or negative errno on error.
2839  */
2840 int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2841 {
2842         if (!clk)
2843                 return 0;
2844 
2845         return clk_core_get_scaled_duty_cycle(clk->core, scale);
2846 }
2847 EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
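/*
 * A sketch of reading the ratio back as an integer: with a scale of
 * 100 the return value is a percentage, e.g. 25 for the hypothetical
 * 1/4 ratio requested above.
 */
static int example_duty_percent(struct clk *clk)
{
	return clk_get_scaled_duty_cycle(clk, 100);
}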
2848 
2849 /**
2850  * clk_is_match - check if two clk's point to the same hardware clock
2851  * @p: clk compared against q
2852  * @q: clk compared against p
2853  *
2854  * Returns true if the two struct clk pointers both point to the same hardware
2855  * clock node. Put differently, returns true if struct clk *p and struct clk *q
2856  * share the same struct clk_core object.
2857  *
2858  * Returns false otherwise. Note that two NULL clks are treated as matching.
2859  */
2860 bool clk_is_match(const struct clk *p, const struct clk *q)
2861 {
2862         /* trivial case: identical struct clk's or both NULL */
2863         if (p == q)
2864                 return true;
2865 
2866         /* true if clk->core pointers match. Avoid dereferencing garbage */
2867         if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2868                 if (p->core == q->core)
2869                         return true;
2870 
2871         return false;
2872 }
2873 EXPORT_SYMBOL_GPL(clk_is_match);
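/*
 * A sketch of using clk_is_match() to detect that two independently
 * obtained handles (e.g. from two clk_get() lookups under different
 * names) resolve to the same hardware clock; identifiers hypothetical.
 */
static bool example_shares_hw(struct clk *a, struct clk *b)
{
	/* Pointer equality is not enough: each clk_get() returns a new handle */
	return clk_is_match(a, b);
}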
2874 
2875 /***        debugfs support        ***/
2876 
2877 #ifdef CONFIG_DEBUG_FS
2878 #include <linux/debugfs.h>
2879 
2880 static struct dentry *rootdir;
2881 static int inited = 0;
2882 static DEFINE_MUTEX(clk_debug_lock);
2883 static HLIST_HEAD(clk_debug_list);
2884 
2885 static struct hlist_head *orphan_list[] = {
2886         &clk_orphan_list,
2887         NULL,
2888 };
2889 
2890 static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2891                                  int level)
2892 {
2893         int phase;
2894 
2895         seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2896                    level * 3 + 1, "",
2897                    30 - level * 3, c->name,
2898                    c->enable_count, c->prepare_count, c->protect_count,
2899                    clk_core_get_rate(c), clk_core_get_accuracy(c));
2900 
2901         phase = clk_core_get_phase(c);
2902         if (phase >= 0)
2903                 seq_printf(s, "%5d", phase);
2904         else
2905                 seq_puts(s, "-----");
2906 
2907         seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
2908 }
2909 
2910 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2911                                      int level)
2912 {
2913         struct clk_core *child;
2914 
2915         clk_summary_show_one(s, c, level);
2916 
2917         hlist_for_each_entry(child, &c->children, child_node)
2918                 clk_summary_show_subtree(s, child, level + 1);
2919 }
2920 
2921 static int clk_summary_show(struct seq_file *s, void *data)
2922 {
2923         struct clk_core *c;
2924         struct hlist_head **lists = (struct hlist_head **)s->private;
2925 
2926         seq_puts(s, "                                 enable  prepare  protect                                duty\n");
2927         seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
2928         seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2929 
2930         clk_prepare_lock();
2931 
2932         for (; *lists; lists++)
2933                 hlist_for_each_entry(c, *lists, child_node)
2934                         clk_summary_show_subtree(s, c, 0);
2935 
2936         clk_prepare_unlock();
2937 
2938         return 0;
2939 }
2940 DEFINE_SHOW_ATTRIBUTE(clk_summary);
2941 
2942 static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2943 {
2944         int phase;
2945         unsigned long min_rate, max_rate;
2946 
2947         clk_core_get_boundaries(c, &min_rate, &max_rate);
2948 
2949         /* This should be JSON format, i.e. elements separated with a comma */
2950         seq_printf(s, "\"%s\": { ", c->name);
2951         seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2952         seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2953         seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2954         seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2955         seq_printf(s, "\"min_rate\": %lu,", min_rate);
2956         seq_printf(s, "\"max_rate\": %lu,", max_rate);
2957         seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2958         phase = clk_core_get_phase(c);
2959         if (phase >= 0)
2960                 seq_printf(s, "\"phase\": %d,", phase);
2961         seq_printf(s, "\"duty_cycle\": %u",
2962                    clk_core_get_scaled_duty_cycle(c, 100000));
2963 }
2964 
2965 static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2966 {
2967         struct clk_core *child;
2968 
2969         clk_dump_one(s, c, level);
2970 
2971         hlist_for_each_entry(child, &c->children, child_node) {
2972                 seq_putc(s, ',');
2973                 clk_dump_subtree(s, child, level + 1);
2974         }
2975 
2976         seq_putc(s, '}');
2977 }
2978 
2979 static int clk_dump_show(struct seq_file *s, void *data)
2980 {
2981         struct clk_core *c;
2982         bool first_node = true;
2983         struct hlist_head **lists = (struct hlist_head **)s->private;
2984 
2985         seq_putc(s, '{');
2986         clk_prepare_lock();
2987 
2988         for (; *lists; lists++) {
2989                 hlist_for_each_entry(c, *lists, child_node) {
2990                         if (!first_node)
2991                                 seq_putc(s, ',');
2992                         first_node = false;
2993                         clk_dump_subtree(s, c, 0);
2994                 }
2995         }
2996 
2997         clk_prepare_unlock();
2998 
2999         seq_puts(s, "}\n");
3000         return 0;
3001 }
3002 DEFINE_SHOW_ATTRIBUTE(clk_dump);
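/*
 * Illustrative shape of the clk_dump output; the clock names and the
 * numbers below are made-up examples:
 *
 *   {"osc24m": { "enable_count": 1,"prepare_count": 1,"protect_count": 0,
 *    "rate": 24000000,"min_rate": 0,"max_rate": 4294967295,"accuracy": 0,
 *    "phase": 0,"duty_cycle": 50000,"pll1": { ... }}}
 */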
3003 
3004 static const struct {
3005         unsigned long flag;
3006         const char *name;
3007 } clk_flags[] = {
3008 #define ENTRY(f) { f, #f }
3009         ENTRY(CLK_SET_RATE_GATE),
3010         ENTRY(CLK_SET_PARENT_GATE),
3011         ENTRY(CLK_SET_RATE_PARENT),
3012         ENTRY(CLK_IGNORE_UNUSED),
3013         ENTRY(CLK_GET_RATE_NOCACHE),
3014         ENTRY(CLK_SET_RATE_NO_REPARENT),
3015         ENTRY(CLK_GET_ACCURACY_NOCACHE),
3016         ENTRY(CLK_RECALC_NEW_RATES),
3017         ENTRY(CLK_SET_RATE_UNGATE),
3018         ENTRY(CLK_IS_CRITICAL),
3019         ENTRY(CLK_OPS_PARENT_ENABLE),
3020         ENTRY(CLK_DUTY_CYCLE_PARENT),
3021 #undef ENTRY
3022 };
3023 
3024 static int clk_flags_show(struct seq_file *s, void *data)
3025 {
3026         struct clk_core *core = s->private;
3027         unsigned long flags = core->flags;
3028         unsigned int i;
3029 
3030         for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3031                 if (flags & clk_flags[i].flag) {
3032                         seq_printf(s, "%s\n", clk_flags[i].name);
3033                         flags &= ~clk_flags[i].flag;
3034                 }
3035         }
3036         if (flags) {
3037                 /* Unknown flags */
3038                 seq_printf(s, "0x%lx\n", flags);
3039         }
3040 
3041         return 0;
3042 }
3043 DEFINE_SHOW_ATTRIBUTE(clk_flags);
3044 
3045 static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3046                                  unsigned int i, char terminator)
3047 {
3048         struct clk_core *parent;
3049 
3050         /*
3051          * Go through the following options to fetch a parent's name.
3052          *
3053          * 1. Fetch the registered parent clock and use its name
3054          * 2. Use the global (fallback) name if specified
3055          * 3. Use the local fw_name if provided
3056          * 4. Fetch parent clock's clock-output-name if DT index was set
3057          *
3058          * This may still fail in some cases, such as when the parent is
3059          * specified directly via a struct clk_hw pointer, but it isn't
3060          * registered (yet).
3061          */
3062         parent = clk_core_get_parent_by_index(core, i);
3063         if (parent)
3064                 seq_puts(s, parent->name);
3065         else if (core->parents[i].name)
3066                 seq_puts(s, core->parents[i].name);
3067         else if (core->parents[i].fw_name)
3068                 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3069         else if (core->parents[i].index >= 0)
3070                 seq_puts(s,
3071                          of_clk_get_parent_name(core->of_node,
3072                                                 core->parents[i].index));
3073         else
3074                 seq_puts(s, "(missing)");
3075 
3076         seq_putc(s, terminator);
3077 }
3078 
3079 static int possible_parents_show(struct seq_file *s, void *data)
3080 {
3081         struct clk_core *core = s->private;
3082         int i;
3083 
3084         for (i = 0; i < core->num_parents - 1; i++)
3085                 possible_parent_show(s, core, i, ' ');
3086 
3087         possible_parent_show(s, core, i, '\n');
3088 
3089         return 0;
3090 }
3091 DEFINE_SHOW_ATTRIBUTE(possible_parents);
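
/*
 * Example (editorial note, hypothetical clock names): for a mux whose
 * possible parents are "pll_a", "pll_b" and "xtal", reading the
 * clk_possible_parents debugfs file produces:
 *
 *	pll_a pll_b xtal
 *
 * with the last entry terminated by a newline.  Entries that cannot be
 * resolved are rendered as "<name>(fw)" when only a local fw_name is
 * known, or as "(missing)" when nothing is.
 */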
3092 
3093 static int current_parent_show(struct seq_file *s, void *data)
3094 {
3095         struct clk_core *core = s->private;
3096 
3097         if (core->parent)
3098                 seq_printf(s, "%s\n", core->parent->name);
3099 
3100         return 0;
3101 }
3102 DEFINE_SHOW_ATTRIBUTE(current_parent);
3103 
3104 static int clk_duty_cycle_show(struct seq_file *s, void *data)
3105 {
3106         struct clk_core *core = s->private;
3107         struct clk_duty *duty = &core->duty;
3108 
3109         seq_printf(s, "%u/%u\n", duty->num, duty->den);
3110 
3111         return 0;
3112 }
3113 DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3114 
3115 static int clk_min_rate_show(struct seq_file *s, void *data)
3116 {
3117         struct clk_core *core = s->private;
3118         unsigned long min_rate, max_rate;
3119 
3120         clk_prepare_lock();
3121         clk_core_get_boundaries(core, &min_rate, &max_rate);
3122         clk_prepare_unlock();
3123         seq_printf(s, "%lu\n", min_rate);
3124 
3125         return 0;
3126 }
3127 DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3128 
3129 static int clk_max_rate_show(struct seq_file *s, void *data)
3130 {
3131         struct clk_core *core = s->private;
3132         unsigned long min_rate, max_rate;
3133 
3134         clk_prepare_lock();
3135         clk_core_get_boundaries(core, &min_rate, &max_rate);
3136         clk_prepare_unlock();
3137         seq_printf(s, "%lu\n", max_rate);
3138 
3139         return 0;
3140 }
3141 DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3142 
3143 static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3144 {
3145         struct dentry *root;
3146 
3147         if (!core || !pdentry)
3148                 return;
3149 
3150         root = debugfs_create_dir(core->name, pdentry);
3151         core->dentry = root;
3152 
3153         debugfs_create_ulong("clk_rate", 0444, root, &core->rate);
3154         debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3155         debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3156         debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3157         debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3158         debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3159         debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3160         debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3161         debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3162         debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3163         debugfs_create_file("clk_duty_cycle", 0444, root, core,
3164                             &clk_duty_cycle_fops);
3165 
3166         if (core->num_parents > 0)
3167                 debugfs_create_file("clk_parent", 0444, root, core,
3168                                     &current_parent_fops);
3169 
3170         if (core->num_parents > 1)
3171                 debugfs_create_file("clk_possible_parents", 0444, root, core,
3172                                     &possible_parents_fops);
3173 
3174         if (core->ops->debug_init)
3175                 core->ops->debug_init(core->hw, core->dentry);
3176 }
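
/*
 * For reference (editorial note): after clk_debug_create_one() runs for a
 * clk named "foo", the per-clk debugfs tree looks like:
 *
 *	/sys/kernel/debug/clk/foo/clk_rate
 *	/sys/kernel/debug/clk/foo/clk_min_rate
 *	/sys/kernel/debug/clk/foo/clk_max_rate
 *	/sys/kernel/debug/clk/foo/clk_accuracy
 *	/sys/kernel/debug/clk/foo/clk_phase
 *	/sys/kernel/debug/clk/foo/clk_flags
 *	/sys/kernel/debug/clk/foo/clk_prepare_count
 *	/sys/kernel/debug/clk/foo/clk_enable_count
 *	/sys/kernel/debug/clk/foo/clk_protect_count
 *	/sys/kernel/debug/clk/foo/clk_notifier_count
 *	/sys/kernel/debug/clk/foo/clk_duty_cycle
 *	/sys/kernel/debug/clk/foo/clk_parent		(only if num_parents > 0)
 *	/sys/kernel/debug/clk/foo/clk_possible_parents	(only if num_parents > 1)
 *
 * plus whatever the provider's optional .debug_init callback adds.
 */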
3177 
3178 /**
3179  * clk_debug_register - add a clk node to the debugfs clk directory
3180  * @core: the clk being added to the debugfs clk directory
3181  *
3182  * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3183  * initialized.  Otherwise the clk is only queued on clk_debug_list; its
3184  * debugfs entries are created lazily by clk_debug_init as a late_initcall.
3185  */
3186 static void clk_debug_register(struct clk_core *core)
3187 {
3188         mutex_lock(&clk_debug_lock);
3189         hlist_add_head(&core->debug_node, &clk_debug_list);
3190         if (inited)
3191                 clk_debug_create_one(core, rootdir);
3192         mutex_unlock(&clk_debug_lock);
3193 }
3194 
3195 /**
3196  * clk_debug_unregister - remove a clk node from the debugfs clk directory
3197  * @core: the clk being removed from the debugfs clk directory
3198  *
3199  * Dynamically removes a clk and all its child nodes from the
3200  * debugfs clk directory if clk->dentry points to debugfs created by
3201  * clk_debug_register in __clk_core_init.
3202  */
3203 static void clk_debug_unregister(struct clk_core *core)
3204 {
3205         mutex_lock(&clk_debug_lock);
3206         hlist_del_init(&core->debug_node);
3207         debugfs_remove_recursive(core->dentry);
3208         core->dentry = NULL;
3209         mutex_unlock(&clk_debug_lock);
3210 }
3211 
3212 /**
3213  * clk_debug_init - lazily populate the debugfs clk directory
3214  *
3215  * clks are often initialized very early during boot before memory can be
3216  * dynamically allocated and well before debugfs is set up. This function
3217  * populates the debugfs clk directory once at boot-time when we know that
3218  * debugfs is set up. It should only be called once at boot-time; all clks
3219  * added dynamically afterwards are registered with clk_debug_register.
3220  */
3221 static int __init clk_debug_init(void)
3222 {
3223         struct clk_core *core;
3224 
3225         rootdir = debugfs_create_dir("clk", NULL);
3226 
3227         debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3228                             &clk_summary_fops);
3229         debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3230                             &clk_dump_fops);
3231         debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3232                             &clk_summary_fops);
3233         debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3234                             &clk_dump_fops);
3235 
3236         mutex_lock(&clk_debug_lock);
3237         hlist_for_each_entry(core, &clk_debug_list, debug_node)
3238                 clk_debug_create_one(core, rootdir);
3239 
3240         inited = 1;
3241         mutex_unlock(&clk_debug_lock);
3242 
3243         return 0;
3244 }
3245 late_initcall(clk_debug_init);
3246 #else
3247 static inline void clk_debug_register(struct clk_core *core) { }
3248 static inline void clk_debug_reparent(struct clk_core *core,
3249                                       struct clk_core *new_parent)
3250 {
3251 }
3252 static inline void clk_debug_unregister(struct clk_core *core)
3253 {
3254 }
3255 #endif
3256 
3257 static void clk_core_reparent_orphans_nolock(void)
3258 {
3259         struct clk_core *orphan;
3260         struct hlist_node *tmp2;
3261 
3262         /*
3263          * Walk the list of orphan clocks and reparent any that have newly
3264          * found a parent.
3265          */
3266         hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3267                 struct clk_core *parent = __clk_init_parent(orphan);
3268 
3269                 /*
3270          * We need to use __clk_set_parent_before() and _after() to
3271          * properly migrate any prepare/enable count of the orphan
3272                  * clock. This is important for CLK_IS_CRITICAL clocks, which
3273                  * are enabled during init but might not have a parent yet.
3274                  */
3275                 if (parent) {
3276                         /* update the clk tree topology */
3277                         __clk_set_parent_before(orphan, parent);
3278                         __clk_set_parent_after(orphan, parent, NULL);
3279                         __clk_recalc_accuracies(orphan);
3280                         __clk_recalc_rates(orphan, 0);
3281                 }
3282         }
3283 }
3284 
3285 /**
3286  * __clk_core_init - initialize the data structures in a struct clk_core
3287  * @core:       clk_core being initialized
3288  *
3289  * Initializes the lists in struct clk_core, queries the hardware for the
3290  * parent and rate and sets them both.
3291  */
3292 static int __clk_core_init(struct clk_core *core)
3293 {
3294         int ret;
3295         unsigned long rate;
3296 
3297         if (!core)
3298                 return -EINVAL;
3299 
3300         clk_prepare_lock();
3301 
3302         ret = clk_pm_runtime_get(core);
3303         if (ret)
3304                 goto unlock;
3305 
3306         /* check to see if a clock with this name is already registered */
3307         if (clk_core_lookup(core->name)) {
3308                 pr_debug("%s: clk %s already initialized\n",
3309                                 __func__, core->name);
3310                 ret = -EEXIST;
3311                 goto out;
3312         }
3313 
3314         /* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3315         if (core->ops->set_rate &&
3316             !((core->ops->round_rate || core->ops->determine_rate) &&
3317               core->ops->recalc_rate)) {
3318                 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3319                        __func__, core->name);
3320                 ret = -EINVAL;
3321                 goto out;
3322         }
3323 
3324         if (core->ops->set_parent && !core->ops->get_parent) {
3325                 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3326                        __func__, core->name);
3327                 ret = -EINVAL;
3328                 goto out;
3329         }
3330 
3331         if (core->num_parents > 1 && !core->ops->get_parent) {
3332                 pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3333                        __func__, core->name);
3334                 ret = -EINVAL;
3335                 goto out;
3336         }
3337 
3338         if (core->ops->set_rate_and_parent &&
3339                         !(core->ops->set_parent && core->ops->set_rate)) {
3340                 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3341                                 __func__, core->name);
3342                 ret = -EINVAL;
3343                 goto out;
3344         }
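
        /*
         * Editorial summary of the sanity rules enforced above:
         *
         *      .set_rate               needs .recalc_rate plus .round_rate
         *                              or .determine_rate
         *      .set_parent             needs .get_parent
         *      num_parents > 1         needs .get_parent
         *      .set_rate_and_parent    needs .set_parent and .set_rate
         */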
3345 
3346         /*
3347          * optional platform-specific magic
3348          *
3349          * The .init callback is not used by any of the basic clock types, but
3350          * exists for weird hardware that must perform initialization magic.
3351          * Please consider other ways of solving initialization problems before
3352          * using this callback, as its use is discouraged.
3353          *
3354          * If it exists, this callback should be called before any other
3355          * callback of the clock.
3356          */
3357         if (core->ops->init)
3358                 core->ops->init(core->hw);
3359 
3361         core->parent = __clk_init_parent(core);
3362 
3363         /*
3364          * Populate core->parent if parent has already been clk_core_init'd. If
3365          * parent has not yet been clk_core_init'd then place clk in the orphan
3366          * list.  If clk doesn't have any parents then place it in the root
3367          * clk list.
3368          *
3369          * Every time a new clk is clk_init'd then we walk the list of orphan
3370          * clocks and re-parent any that are children of the clock currently
3371          * being clk_init'd.
3372          */
3373         if (core->parent) {
3374                 hlist_add_head(&core->child_node,
3375                                 &core->parent->children);
3376                 core->orphan = core->parent->orphan;
3377         } else if (!core->num_parents) {
3378                 hlist_add_head(&core->child_node, &clk_root_list);
3379                 core->orphan = false;
3380         } else {
3381                 hlist_add_head(&core->child_node, &clk_orphan_list);
3382                 core->orphan = true;
3383         }
3384 
3385         /*
3386          * Set clk's accuracy.  The preferred method is to use
3387          * .recalc_accuracy. For simple clocks and lazy developers the default
3388          * fallback is to use the parent's accuracy.  If a clock doesn't have a
3389          * parent (or is orphaned) then accuracy is set to zero (perfect
3390          * clock).
3391          */
3392         if (core->ops->recalc_accuracy)
3393                 core->accuracy = core->ops->recalc_accuracy(core->hw,
3394                                         __clk_get_accuracy(core->parent));
3395         else if (core->parent)
3396                 core->accuracy = core->parent->accuracy;
3397         else
3398                 core->accuracy = 0;
3399 
3400         /*
3401          * Set clk's phase, with clk_core_get_phase() caching the result.
3402          * Since a phase is by definition relative to its parent, just
3403          * query the current clock phase, or else assume it's in phase.
3404          */
3405         clk_core_get_phase(core);
3406 
3407         /*
3408          * Set clk's duty cycle.
3409          */
3410         clk_core_update_duty_cycle_nolock(core);
3411 
3412         /*
3413          * Set clk's rate.  The preferred method is to use .recalc_rate.  For
3414          * simple clocks and lazy developers the default fallback is to use the
3415          * parent's rate.  If a clock doesn't have a parent (or is orphaned)
3416          * then rate is set to zero.
3417          */
3418         if (core->ops->recalc_rate)
3419                 rate = core->ops->recalc_rate(core->hw,
3420                                 clk_core_get_rate_nolock(core->parent));
3421         else if (core->parent)
3422                 rate = core->parent->rate;
3423         else
3424                 rate = 0;
3425         core->rate = core->req_rate = rate;
3426 
3427         /*
3428          * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3429          * don't get accidentally disabled when walking the orphan tree and
3430          * reparenting clocks
3431          */
3432         if (core->flags & CLK_IS_CRITICAL) {
3433                 unsigned long flags;
3434 
3435                 ret = clk_core_prepare(core);
3436                 if (ret)
3437                         goto out;
3438 
3439                 flags = clk_enable_lock();
3440                 ret = clk_core_enable(core);
3441                 clk_enable_unlock(flags);
3442                 if (ret) {
3443                         clk_core_unprepare(core);
3444                         goto out;
3445                 }
3446         }
3447 
3448         clk_core_reparent_orphans_nolock();
3449 
3451         kref_init(&core->ref);
3452 out:
3453         clk_pm_runtime_put(core);
3454 unlock:
3455         if (ret)
3456                 hlist_del_init(&core->child_node);
3457 
3458         clk_prepare_unlock();
3459 
3460         if (!ret)
3461                 clk_debug_register(core);
3462 
3463         return ret;
3464 }
3465 
3466 /**
3467  * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3468  * @core: clk to add consumer to
3469  * @clk: consumer to link to a clk
3470  */
3471 static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3472 {
3473         clk_prepare_lock();
3474         hlist_add_head(&clk->clks_node, &core->clks);
3475         clk_prepare_unlock();
3476 }
3477 
3478 /**
3479  * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3480  * @clk: consumer to unlink
3481  */
3482 static void clk_core_unlink_consumer(struct clk *clk)
3483 {
3484         lockdep_assert_held(&prepare_lock);
3485         hlist_del(&clk->clks_node);
3486 }
3487 
3488 /**
3489  * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
3490  * @core: clk to allocate a consumer for
3491  * @dev_id: string describing device name
3492  * @con_id: connection ID string on device
3493  *
3494  * Returns: clk consumer left unlinked from the consumer list
3495  */
3496 static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3497                              const char *con_id)
3498 {
3499         struct clk *clk;
3500 
3501         clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3502         if (!clk)
3503                 return ERR_PTR(-ENOMEM);
3504 
3505         clk->core = core;
3506         clk->dev_id = dev_id;
3507         clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3508         clk->max_rate = ULONG_MAX;
3509 
3510         return clk;
3511 }
3512 
3513 /**
3514  * free_clk - Free a clk consumer
3515  * @clk: clk consumer to free
3516  *
3517  * Note, this assumes the clk has been unlinked from the clk_core consumer
3518  * list.
3519  */
3520 static void free_clk(struct clk *clk)
3521 {
3522         kfree_const(clk->con_id);
3523         kfree(clk);
3524 }
3525 
3526 /**
3527  * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
3528  * a clk_hw
3529  * @dev: clk consumer device
3530  * @hw: clk_hw associated with the clk being consumed
3531  * @dev_id: string describing device name
3532  * @con_id: connection ID string on device
3533  *
3534  * This is the main function used to create a clk pointer for use by clk
3535  * consumers. It connects a consumer to the clk_core and clk_hw structures
3536  * used by the framework and clk provider respectively.
3537  */
3538 struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3539                               const char *dev_id, const char *con_id)
3540 {
3541         struct clk *clk;
3542         struct clk_core *core;
3543 
3544         /* This is to allow this function to be chained to others */
3545         if (IS_ERR_OR_NULL(hw))
3546                 return ERR_CAST(hw);
3547 
3548         core = hw->core;
3549         clk = alloc_clk(core, dev_id, con_id);
3550         if (IS_ERR(clk))
3551                 return clk;
3552         clk->dev = dev;
3553 
3554         if (!try_module_get(core->owner)) {
3555                 free_clk(clk);
3556                 return ERR_PTR(-ENOENT);
3557         }
3558 
3559         kref_get(&core->ref);
3560         clk_core_link_consumer(core, clk);
3561 
3562         return clk;
3563 }
3564 
3565 static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3566 {
3567         const char *dst;
3568 
3569         if (!src) {
3570                 if (must_exist)
3571                         return -EINVAL;
3572                 return 0;
3573         }
3574 
3575         *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3576         if (!dst)
3577                 return -ENOMEM;
3578 
3579         return 0;
3580 }
3581 
3582 static int clk_core_populate_parent_map(struct clk_core *core,
3583                                         const struct clk_init_data *init)
3584 {
3585         u8 num_parents = init->num_parents;
3586         const char * const *parent_names = init->parent_names;
3587         const struct clk_hw **parent_hws = init->parent_hws;
3588         const struct clk_parent_data *parent_data = init->parent_data;
3589         int i, ret = 0;
3590         struct clk_parent_map *parents, *parent;
3591 
3592         if (!num_parents)
3593                 return 0;
3594 
3595         /*
3596          * Avoid unnecessary string look-ups of clk_core's possible parents by
3597          * having a cache of names/clk_hw pointers to clk_core pointers.
3598          */
3599         parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3600         core->parents = parents;
3601         if (!parents)
3602                 return -ENOMEM;
3603 
3604         /* Copy everything over because it might be __initdata */
3605         for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3606                 parent->index = -1;
3607                 if (parent_names) {
3608                         /* throw a WARN if any entries are NULL */
3609                         WARN(!parent_names[i],
3610                                 "%s: invalid NULL in %s's .parent_names\n",
3611                                 __func__, core->name);
3612                         ret = clk_cpy_name(&parent->name, parent_names[i],
3613                                            true);
3614                 } else if (parent_data) {
3615                         parent->hw = parent_data[i].hw;
3616                         parent->index = parent_data[i].index;
3617                         ret = clk_cpy_name(&parent->fw_name,
3618                                            parent_data[i].fw_name, false);
3619                         if (!ret)
3620                                 ret = clk_cpy_name(&parent->name,
3621                                                    parent_data[i].name,
3622                                                    false);
3623                 } else if (parent_hws) {
3624                         parent->hw = parent_hws[i];
3625                 } else {
3626                         ret = -EINVAL;
3627                         WARN(1, "Must specify parents if num_parents > 0\n");
3628                 }
3629 
3630                 if (ret) {
3631                         do {
3632                                 kfree_const(parents[i].name);
3633                                 kfree_const(parents[i].fw_name);
3634                         } while (--i >= 0);
3635                         kfree(parents);
3636 
3637                         return ret;
3638                 }
3639         }
3640 
3641         return 0;
3642 }
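
/*
 * Example (editorial sketch, hypothetical qux_* names): the three mutually
 * exclusive ways a provider can describe parents in clk_init_data, all
 * consumed by clk_core_populate_parent_map() above:
 *
 *	// 1. Legacy: global clock names, resolved by string lookup
 *	init.parent_names = (const char *[]){ "qux_pll" };
 *
 *	// 2. Preferred: clk_parent_data with DT fw_name/index and fallbacks
 *	init.parent_data = (const struct clk_parent_data[]){
 *		{ .fw_name = "ref", .name = "qux_ref", .index = 0 },
 *	};
 *
 *	// 3. Direct clk_hw pointers for parents in the same driver
 *	init.parent_hws = (const struct clk_hw *[]){ &qux_pll_hw };
 *
 *	init.num_parents = 1;
 */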
3643 
3644 static void clk_core_free_parent_map(struct clk_core *core)
3645 {
3646         int i = core->num_parents;
3647 
3648         if (!core->num_parents)
3649                 return;
3650 
3651         while (--i >= 0) {
3652                 kfree_const(core->parents[i].name);
3653                 kfree_const(core->parents[i].fw_name);
3654         }
3655 
3656         kfree(core->parents);
3657 }
3658 
3659 static struct clk *
3660 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3661 {
3662         int ret;
3663         struct clk_core *core;
3664         const struct clk_init_data *init = hw->init;
3665 
3666         /*
3667          * The init data is not supposed to be used outside of the registration path.
3668          * Set it to NULL so that provider drivers can't use it either and so that
3669          * we catch use of hw->init early on in the core.
3670          */
3671         hw->init = NULL;
3672 
3673         core = kzalloc(sizeof(*core), GFP_KERNEL);
3674         if (!core) {
3675                 ret = -ENOMEM;
3676                 goto fail_out;
3677         }
3678 
3679         core->name = kstrdup_const(init->name, GFP_KERNEL);
3680         if (!core->name) {
3681                 ret = -ENOMEM;
3682                 goto fail_name;
3683         }
3684 
3685         if (WARN_ON(!init->ops)) {
3686                 ret = -EINVAL;
3687                 goto fail_ops;
3688         }
3689         core->ops = init->ops;
3690 
3691         if (dev && pm_runtime_enabled(dev))
3692                 core->rpm_enabled = true;
3693         core->dev = dev;
3694         core->of_node = np;
3695         if (dev && dev->driver)
3696                 core->owner = dev->driver->owner;
3697         core->hw = hw;
3698         core->flags = init->flags;
3699         core->num_parents = init->num_parents;
3700         core->min_rate = 0;
3701         core->max_rate = ULONG_MAX;
3702         hw->core = core;
3703 
3704         ret = clk_core_populate_parent_map(core, init);
3705         if (ret)
3706                 goto fail_parents;
3707 
3708         INIT_HLIST_HEAD(&core->clks);
3709 
3710         /*
3711          * Don't call clk_hw_create_clk() here because that would pin the
3712          * provider module to itself and prevent it from ever being removed.
3713          */
3714         hw->clk = alloc_clk(core, NULL, NULL);
3715         if (IS_ERR(hw->clk)) {
3716                 ret = PTR_ERR(hw->clk);
3717                 goto fail_create_clk;
3718         }
3719 
3720         clk_core_link_consumer(hw->core, hw->clk);
3721 
3722         ret = __clk_core_init(core);
3723         if (!ret)
3724                 return hw->clk;
3725 
3726         clk_prepare_lock();
3727         clk_core_unlink_consumer(hw->clk);
3728         clk_prepare_unlock();
3729 
3730         free_clk(hw->clk);
3731         hw->clk = NULL;
3732 
3733 fail_create_clk:
3734         clk_core_free_parent_map(core);
3735 fail_parents:
3736 fail_ops:
3737         kfree_const(core->name);
3738 fail_name:
3739         kfree(core);
3740 fail_out:
3741         return ERR_PTR(ret);
3742 }
3743 
3744 /**
3745  * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
3746  * @dev: Device to get device node of
3747  *
3748  * Return: device node pointer of @dev, or the device node pointer of
3749  * @dev->parent if @dev doesn't have a device node, or NULL if neither
3750  * @dev nor @dev->parent has a device node.
3751  */
3752 static struct device_node *dev_or_parent_of_node(struct device *dev)
3753 {
3754         struct device_node *np;
3755 
3756         if (!dev)
3757                 return NULL;
3758 
3759         np = dev_of_node(dev);
3760         if (!np)
3761                 np = dev_of_node(dev->parent);
3762 
3763         return np;
3764 }
3765 
3766 /**
3767  * clk_register - allocate a new clock, register it and return an opaque cookie
3768  * @dev: device that is registering this clock
3769  * @hw: link to hardware-specific clock data
3770  *
3771  * clk_register is the *deprecated* interface for populating the clock tree with
3772  * new clock nodes. Use clk_hw_register() instead.
3773  *
3774  * Returns: a pointer to the newly allocated struct clk which
3775  * cannot be dereferenced by driver code but may be used in conjunction with the
3776  * rest of the clock API.  In the event of an error clk_register returns an
3777  * error pointer; drivers must check for it with IS_ERR() after calling clk_register.
3778  */
3779 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3780 {
3781         return __clk_register(dev, dev_or_parent_of_node(dev), hw);
3782 }
3783 EXPORT_SYMBOL_GPL(clk_register);
3784 
3785 /**
3786  * clk_hw_register - register a clk_hw and return an error code
3787  * @dev: device that is registering this clock
3788  * @hw: link to hardware-specific clock data
3789  *
3790  * clk_hw_register is the primary interface for populating the clock tree with
3791  * new clock nodes. It returns an integer equal to zero indicating success or
3792  * less than zero indicating failure. Drivers must test for an error code after
3793  * calling clk_hw_register().
3794  */
3795 int clk_hw_register(struct device *dev, struct clk_hw *hw)
3796 {
3797         return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
3798                                hw));
3799 }
3800 EXPORT_SYMBOL_GPL(clk_hw_register);
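
/*
 * Example (editorial sketch, hypothetical foo_* names): a minimal provider
 * registration with clk_hw_register(). Note that hw->init is consumed and
 * set to NULL by __clk_register(), so the init data may live on the stack:
 *
 *	struct foo_clk {
 *		struct clk_hw hw;
 *		void __iomem *reg;
 *	};
 *
 *	static int foo_clk_register(struct device *dev, struct foo_clk *foo)
 *	{
 *		const char *parent = "foo_parent";
 *		struct clk_init_data init = { };
 *
 *		init.name = "foo";
 *		init.ops = &foo_clk_ops;	// hypothetical clk_ops
 *		init.parent_names = &parent;
 *		init.num_parents = 1;
 *
 *		foo->hw.init = &init;
 *		return clk_hw_register(dev, &foo->hw);
 *	}
 */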
3801 
3802 /**
3803  * of_clk_hw_register - register a clk_hw and return an error code
3804  * @node: device_node of device that is registering this clock
3805  * @hw: link to hardware-specific clock data
3806  *
3807  * of_clk_hw_register() is the primary interface for populating the clock tree
3808  * with new clock nodes when a struct device is not available, but a struct
3809  * device_node is. It returns an integer equal to zero indicating success or
3810  * less than zero indicating failure. Drivers must test for an error code after
3811  * calling of_clk_hw_register().
3812  */
3813 int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
3814 {
3815         return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
3816 }
3817 EXPORT_SYMBOL_GPL(of_clk_hw_register);
3818 
3819 /* Free memory allocated for a clock. */
3820 static void __clk_release(struct kref *ref)
3821 {
3822         struct clk_core *core = container_of(ref, struct clk_core, ref);
3823 
3824         lockdep_assert_held(&prepare_lock);
3825 
3826         clk_core_free_parent_map(core);
3827         kfree_const(core->name);
3828         kfree(core);
3829 }
3830 
3831 /*
3832  * Empty clk_ops for unregistered clocks. These are used temporarily
3833  * after clk_unregister() was called on a clock and until the last
3834  * consumer calls clk_put() and the struct clk object is freed.
3835  */
3836 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3837 {
3838         return -ENXIO;
3839 }
3840 
3841 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3842 {
3843         WARN_ON_ONCE(1);
3844 }
3845 
3846 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3847                                         unsigned long parent_rate)
3848 {
3849         return -ENXIO;
3850 }
3851 
3852 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3853 {
3854         return -ENXIO;
3855 }
3856 
3857 static const struct clk_ops clk_nodrv_ops = {
3858         .enable         = clk_nodrv_prepare_enable,
3859         .disable        = clk_nodrv_disable_unprepare,
3860         .prepare        = clk_nodrv_prepare_enable,
3861         .unprepare      = clk_nodrv_disable_unprepare,
3862         .set_rate       = clk_nodrv_set_rate,
3863         .set_parent     = clk_nodrv_set_parent,
3864 };
3865 
3866 static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
3867                                                 struct clk_core *target)
3868 {
3869         int i;
3870         struct clk_core *child;
3871 
3872         for (i = 0; i < root->num_parents; i++)
3873                 if (root->parents[i].core == target)
3874                         root->parents[i].core = NULL;
3875 
3876         hlist_for_each_entry(child, &root->children, child_node)
3877                 clk_core_evict_parent_cache_subtree(child, target);
3878 }
3879 
3880 /* Remove this clk from all parent caches */
3881 static void clk_core_evict_parent_cache(struct clk_core *core)
3882 {
3883         struct hlist_head **lists;
3884         struct clk_core *root;
3885 
3886         lockdep_assert_held(&prepare_lock);
3887 
3888         for (lists = all_lists; *lists; lists++)
3889                 hlist_for_each_entry(root, *lists, child_node)
3890                         clk_core_evict_parent_cache_subtree(root, core);
3892 }
3893 
3894 /**
3895  * clk_unregister - unregister a currently registered clock
3896  * @clk: clock to unregister
3897  */
3898 void clk_unregister(struct clk *clk)
3899 {
3900         unsigned long flags;
3901 
3902         if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3903                 return;
3904 
3905         clk_debug_unregister(clk->core);
3906 
3907         clk_prepare_lock();
3908 
3909         if (clk->core->ops == &clk_nodrv_ops) {
3910                 pr_err("%s: unregistered clock: %s\n", __func__,
3911                        clk->core->name);
3912                 goto unlock;
3913         }
3914         /*
3915          * Assign empty clock ops for consumers that might still hold
3916          * a reference to this clock.
3917          */
3918         flags = clk_enable_lock();
3919         clk->core->ops = &clk_nodrv_ops;
3920         clk_enable_unlock(flags);
3921 
3922         if (!hlist_empty(&clk->core->children)) {
3923                 struct clk_core *child;
3924                 struct hlist_node *t;
3925 
3926                 /* Reparent all children to the orphan list. */
3927                 hlist_for_each_entry_safe(child, t, &clk->core->children,
3928                                           child_node)
3929                         clk_core_set_parent_nolock(child, NULL);
3930         }
3931 
3932         clk_core_evict_parent_cache(clk->core);
3933 
3934         hlist_del_init(&clk->core->child_node);
3935 
3936         if (clk->core->prepare_count)
3937                 pr_warn("%s: unregistering prepared clock: %s\n",
3938                                         __func__, clk->core->name);
3939 
3940         if (clk->core->protect_count)
3941                 pr_warn("%s: unregistering protected clock: %s\n",
3942                                         __func__, clk->core->name);
3943 
3944         kref_put(&clk->core->ref, __clk_release);
3945         free_clk(clk);
3946 unlock:
3947         clk_prepare_unlock();
3948 }
3949 EXPORT_SYMBOL_GPL(clk_unregister);
3950 
3951 /**
3952  * clk_hw_unregister - unregister a currently registered clk_hw
3953  * @hw: hardware-specific clock data to unregister
3954  */
3955 void clk_hw_unregister(struct clk_hw *hw)
3956 {
3957         clk_unregister(hw->clk);
3958 }
3959 EXPORT_SYMBOL_GPL(clk_hw_unregister);
3960 
3961 static void devm_clk_release(struct device *dev, void *res)
3962 {
3963         clk_unregister(*(struct clk **)res);
3964 }
3965 
3966 static void devm_clk_hw_release(struct device *dev, void *res)
3967 {
3968         clk_hw_unregister(*(struct clk_hw **)res);
3969 }
3970 
3971 /**
3972  * devm_clk_register - resource managed clk_register()
3973  * @dev: device that is registering this clock
3974  * @hw: link to hardware-specific clock data
3975  *
3976  * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
3977  *
3978  * Clocks returned from this function are automatically clk_unregister()ed on
3979  * driver detach. See clk_register() for more information.
3980  */
3981 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
3982 {
3983         struct clk *clk;
3984         struct clk **clkp;
3985 
3986         clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
3987         if (!clkp)
3988                 return ERR_PTR(-ENOMEM);
3989 
3990         clk = clk_register(dev, hw);
3991         if (!IS_ERR(clk)) {
3992                 *clkp = clk;
3993                 devres_add(dev, clkp);
3994         } else {
3995                 devres_free(clkp);
3996         }
3997 
3998         return clk;
3999 }
4000 EXPORT_SYMBOL_GPL(devm_clk_register);
4001 
4002 /**
4003  * devm_clk_hw_register - resource managed clk_hw_register()
4004  * @dev: device that is registering this clock
4005  * @hw: link to hardware-specific clock data
4006  *
4007  * Managed clk_hw_register(). Clocks registered by this function are
4008  * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4009  * for more information.
4010  */
4011 int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4012 {
4013         struct clk_hw **hwp;
4014         int ret;
4015 
4016         hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
4017         if (!hwp)
4018                 return -ENOMEM;
4019 
4020         ret = clk_hw_register(dev, hw);
4021         if (!ret) {
4022                 *hwp = hw;
4023                 devres_add(dev, hwp);
4024         } else {
4025                 devres_free(hwp);
4026         }
4027 
4028         return ret;
4029 }
4030 EXPORT_SYMBOL_GPL(devm_clk_hw_register);
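
/*
 * Example (editorial sketch, hypothetical quux_* names): with the devres
 * variant the clk_hw is unregistered automatically on driver detach, so
 * probe() needs no matching cleanup:
 *
 *	static int quux_probe(struct platform_device *pdev)
 *	{
 *		struct quux_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->hw.init = &quux_init_data;	// hypothetical
 *		return devm_clk_hw_register(&pdev->dev, &priv->hw);
 *	}
 */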
4031 
4032 static int devm_clk_match(struct device *dev, void *res, void *data)
4033 {
4034         struct clk *c = res;

4035         if (WARN_ON(!c))
4036                 return 0;
4037         return c == data;
4038 }
4039 
4040 static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4041 {
4042         struct clk_hw *hw = res;
4043 
4044         if (WARN_ON(!hw))
4045                 return 0;
4046         return hw == data;
4047 }
4048 
4049 /**
4050  * devm_clk_unregister - resource managed clk_unregister()
4051  * @dev: device that is unregistering @clk
 * @clk: clock to unregister
4052  *
4053  * Deallocate a clock allocated with devm_clk_register(). Normally
4054  * this function will not need to be called and the resource management
4055  * code will ensure that the resource is freed.
4056  */
4057 void devm_clk_unregister(struct device *dev, struct clk *clk)
4058 {
4059         WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4060 }
4061 EXPORT_SYMBOL_GPL(devm_clk_unregister);
4062 
4063 /**
4064  * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4065  * @dev: device that is unregistering the hardware-specific clock data
4066  * @hw: link to hardware-specific clock data
4067  *
4068  * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4069  * this function will not need to be called and the resource management
4070  * code will ensure that the resource is freed.
4071  */
4072 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4073 {
4074         WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
4075                                 hw));
4076 }
4077 EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4078 
4079 /*
4080  * clkdev helpers
4081  */
4082 
4083 void __clk_put(struct clk *clk)
4084 {
4085         struct module *owner;
4086 
4087         if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4088                 return;
4089 
4090         clk_prepare_lock();
4091 
4092         /*
4093          * Before calling clk_put, all calls to clk_rate_exclusive_get() from
4094          * a given consumer should have been balanced with calls to
4095          * clk_rate_exclusive_put() by that same consumer.
4096          */
4097         if (WARN_ON(clk->exclusive_count)) {
4098                 /* We voiced our concern, let's sanitize the situation */
4099                 clk->core->protect_count -= (clk->exclusive_count - 1);
4100                 clk_core_rate_unprotect(clk->core);
4101                 clk->exclusive_count = 0;
4102         }
4103 
4104         hlist_del(&clk->clks_node);
4105         if (clk->min_rate > clk->core->req_rate ||
4106             clk->max_rate < clk->core->req_rate)
4107                 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4108 
4109         owner = clk->core->owner;
4110         kref_put(&clk->core->ref, __clk_release);
4111 
4112         clk_prepare_unlock();
4113 
4114         module_put(owner);
4115 
4116         free_clk(clk);
4117 }
4118 
4119 /***        clk rate change notifiers        ***/
4120 
4121 /**
4122  * clk_notifier_register - add a clk rate change notifier
4123  * @clk: struct clk * to watch
4124  * @nb: struct notifier_block * with callback info
4125  *
4126  * Request notification when clk's rate changes.  This uses an SRCU
4127  * notifier because we want it to block and notifier unregistrations are
4128  * uncommon.  The callbacks associated with the notifier must not
4129  * re-enter the clk framework by calling any top-level clk APIs;
4130  * this would cause nested locking of the prepare_lock mutex.
4131  *
4132  * In all notification cases (pre, post and abort rate change) the original
4133  * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4134  * and the new frequency is passed via struct clk_notifier_data.new_rate.
4135  *
4136  * clk_notifier_register() must be called from non-atomic context.
4137  * Returns -EINVAL if called with null arguments, -ENOMEM upon
4138  * allocation failure; otherwise, passes along the return value of
4139  * srcu_notifier_chain_register().
4140  */
4141 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4142 {
4143         struct clk_notifier *cn;
4144         int ret = -ENOMEM;
4145 
4146         if (!clk || !nb)
4147                 return -EINVAL;
4148 
4149         clk_prepare_lock();
4150 
4151         /* search the list of notifiers for this clk */
4152         list_for_each_entry(cn, &clk_notifier_list, node)
4153                 if (cn->clk == clk)
4154                         goto found;
4155 
4156         /* if clk wasn't in the notifier list, allocate a new clk_notifier;
4157          * the loop cursor is not a valid clk_notifier when nothing matched */
4158         cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4159         if (!cn)
4160                 goto out;
4161 
4162         cn->clk = clk;
4163         srcu_init_notifier_head(&cn->notifier_head);
4164 
4165         list_add(&cn->node, &clk_notifier_list);
4166 
4167 found:
4168         ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4169 
4170         clk->core->notifier_count++;
4171 
4172 out:
4173         clk_prepare_unlock();
4174 
4175         return ret;
4176 }
4177 EXPORT_SYMBOL_GPL(clk_notifier_register);
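
/*
 * Example (editorial sketch, hypothetical bar_* names): a consumer vetoing
 * rate changes above 100 MHz. PRE_RATE_CHANGE may be answered with
 * NOTIFY_BAD to abort the change; the callback must not call top-level
 * clk APIs, per the comment above:
 *
 *	static int bar_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block bar_nb = {
 *		.notifier_call = bar_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(bar_clk, &bar_nb);
 */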
4178 
4179 /**
4180  * clk_notifier_unregister - remove a clk rate change notifier
4181  * @clk: struct clk *
4182  * @nb: struct notifier_block * with callback info
4183  *
4184  * Requests no further notification for changes to 'clk' and frees the
4185  * memory allocated in clk_notifier_register.
4186  *
4187  * Returns -EINVAL if called with null arguments; otherwise, passes
4188  * along the return value of srcu_notifier_chain_unregister().
4189  */
4190 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4191 {
4192         struct clk_notifier *cn;
4193         int ret = -ENOENT;
4194 
4195         if (!clk || !nb)
4196                 return -EINVAL;
4197 
4198         clk_prepare_lock();
4199 
4200         /* do the work inside the walk: the cursor is only valid on a match */
4201         list_for_each_entry(cn, &clk_notifier_list, node) {
4202                 if (cn->clk == clk) {
4203                         ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4204 
4205                         clk->core->notifier_count--;
4206 
4207                         /* XXX the notifier code should handle this better */
4208                         if (!cn->notifier_head.head) {
4209                                 srcu_cleanup_notifier_head(&cn->notifier_head);
4210                                 list_del(&cn->node);
4211                                 kfree(cn);
4212                         }
4213                         break;
4214                 }
4215         }
4219 
4220         clk_prepare_unlock();
4221 
4222         return ret;
4223 }
4224 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4225 
4226 #ifdef CONFIG_OF
4227 static void clk_core_reparent_orphans(void)
4228 {
4229         clk_prepare_lock();
4230         clk_core_reparent_orphans_nolock();
4231         clk_prepare_unlock();
4232 }
4233 
4234 /**
4235  * struct of_clk_provider - Clock provider registration structure
4236  * @link: Entry in global list of clock providers
4237  * @node: Pointer to device tree node of clock provider
4238  * @get: Get clock callback.  Returns NULL or a struct clk for the
4239  *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a struct
 *       clk_hw for the given clock specifier
4240  * @data: context pointer to be passed into the @get or @get_hw callback
4241  */
4242 struct of_clk_provider {
4243         struct list_head link;
4244 
4245         struct device_node *node;
4246         struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4247         struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4248         void *data;
4249 };
4250 
4251 extern struct of_device_id __clk_of_table;
4252 static const struct of_device_id __clk_of_table_sentinel
4253         __used __section(__clk_of_table_end);
4254 
4255 static LIST_HEAD(of_clk_providers);
4256 static DEFINE_MUTEX(of_clk_mutex);
4257 
4258 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4259                                      void *data)
4260 {
4261         return data;
4262 }
4263 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4264 
4265 struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4266 {
4267         return data;
4268 }
4269 EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4270 
4271 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4272 {
4273         struct clk_onecell_data *clk_data = data;
4274         unsigned int idx = clkspec->args[0];
4275 
4276         if (idx >= clk_data->clk_num) {
4277                 pr_err("%s: invalid clock index %u\n", __func__, idx);
4278                 return ERR_PTR(-EINVAL);
4279         }
4280 
4281         return clk_data->clks[idx];
4282 }
4283 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4284 
4285 struct clk_hw *
4286 of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4287 {
4288         struct clk_hw_onecell_data *hw_data = data;
4289         unsigned int idx = clkspec->args[0];
4290 
4291         if (idx >= hw_data->num) {
4292                 pr_err("%s: invalid index %u\n", __func__, idx);
4293                 return ERR_PTR(-EINVAL);
4294         }
4295 
4296         return hw_data->hws[idx];
4297 }
4298 EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4299 
4300 /**
4301  * of_clk_add_provider() - Register a clock provider for a node
4302  * @np: Device node pointer associated with clock provider
4303  * @clk_src_get: callback for decoding clock
4304  * @data: context pointer for @clk_src_get callback.
4305  *
4306  * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4307  */
4308 int of_clk_add_provider(struct device_node *np,
4309                         struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4310                                                    void *data),
4311                         void *data)
4312 {
4313         struct of_clk_provider *cp;
4314         int ret;
4315 
4316         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4317         if (!cp)
4318                 return -ENOMEM;
4319 
4320         cp->node = of_node_get(np);
4321         cp->data = data;
4322         cp->get = clk_src_get;
4323 
4324         mutex_lock(&of_clk_mutex);
4325         list_add(&cp->link, &of_clk_providers);
4326         mutex_unlock(&of_clk_mutex);
4327         pr_debug("Added clock from %pOF\n", np);
4328 
4329         clk_core_reparent_orphans();
4330 
4331         ret = of_clk_set_defaults(np, true);
4332         if (ret < 0)
4333                 of_clk_del_provider(np);
4334 
4335         return ret;
4336 }
4337 EXPORT_SYMBOL_GPL(of_clk_add_provider);
4338 
4339 /**
4340  * of_clk_add_hw_provider() - Register a clock provider for a node
4341  * @np: Device node pointer associated with clock provider
4342  * @get: callback for decoding clk_hw
4343  * @data: context pointer for @get callback.
4344  */
4345 int of_clk_add_hw_provider(struct device_node *np,
4346                            struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4347                                                  void *data),
4348                            void *data)
4349 {
4350         struct of_clk_provider *cp;
4351         int ret;
4352 
4353         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4354         if (!cp)
4355                 return -ENOMEM;
4356 
4357         cp->node = of_node_get(np);
4358         cp->data = data;
4359         cp->get_hw = get;
4360 
4361         mutex_lock(&of_clk_mutex);
4362         list_add(&cp->link, &of_clk_providers);
4363         mutex_unlock(&of_clk_mutex);
4364         pr_debug("Added clk_hw provider from %pOF\n", np);
4365 
4366         clk_core_reparent_orphans();
4367 
4368         ret = of_clk_set_defaults(np, true);
4369         if (ret < 0)
4370                 of_clk_del_provider(np);
4371 
4372         return ret;
4373 }
4374 EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
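
/*
 * Example (editorial sketch, hypothetical baz_* names): registering a
 * one-cell provider with the of_clk_hw_onecell_get() helper above. The
 * hws[] array must be fully populated before the provider is added:
 *
 *	struct clk_hw_onecell_data *data;
 *	int i;
 *
 *	data = kzalloc(struct_size(data, hws, BAZ_NR_CLKS), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	data->num = BAZ_NR_CLKS;
 *	for (i = 0; i < BAZ_NR_CLKS; i++)
 *		data->hws[i] = baz_register_one(dev, i);	// hypothetical
 *	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
 */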
4375 
4376 static void devm_of_clk_release_provider(struct device *dev, void *res)
4377 {
4378         of_clk_del_provider(*(struct device_node **)res);
4379 }
4380 
4381 /*
4382  * We allow a child device to use its parent device as the clock provider node
4383  * for cases like MFD sub-devices where the child device driver wants to use
4384  * devm_*() APIs but not list the device in DT as a sub-node.
4385  */
4386 static struct device_node *get_clk_provider_node(struct device *dev)
4387 {
4388         struct device_node *np, *parent_np;
4389 
4390         np = dev->of_node;
4391         parent_np = dev->parent ? dev->parent->of_node : NULL;
4392 
4393         if (!of_find_property(np, "#clock-cells", NULL))
4394                 if (of_find_property(parent_np, "#clock-cells", NULL))
4395                         np = parent_np;
4396 
4397         return np;
4398 }
4399 
4400 /**
4401  * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4402  * @dev: Device acting as the clock provider (used for DT node and lifetime)
4403  * @get: callback for decoding clk_hw
4404  * @data: context pointer for @get callback
4405  *
4406  * Registers a clock provider for the given device's node. If the device node
4407  * lacks clock provider information (#clock-cells), the parent device's node
4408  * is checked for it instead. If the parent node has #clock-cells, it is used
4409  * for the registration. The provider is automatically released at device
4410  * exit.
4411  *
4412  * Return: 0 on success or an errno on failure.
4413  */
4414 int devm_of_clk_add_hw_provider(struct device *dev,
4415                         struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4416                                               void *data),
4417                         void *data)
4418 {
4419         struct device_node **ptr, *np;
4420         int ret;
4421 
4422         ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4423                            GFP_KERNEL);
4424         if (!ptr)
4425                 return -ENOMEM;
4426 
4427         np = get_clk_provider_node(dev);
4428         ret = of_clk_add_hw_provider(np, get, data);
4429         if (!ret) {
4430                 *ptr = np;
4431                 devres_add(dev, ptr);
4432         } else {
4433                 devres_free(ptr);
4434         }
4435 
4436         return ret;
4437 }
4438 EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
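
/*
 * Example (editorial sketch): an MFD sub-device whose parent owns the DT
 * node carrying #clock-cells can still use the devres API directly;
 * get_clk_provider_node() above falls back to the parent's node:
 *
 *	ret = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
 *					  hw_data);
 */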
4439 
4440 /**
4441  * of_clk_del_provider() - Remove a previously registered clock provider
4442  * @np: Device node pointer associated with clock provider
4443  */
4444 void of_clk_del_provider(struct device_node *np)
4445 {
4446         struct of_clk_provider *cp;
4447 
4448         mutex_lock(&of_clk_mutex);
4449         list_for_each_entry(cp, &of_clk_providers, link) {
4450                 if (cp->node == np) {
4451                         list_del(&cp->link);
4452                         of_node_put(cp->node);
4453                         kfree(cp);
4454                         break;
4455                 }
4456         }
4457         mutex_unlock(&of_clk_mutex);
4458 }
4459 EXPORT_SYMBOL_GPL(of_clk_del_provider);
4460 
4461 static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4462 {
4463         struct device_node **np = res;
4464 
4465         if (WARN_ON(!np || !*np))
4466                 return 0;
4467 
4468         return *np == data;
4469 }
4470 
4471 /**
4472  * devm_of_clk_del_provider() - Remove clock provider registered using devm
4473  * @dev: Device to whose lifetime the clock provider was bound
4474  */
4475 void devm_of_clk_del_provider(struct device *dev)
4476 {
4477         int ret;
4478         struct device_node *np = get_clk_provider_node(dev);
4479 
4480         ret = devres_release(dev, devm_of_clk_release_provider,
4481                              devm_clk_provider_match, np);
4482 
4483         WARN_ON(ret);
4484 }
4485 EXPORT_SYMBOL(devm_of_clk_del_provider);
4486 
4487 /**
4488  * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4489  * @np: device node to parse clock specifier from
4490  * @index: index of phandle to parse clock out of. If index < 0, @name is used
4491  * @name: clock name to find and parse. If name is NULL, the index is used
4492  * @out_args: Result of parsing the clock specifier
4493  *
4494  * Parses a device node's "clocks" and "clock-names" properties to find the
4495  * phandle and cells for the index or name that is desired. The resulting clock
4496  * specifier is placed into @out_args, or an errno is returned when there's a
4497  * parsing error. The @index argument is ignored if @name is non-NULL.
4498  *
4499  * Example:
4500  *
4501  * phandle1: clock-controller@1 {
4502  *      #clock-cells = <2>;
4503  * }
4504  *
4505  * phandle2: clock-controller@2 {
4506  *      #clock-cells = <1>;
4507  * }
4508  *
4509  * clock-consumer@3 {
4510  *      clocks = <&phandle1 1 2 &phandle2 3>;
4511  *      clock-names = "name1", "name2";
4512  * }
4513  *
4514  * To get a device_node for `clock-controller@2' node you may call this
4515  * function a few different ways:
4516  *
4517  *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4518  *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4519  *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4520  *
4521  * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4522  * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4523  * the "clock-names" property of @np.
4524  */
4525 static int of_parse_clkspec(const struct device_node *np, int index,
4526                             const char *name, struct of_phandle_args *out_args)
4527 {
4528         int ret = -ENOENT;
4529 
4530         /* Walk up the tree of devices looking for a clock property that matches */
4531         while (np) {
4532                 /*
4533                  * For named clocks, first look up the name in the
4534                  * "clock-names" property.  If it cannot be found, then index
4535                  * will be an error code and of_parse_phandle_with_args() will
4536                  * return -EINVAL.
4537                  */
4538                 if (name)
4539                         index = of_property_match_string(np, "clock-names", name);
4540                 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4541                                                  index, out_args);
4542                 if (!ret)
4543                         break;
4544                 if (name && index >= 0)
4545                         break;
4546 
4547                 /*
4548                  * No matching clock found on this node.  If the parent node
4549                  * has a "clock-ranges" property, then we can try one of its
4550                  * clocks.
4551                  */
4552                 np = np->parent;
4553                 if (np && !of_get_property(np, "clock-ranges", NULL))
4554                         break;
4555                 index = 0;
4556         }
4557 
4558         return ret;
4559 }
4560 
4561 static struct clk_hw *
4562 __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4563                               struct of_phandle_args *clkspec)
4564 {
4565         struct clk *clk;
4566 
4567         if (provider->get_hw)
4568                 return provider->get_hw(clkspec, provider->data);
4569 
4570         clk = provider->get(clkspec, provider->data);
4571         if (IS_ERR(clk))
4572                 return ERR_CAST(clk);
4573         return __clk_get_hw(clk);
4574 }
4575 
4576 static struct clk_hw *
4577 of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4578 {
4579         struct of_clk_provider *provider;
4580         struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4581 
4582         if (!clkspec)
4583                 return ERR_PTR(-EINVAL);
4584 
4585         mutex_lock(&of_clk_mutex);
4586         list_for_each_entry(provider, &of_clk_providers, link) {
4587                 if (provider->node == clkspec->np) {
4588                         hw = __of_clk_get_hw_from_provider(provider, clkspec);
4589                         if (!IS_ERR(hw))
4590                                 break;
4591                 }
4592         }
4593         mutex_unlock(&of_clk_mutex);
4594 
4595         return hw;
4596 }
4597 
4598 /**
4599  * of_clk_get_from_provider() - Lookup a clock from a clock provider
4600  * @clkspec: pointer to a clock specifier data structure
4601  *
4602  * This function looks up a struct clk from the registered list of clock
4603  * providers. Its input is a clock specifier data structure as returned
4604  * by the of_parse_phandle_with_args() function call.
4605  */
4606 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4607 {
4608         struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4609 
4610         return clk_hw_create_clk(NULL, hw, NULL, __func__);
4611 }
4612 EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4613 
4614 struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4615                              const char *con_id)
4616 {
4617         int ret;
4618         struct clk_hw *hw;
4619         struct of_phandle_args clkspec;
4620 
4621         ret = of_parse_clkspec(np, index, con_id, &clkspec);
4622         if (ret)
4623                 return ERR_PTR(ret);
4624 
4625         hw = of_clk_get_hw_from_clkspec(&clkspec);
4626         of_node_put(clkspec.np);
4627 
4628         return hw;
4629 }
4630 
4631 static struct clk *__of_clk_get(struct device_node *np,
4632                                 int index, const char *dev_id,
4633                                 const char *con_id)
4634 {
4635         struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
4636 
4637         return clk_hw_create_clk(NULL, hw, dev_id, con_id);
4638 }
4639 
4640 struct clk *of_clk_get(struct device_node *np, int index)
4641 {
4642         return __of_clk_get(np, index, np->full_name, NULL);
4643 }
4644 EXPORT_SYMBOL(of_clk_get);
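
/*
 * Example (editorial sketch): grabbing the second entry of a consumer's
 * "clocks" property by position. The reference must eventually be
 * balanced with clk_put(); the helper name and "np" are hypothetical.
 */
static int example_enable_second_clock(struct device_node *np)
{
	struct clk *clk = of_clk_get(np, 1);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		clk_put(clk);
	/* on success the caller keeps the reference until teardown */
	return ret;
}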
4645 
4646 /**
4647  * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4648  * @np: pointer to clock consumer node
4649  * @name: name of consumer's clock input, or NULL for the first clock reference
4650  *
4651  * This function parses the clocks and clock-names properties,
4652  * and uses them to look up the struct clk from the registered list of clock
4653  * providers.
4654  */
4655 struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
4656 {
4657         if (!np)
4658                 return ERR_PTR(-ENOENT);
4659 
4660         return __of_clk_get(np, 0, np->full_name, name);
4661 }
4662 EXPORT_SYMBOL(of_clk_get_by_name);
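
/*
 * Example (editorial sketch): with a hypothetical consumer node such as
 *
 *	serial@1000 {
 *		clocks = <&pclk>, <&baudclk>;
 *		clock-names = "apb_pclk", "baud";
 *	};
 *
 * the baud clock is looked up by its connection name:
 *
 *	struct clk *baud = of_clk_get_by_name(np, "baud");
 *
 * Passing name == NULL instead returns the first entry, here <&pclk>.
 */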
4663 
4664 /**
4665  * of_clk_get_parent_count() - Count the number of clocks a device node has
4666  * @np: device node to count
4667  *
4668  * Returns: The number of clocks that are possible parents of this node
4669  */
4670 unsigned int of_clk_get_parent_count(struct device_node *np)
4671 {
4672         int count;
4673 
4674         count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4675         if (count < 0)
4676                 return 0;
4677 
4678         return count;
4679 }
4680 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
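
/*
 * Example (editorial sketch): because any parse failure is folded into
 * a zero return, callers can treat "no clocks property" and a malformed
 * one alike. The helper name is hypothetical.
 */
static bool example_has_clock_parents(struct device_node *np)
{
	return of_clk_get_parent_count(np) > 0;
}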
4681 
4682 const char *of_clk_get_parent_name(struct device_node *np, int index)
4683 {
4684         struct of_phandle_args clkspec;
4685         struct property *prop;
4686         const char *clk_name;
4687         const __be32 *vp;
4688         u32 pv;
4689         int rc;
4690         int count;
4691         struct clk *clk;
4692 
4693         rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4694                                         &clkspec);
4695         if (rc)
4696                 return NULL;
4697 
4698         index = clkspec.args_count ? clkspec.args[0] : 0;
4699         count = 0;
4700 
4701         /* If there is a "clock-indices" property, use it to translate
4702          * the specified index into an offset into clock-output-names.
4703          */
4704         of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4705                 if (index == pv) {
4706                         index = count;
4707                         break;
4708                 }
4709                 count++;
4710         }
4711         /* We went off the end of 'clock-indices' without finding it */
4712         if (prop && !vp)
4713                 return NULL;
4714 
4715         if (of_property_read_string_index(clkspec.np, "clock-output-names",
4716                                           index,
4717                                           &clk_name) < 0) {
4718                 /*
4719                  * Best effort to get the name if the clock has been
4720                  * registered with the framework. If the clock isn't
4721                  * registered, we return the node name as the name of
4722                  * the clock as long as #clock-cells = 0.
4723                  */
4724                 clk = of_clk_get_from_provider(&clkspec);
4725                 if (IS_ERR(clk)) {
4726                         if (clkspec.args_count == 0)
4727                                 clk_name = clkspec.np->name;
4728                         else
4729                                 clk_name = NULL;
4730                 } else {
4731                         clk_name = __clk_get_name(clk);
4732                         clk_put(clk);
4733                 }
4734         }
4735 
4737         of_node_put(clkspec.np);
4738         return clk_name;
4739 }
4740 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
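
/*
 * Example (editorial sketch): given a hypothetical provider that uses
 * sparse output indices,
 *
 *	clkc: clock-controller@1000 {
 *		#clock-cells = <1>;
 *		clock-indices = <1>, <3>;
 *		clock-output-names = "clka", "clkb";
 *	};
 *
 * a consumer reference <&clkc 3> resolves to "clkb": the specifier
 * value 3 is translated through "clock-indices" to array offset 1
 * before "clock-output-names" is read.
 */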
4741 
4742 /**
4743  * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4744  * number of parents
4745  * @np: Device node pointer associated with clock provider
4746  * @parents: pointer to char array that holds the parents' names
4747  * @size: size of the @parents array
4748  *
4749  * Return: number of parents for the clock node.
4750  */
4751 int of_clk_parent_fill(struct device_node *np, const char **parents,
4752                        unsigned int size)
4753 {
4754         unsigned int i = 0;
4755 
4756         while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
4757                 i++;
4758 
4759         return i;
4760 }
4761 EXPORT_SYMBOL_GPL(of_clk_parent_fill);
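
/*
 * Example (editorial sketch): the usual pairing with
 * of_clk_get_parent_count() when collecting parent names for a mux.
 * Only the allocation failure is handled; all names are hypothetical.
 */
static int example_collect_parents(struct device_node *np)
{
	unsigned int num_parents = of_clk_get_parent_count(np);
	const char **parents;

	if (!num_parents)
		return -EINVAL;

	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	if (!parents)
		return -ENOMEM;

	of_clk_parent_fill(np, parents, num_parents);
	/* ... hand "parents" to a mux registration helper ... */
	kfree(parents);
	return 0;
}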
4762 
4763 struct clock_provider {
4764         void (*clk_init_cb)(struct device_node *);
4765         struct device_node *np;
4766         struct list_head node;
4767 };
4768 
4769 /*
4770  * This function checks whether all of a node's parent clocks are
4771  * ready, i.e. whether the provider for each parent clock has been
4772  * initialized, in which case looking the parent up will succeed.
4773  */
4774 static int parent_ready(struct device_node *np)
4775 {
4776         int i = 0;
4777 
4778         while (true) {
4779                 struct clk *clk = of_clk_get(np, i);
4780 
4781                 /* this parent is ready, check the next one */
4782                 if (!IS_ERR(clk)) {
4783                         clk_put(clk);
4784                         i++;
4785                         continue;
4786                 }
4787 
4788                 /* at least one parent is not ready, we exit now */
4789                 if (PTR_ERR(clk) == -EPROBE_DEFER)
4790                         return 0;
4791 
4792                 /*
4793                  * Here we assume that the device tree is written
4794                  * correctly, so any other error means that there are
4795                  * no more parents. Since we haven't returned yet, all
4796                  * previous parents are ready. A node with no clock
4797                  * parents at all has nothing to wait for, so we treat
4798                  * that as ready too.
4799                  */
4800                 return 1;
4801         }
4802 }
4803 
4804 /**
4805  * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4806  * @np: Device node pointer associated with clock provider
4807  * @index: clock index
4808  * @flags: pointer to top-level framework flags
4809  *
4810  * Detects if the clock-critical property exists and, if so, sets the
4811  * corresponding CLK_IS_CRITICAL flag.
4812  *
4813  * Do not use this function. It exists only for legacy Device Tree
4814  * bindings, such as the one-clock-per-node style, which is outdated.
4815  * Those bindings typically put all clock data into .dts and the Linux
4816  * driver has no clock data, thus making it impossible to set this flag
4817  * correctly from the driver. Only those drivers may call
4818  * of_clk_detect_critical() from their setup functions.
4819  *
4820  * Return: error code or zero on success
4821  */
4822 int of_clk_detect_critical(struct device_node *np,
4823                                           int index, unsigned long *flags)
4824 {
4825         struct property *prop;
4826         const __be32 *cur;
4827         uint32_t idx;
4828 
4829         if (!np || !flags)
4830                 return -EINVAL;
4831 
4832         of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
4833                 if (index == idx)
4834                         *flags |= CLK_IS_CRITICAL;
4835 
4836         return 0;
4837 }
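
/*
 * Example (editorial sketch): a legacy one-clock-per-node driver
 * honoring the deprecated "clock-critical" property while registering a
 * fixed-rate clock. The setup function name, rate and provider details
 * are hypothetical.
 */
static void __init example_clk_setup(struct device_node *np)
{
	unsigned long flags = 0;
	struct clk_hw *hw;

	if (of_clk_detect_critical(np, 0, &flags))
		return;

	hw = clk_hw_register_fixed_rate(NULL, np->name, NULL, flags, 32768);
	if (!IS_ERR(hw))
		of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}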
4838 
4839 /**
4840  * of_clk_init() - Scan and init clock providers from the DT
4841  * @matches: array of compatible values and init functions for providers.
4842  *
4843  * This function scans the device tree for matching clock providers
4844  * and calls their initialization functions, following the declared
4845  * parent dependencies where possible.
4846  */
4847 void __init of_clk_init(const struct of_device_id *matches)
4848 {
4849         const struct of_device_id *match;
4850         struct device_node *np;
4851         struct clock_provider *clk_provider, *next;
4852         bool is_init_done;
4853         bool force = false;
4854         LIST_HEAD(clk_provider_list);
4855 
4856         if (!matches)
4857                 matches = &__clk_of_table;
4858 
4859         /* First prepare the list of the clocks providers */
4860         for_each_matching_node_and_match(np, matches, &match) {
4861                 struct clock_provider *parent;
4862 
4863                 if (!of_device_is_available(np))
4864                         continue;
4865 
4866                 parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4867                 if (!parent) {
4868                         list_for_each_entry_safe(clk_provider, next,
4869                                                  &clk_provider_list, node) {
4870                                 list_del(&clk_provider->node);
4871                                 of_node_put(clk_provider->np);
4872                                 kfree(clk_provider);
4873                         }
4874                         of_node_put(np);
4875                         return;
4876                 }
4877 
4878                 parent->clk_init_cb = match->data;
4879                 parent->np = of_node_get(np);
4880                 list_add_tail(&parent->node, &clk_provider_list);
4881         }
4882 
4883         while (!list_empty(&clk_provider_list)) {
4884                 is_init_done = false;
4885                 list_for_each_entry_safe(clk_provider, next,
4886                                         &clk_provider_list, node) {
4887                         if (force || parent_ready(clk_provider->np)) {
4888 
4889                                 /* Don't populate platform devices */
4890                                 of_node_set_flag(clk_provider->np,
4891                                                  OF_POPULATED);
4892 
4893                                 clk_provider->clk_init_cb(clk_provider->np);
4894                                 of_clk_set_defaults(clk_provider->np, true);
4895 
4896                                 list_del(&clk_provider->node);
4897                                 of_node_put(clk_provider->np);
4898                                 kfree(clk_provider);
4899                                 is_init_done = true;
4900                         }
4901                 }
4902 
4903                 /*
4904                  * We didn't manage to initialize any of the
4905                  * remaining providers during the last pass, so now
4906                  * initialize all the remaining ones unconditionally,
4907                  * in case a clock parent was not mandatory.
4908                  */
4909                 if (!is_init_done)
4910                         force = true;
4911         }
4912 }
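
/*
 * Example (editorial sketch): providers hook into the match table with
 * CLK_OF_DECLARE(), and early architecture code then calls
 * of_clk_init(NULL) so that the fallback to __clk_of_table above kicks
 * in. The compatible string is made up; the setup function reuses the
 * hypothetical sketch above.
 */
CLK_OF_DECLARE(example_osc, "vendor,example-osc", example_clk_setup);

void __init example_time_init(void)
{
	of_clk_init(NULL);
}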
4913 #endif
