This source file includes the following definitions:
- _register_dpll
- _register_dpll_x2
- of_ti_dpll_setup
- of_ti_omap4_dpll_x2_setup
- of_ti_am3_dpll_x2_setup
- of_ti_omap3_dpll_setup
- of_ti_omap3_core_dpll_setup
- of_ti_omap3_per_dpll_setup
- of_ti_omap3_per_jtype_dpll_setup
- of_ti_omap4_dpll_setup
- of_ti_omap5_mpu_dpll_setup
- of_ti_omap4_core_dpll_setup
- of_ti_omap4_m4xen_dpll_setup
- of_ti_omap4_jtype_dpll_setup
- of_ti_am3_no_gate_dpll_setup
- of_ti_am3_jtype_dpll_setup
- of_ti_am3_no_gate_jtype_dpll_setup
- of_ti_am3_dpll_setup
- of_ti_am3_core_dpll_setup
- of_ti_omap2_core_dpll_setup
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX)
static const struct clk_ops dpll_m4xen_ck_ops = {
        .enable = &omap3_noncore_dpll_enable,
        .disable = &omap3_noncore_dpll_disable,
        .recalc_rate = &omap4_dpll_regm4xen_recalc,
        .round_rate = &omap4_dpll_regm4xen_round_rate,
        .set_rate = &omap3_noncore_dpll_set_rate,
        .set_parent = &omap3_noncore_dpll_set_parent,
        .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
        .determine_rate = &omap4_dpll_regm4xen_determine_rate,
        .get_parent = &omap2_init_dpll_parent,
        .save_context = &omap3_core_dpll_save_context,
        .restore_context = &omap3_core_dpll_restore_context,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif
48
49 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
50 defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
51 defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
52 static const struct clk_ops dpll_core_ck_ops = {
53 .recalc_rate = &omap3_dpll_recalc,
54 .get_parent = &omap2_init_dpll_parent,
55 };
56
57 static const struct clk_ops dpll_ck_ops = {
58 .enable = &omap3_noncore_dpll_enable,
59 .disable = &omap3_noncore_dpll_disable,
60 .recalc_rate = &omap3_dpll_recalc,
61 .round_rate = &omap2_dpll_round_rate,
62 .set_rate = &omap3_noncore_dpll_set_rate,
63 .set_parent = &omap3_noncore_dpll_set_parent,
64 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
65 .determine_rate = &omap3_noncore_dpll_determine_rate,
66 .get_parent = &omap2_init_dpll_parent,
67 .save_context = &omap3_noncore_dpll_save_context,
68 .restore_context = &omap3_noncore_dpll_restore_context,
69 };
70
71 static const struct clk_ops dpll_no_gate_ck_ops = {
72 .recalc_rate = &omap3_dpll_recalc,
73 .get_parent = &omap2_init_dpll_parent,
74 .round_rate = &omap2_dpll_round_rate,
75 .set_rate = &omap3_noncore_dpll_set_rate,
76 .set_parent = &omap3_noncore_dpll_set_parent,
77 .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
78 .determine_rate = &omap3_noncore_dpll_determine_rate,
79 .save_context = &omap3_noncore_dpll_save_context,
80 .restore_context = &omap3_noncore_dpll_restore_context
81 };
82 #else
83 static const struct clk_ops dpll_core_ck_ops = {};
84 static const struct clk_ops dpll_ck_ops = {};
85 static const struct clk_ops dpll_no_gate_ck_ops = {};
86 const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
87 #endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
        .get_parent = &omap2_init_dpll_parent,
        .recalc_rate = &omap2_dpllcore_recalc,
        .round_rate = &omap2_dpll_round_rate,
        .set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
        .get_parent = &omap2_init_dpll_parent,
        .recalc_rate = &omap3_dpll_recalc,
        .round_rate = &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
        .enable = &omap3_noncore_dpll_enable,
        .disable = &omap3_noncore_dpll_disable,
        .get_parent = &omap2_init_dpll_parent,
        .recalc_rate = &omap3_dpll_recalc,
        .set_rate = &omap3_noncore_dpll_set_rate,
        .set_parent = &omap3_noncore_dpll_set_parent,
        .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
        .determine_rate = &omap3_noncore_dpll_determine_rate,
        .round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
        .enable = &omap3_noncore_dpll_enable,
        .disable = &omap3_noncore_dpll_disable,
        .get_parent = &omap2_init_dpll_parent,
        .recalc_rate = &omap3_dpll_recalc,
        .set_rate = &omap3_dpll5_set_rate,
        .set_parent = &omap3_noncore_dpll_set_parent,
        .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
        .determine_rate = &omap3_noncore_dpll_determine_rate,
        .round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
        .enable = &omap3_noncore_dpll_enable,
        .disable = &omap3_noncore_dpll_disable,
        .get_parent = &omap2_init_dpll_parent,
        .recalc_rate = &omap3_dpll_recalc,
        .set_rate = &omap3_dpll4_set_rate,
        .set_parent = &omap3_noncore_dpll_set_parent,
        .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
        .determine_rate = &omap3_noncore_dpll_determine_rate,
        .round_rate = &omap2_dpll_round_rate,
};
#endif

static const struct clk_ops dpll_x2_ck_ops = {
        .recalc_rate = &omap3_clkoutx2_recalc,
};

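/**
 * _register_dpll - low level registration of a DPLL clock
 * @user: pointer to the struct clk_hw of the clock being registered
 * @node: device node for the clock
 *
 * Finalizes DPLL registration: looks up the reference and bypass clocks
 * from the device tree (deferring via ti_clk_retry_init() if either is
 * not yet available), registers the clock and adds it as an OF clock
 * provider.
 */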
static void __init _register_dpll(void *user,
                                  struct device_node *node)
{
        struct clk_hw *hw = user;
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *dd = clk_hw->dpll_data;
        struct clk *clk;
        const struct clk_init_data *init = hw->init;

        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_debug("clk-ref missing for %pOFn, retry later\n",
                         node);
                if (!ti_clk_retry_init(node, hw, _register_dpll))
                        return;

                goto cleanup;
        }

        dd->clk_ref = __clk_get_hw(clk);

        clk = of_clk_get(node, 1);

        if (IS_ERR(clk)) {
                pr_debug("clk-bypass missing for %pOFn, retry later\n",
                         node);
                if (!ti_clk_retry_init(node, hw, _register_dpll))
                        return;

                goto cleanup;
        }

        dd->clk_bypass = __clk_get_hw(clk);

        /* register the clock */
        clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);

        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                kfree(init->parent_names);
                kfree(init);
                return;
        }

cleanup:
        kfree(clk_hw->dpll_data);
        kfree(init->parent_names);
        kfree(init);
        kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
        defined(CONFIG_SOC_AM43XX)
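
/**
 * _register_dpll_x2 - register a DPLL x2 clock described by @node
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_omap_ops for this clock
 *
 * Registers the doubled (x2) output of a parent DPLL from device tree
 * data and adds it as an OF clock provider.
 */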
static void _register_dpll_x2(struct device_node *node,
                              const struct clk_ops *ops,
                              const struct clk_hw_omap_ops *hw_ops)
{
        struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
        const char *name = node->name;
        const char *parent_name;

        parent_name = of_clk_get_parent_name(node, 0);
        if (!parent_name) {
                pr_err("%pOFn must have parent\n", node);
                return;
        }

        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
                return;

        clk_hw->ops = hw_ops;
        clk_hw->hw.init = &init;

        init.name = name;
        init.ops = ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX)
        if (hw_ops == &clkhwops_omap4_dpllmx) {
                int ret;

                /* Check if register defined, if not, drop hw-ops */
                ret = of_property_count_elems_of_size(node, "reg", 1);
                if (ret <= 0) {
                        clk_hw->ops = NULL;
                } else if (ti_clk_get_reg_addr(node, 0, &clk_hw->clksel_reg)) {
                        kfree(clk_hw);
                        return;
                }
        }
#endif

        /* register the clock */
        clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name);

        if (IS_ERR(clk))
                kfree(clk_hw);
        else
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
#endif

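/**
 * of_ti_dpll_setup - common setup code for TI DPLL clock nodes
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @ddt: template dpll_data to duplicate for this clock
 *
 * Parses the DPLL description from the device tree (parents, the
 * control / idlest / mult_div1 / autoidle registers and the
 * ti,low-power-stop, ti,low-power-bypass and ti,lock properties) and
 * hands the result to _register_dpll().
 *
 * A minimal, hypothetical device-tree node consumed by this code could
 * look as follows (node name, register offsets and parent phandles are
 * illustrative only, not taken from a real SoC dtsi):
 *
 *      dpll_example_ck: clock@500 {
 *              #clock-cells = <0>;
 *              compatible = "ti,omap4-dpll-clock";
 *              clocks = <&ref_ck>, <&bypass_ck>;
 *              reg = <0x500>, <0x50c>, <0x510>, <0x508>;
 *      };
 *
 * clocks[0] is used as the reference clock and clocks[1] as the bypass
 * clock; for this template the reg entries map, in order, to the
 * control, idlest, mult_div1 and autoidle registers parsed below.
 */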
static void __init of_ti_dpll_setup(struct device_node *node,
                                    const struct clk_ops *ops,
                                    const struct dpll_data *ddt)
{
        struct clk_hw_omap *clk_hw = NULL;
        struct clk_init_data *init = NULL;
        const char **parent_names = NULL;
        struct dpll_data *dd = NULL;
        u8 dpll_mode = 0;

        dd = kmemdup(ddt, sizeof(*dd), GFP_KERNEL);
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        init = kzalloc(sizeof(*init), GFP_KERNEL);
        if (!dd || !clk_hw || !init)
                goto cleanup;

        clk_hw->dpll_data = dd;
        clk_hw->ops = &clkhwops_omap3_dpll;
        clk_hw->hw.init = init;

        init->name = node->name;
        init->ops = ops;

        init->num_parents = of_clk_get_parent_count(node);
        if (!init->num_parents) {
                pr_err("%pOFn must have parent(s)\n", node);
                goto cleanup;
        }

        parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
        if (!parent_names)
                goto cleanup;

        of_clk_parent_fill(node, parent_names, init->num_parents);

        init->parent_names = parent_names;

        if (ti_clk_get_reg_addr(node, 0, &dd->control_reg))
                goto cleanup;

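        /*
         * OMAP2 core DPLLs have no idlest register (no idlest_mask in
         * the template data): in that case the second reg entry is the
         * mult_div1 register and the OMAP2-specific clkhwops are used.
         * Otherwise reg entries 1 and 2 are the idlest and mult_div1
         * registers, respectively.
         */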
        if (!dd->idlest_mask) {
                if (ti_clk_get_reg_addr(node, 1, &dd->mult_div1_reg))
                        goto cleanup;
#ifdef CONFIG_ARCH_OMAP2
                clk_hw->ops = &clkhwops_omap2xxx_dpll;
                omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
        } else {
                if (ti_clk_get_reg_addr(node, 1, &dd->idlest_reg))
                        goto cleanup;

                if (ti_clk_get_reg_addr(node, 2, &dd->mult_div1_reg))
                        goto cleanup;
        }

        if (dd->autoidle_mask) {
                if (ti_clk_get_reg_addr(node, 3, &dd->autoidle_reg))
                        goto cleanup;
        }

        if (of_property_read_bool(node, "ti,low-power-stop"))
                dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

        if (of_property_read_bool(node, "ti,low-power-bypass"))
                dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

        if (of_property_read_bool(node, "ti,lock"))
                dpll_mode |= 1 << DPLL_LOCKED;

        if (dpll_mode)
                dd->modes = dpll_mode;

        _register_dpll(&clk_hw->hw, node);
        return;

cleanup:
        kfree(dd);
        kfree(parent_names);
        kfree(init);
        kfree(clk_hw);
}

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
        _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
        _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
               of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .freqsel_mask = 0xf0,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        if ((of_machine_is_compatible("ti,omap3630") ||
             of_machine_is_compatible("ti,omap36xx")) &&
            of_node_name_eq(node, "dpll5_ck"))
                of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
        else
                of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
               of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 16,
                .div1_mask = 0x7f << 8,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .freqsel_mask = 0xf0,
        };

        of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
               of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1 << 1,
                .enable_mask = 0x7 << 16,
                .autoidle_mask = 0x7 << 3,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .freqsel_mask = 0xf00000,
                .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
               of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1 << 1,
                .enable_mask = 0x7 << 16,
                .autoidle_mask = 0x7 << 3,
                .mult_mask = 0xfff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 4095,
                .max_divider = 128,
                .min_divider = 1,
                .sddiv_mask = 0xff << 24,
                .dco_mask = 0xe << 20,
                .flags = DPLL_J_TYPE,
                .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
               of_ti_omap3_per_jtype_dpll_setup);
#endif

static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
               of_ti_omap4_dpll_setup);

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .dcc_mask = BIT(22),
                .dcc_rate = 1400000000,
                .min_divider = 1,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
               of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
               of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .m4xen_mask = 0x800,
                .lpmode_mask = 1 << 10,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
               of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .autoidle_mask = 0x7,
                .mult_mask = 0xfff << 8,
                .div1_mask = 0xff,
                .max_multiplier = 4095,
                .max_divider = 256,
                .min_divider = 1,
                .sddiv_mask = 0xff << 24,
                .flags = DPLL_J_TYPE,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
               of_ti_omap4_jtype_dpll_setup);
#endif

static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .max_rate = 1000000000,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
               of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 4095,
                .max_divider = 256,
                .min_divider = 2,
                .flags = DPLL_J_TYPE,
                .max_rate = 2000000000,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
               of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .max_rate = 2000000000,
                .flags = DPLL_J_TYPE,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
               "ti,am3-dpll-no-gate-j-type-clock",
               of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .max_rate = 1000000000,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .idlest_mask = 0x1,
                .enable_mask = 0x7,
                .mult_mask = 0x7ff << 8,
                .div1_mask = 0x7f,
                .max_multiplier = 2047,
                .max_divider = 128,
                .min_divider = 1,
                .max_rate = 1000000000,
                .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
        };

        of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
               of_ti_am3_core_dpll_setup);

static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
        const struct dpll_data dd = {
                .enable_mask = 0x3,
                .mult_mask = 0x3ff << 12,
                .div1_mask = 0xf << 8,
                .max_divider = 16,
                .min_divider = 1,
        };

        of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
               of_ti_omap2_core_dpll_setup);