This source file includes the following definitions:
- wait_until_divider_stable
- wait_until_mux_stable
- exynos_cpuclk_round_rate
- exynos_cpuclk_recalc_rate
- exynos_set_safe_div
- exynos_cpuclk_pre_rate_change
- exynos_cpuclk_post_rate_change
- exynos5433_set_safe_div
- exynos5433_cpuclk_pre_rate_change
- exynos5433_cpuclk_post_rate_change
- exynos_cpuclk_notifier_cb
- exynos5433_cpuclk_notifier_cb
- exynos_register_cpu_clock
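/*
 * Utility functions to register CPU clocks for Samsung Exynos SoCs.
 *
 * The CPU clock output (armclk) runs at the rate of its parent PLL. During
 * a rate change, armclk is temporarily re-parented to an alternate "safe"
 * parent and safe divider values are programmed, so that armclk never runs
 * faster than permitted while the PLL switches to its new rate.
 */
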
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk-cpu.h"

#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

#define E5433_MUX_SEL2		0x008
#define E5433_MUX_STAT2		0x208
#define E5433_DIV_CPU0		0x400
#define E5433_DIV_CPU1		0x404
#define E5433_DIV_STAT_CPU0	0x500
#define E5433_DIV_STAT_CPU1	0x504

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7

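/*
 * Helper function to wait until the divider(s) have stabilized after the
 * divider value has changed.
 */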
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

	/* one last check in case the loop was preempted past the timeout */
	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

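/*
 * Helper function to wait until the mux has stabilized after the mux
 * selection value has changed.
 */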
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
					unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	/* one last check in case the loop was preempted past the timeout */
	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed out\n", __func__);
}

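/* common round_rate callback usable for all types of CPU clock */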
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	*prate = clk_hw_round_rate(parent, drate);
	return *prate;
}

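/* common recalc_rate callback usable for all types of CPU clock */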
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
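	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. The dividers inside the CPU clock block are used only
	 * temporarily during rate transitions, so they do not affect the
	 * steady-state rate reported here.
	 */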
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};

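/*
 * Helper function to set the 'safe' dividers for the CPU clock. The
 * parameters div and mask contain the divider value and the register bit
 * mask of the dividers to be programmed.
 */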
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
					unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

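/* handler for pre-rate change notification from parent clock */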
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

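	/* find the divider values for the new rate (cfg_data->prate is in kHz) */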
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

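	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for the DIV_COPY and DIV_HPM dividers need not be set.
	 */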
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

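	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_rate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of the armclk output clocks).
	 */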
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
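			/*
			 * In Exynos4210, the ATB clock parent is also
			 * mout_core, so the ATB clock also needs to be
			 * maintained at a safe speed.
			 */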
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

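	/* select sclk_mpll as the parent now */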
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

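	/* the alternate parent is active now; set the dividers */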
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
					DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

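/* handler for post-rate change notification from parent clock */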
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

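	/* find out the divider values to use for clock data */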
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

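	/* select mout_apll as the parent clock now */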
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

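/*
 * Helper function to set the 'safe' dividers for the Exynos5433 CPU clock.
 * The parameters div and mask contain the divider value and the register bit
 * mask of the dividers to be programmed.
 */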
static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
					unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E5433_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
}

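/* handler for pre-rate change notification from parent clock */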
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

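	/* find the divider values for the new rate (cfg_data->prate is in kHz) */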
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

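	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */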
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

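	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_rate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of the armclk output clocks).
	 */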
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

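	/* select the alternate parent */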
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

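	/* the alternate parent is active now; set the dividers */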
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

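/* handler for post-rate change notification from parent clock */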
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

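	/* select the primary parent now */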
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

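/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */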
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

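/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk (Exynos5433 register layout).
 */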
static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

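/* helper function to register a CPU clock */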
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name, const char *parent,
		const char *alt_parent, unsigned long offset,
		const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	struct clk *parent_clk;
	int ret = 0;

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
		cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
	else
		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

	cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
	if (!cpuclk->alt_parent) {
		pr_err("%s: could not lookup alternate parent %s\n",
				__func__, alt_parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	parent_clk = __clk_lookup(parent);
	if (!parent_clk) {
		pr_err("%s: could not lookup parent clock %s\n",
				__func__, parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	ret = clk_notifier_register(parent_clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_cpuclk;
	}

	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	ret = clk_hw_register(NULL, &cpuclk->hw);
	if (ret) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(parent_clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}