This source file includes the following definitions:
- ap_cpu_clk_recalc_rate
- ap_cpu_clk_set_rate
- ap_cpu_clk_round_rate
- ap_cpu_clock_probe
#define pr_fmt(fmt) "ap-cpu-clk: " fmt

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "armada_ap_cp_helper.h"

#define AP806_CPU_CLUSTER0		0
#define AP806_CPU_CLUSTER1		1
#define AP806_CPUS_PER_CLUSTER		2
#define APN806_CPU1_MASK		0x1

#define APN806_CLUSTER_NUM_OFFSET	8
#define APN806_CLUSTER_NUM_MASK		BIT(APN806_CLUSTER_NUM_OFFSET)

#define APN806_MAX_DIVIDER		32

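/*
 * struct cpu_dfs_regs - per-SoC CPU DFS register layout. Field roles below
 * are descriptive and inferred from how the driver uses them.
 * @divider_reg: control register holding the CPU clock divider
 * @force_reg: register carrying the "force new ratio" request bits
 * @ratio_reg: register carrying the "reload ratio" request bit
 * @ratio_state_reg: status register reporting that the new ratio is stable
 * @divider_mask: mask of the divider field inside @divider_reg
 * @cluster_offset: register stride between CPU clusters
 * @force_mask: mask of the force-reload bits inside @force_reg
 * @divider_offset: bit offset of the divider field
 * @divider_ratio: secondary divider ratio (non-zero on AP807 only)
 * @ratio_offset: bit offset of the reload-ratio bit
 * @ratio_state_offset: bit offset of the ratio-stable bit
 * @ratio_state_cluster_offset: per-cluster offset of the ratio-stable bit
 */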
struct cpu_dfs_regs {
	unsigned int divider_reg;
	unsigned int force_reg;
	unsigned int ratio_reg;
	unsigned int ratio_state_reg;
	unsigned int divider_mask;
	unsigned int cluster_offset;
	unsigned int force_mask;
	int divider_offset;
	int divider_ratio;
	int ratio_offset;
	int ratio_state_offset;
	int ratio_state_cluster_offset;
};

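/* AP806 CPU DFS register offsets and field layout */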
#define AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET		0x278
#define AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET		0x280
#define AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET		0x284
#define AP806_CA72MP2_0_PLL_SR_REG_OFFSET		0xC94

#define AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET		0x14
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET		0
#define AP806_PLL_CR_CPU_CLK_DIV_RATIO			0
#define AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET	24
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
		(0x1 << AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET	16
#define AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET		0
#define AP806_CA72MP2_0_PLL_RATIO_STATE			11

#define STATUS_POLL_PERIOD_US		1
#define STATUS_POLL_TIMEOUT_US		1000000

#define to_ap_cpu_clk(_hw) container_of(_hw, struct ap_cpu_clk, hw)

static const struct cpu_dfs_regs ap806_dfs_regs = {
	.divider_reg = AP806_CA72MP2_0_PLL_CR_0_REG_OFFSET,
	.force_reg = AP806_CA72MP2_0_PLL_CR_1_REG_OFFSET,
	.ratio_reg = AP806_CA72MP2_0_PLL_CR_2_REG_OFFSET,
	.ratio_state_reg = AP806_CA72MP2_0_PLL_SR_REG_OFFSET,
	.divider_mask = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP806_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP806_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP806_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP806_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP806_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset = AP806_CA72MP2_0_PLL_RATIO_STABLE_OFFSET,
};

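/* AP807 CPU DFS register offsets and field layout */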
#define AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET	0x278
#define AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET	0x27c
#define AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET	0xc98
#define AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET		0x8
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET		18
#define AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET		12
#define AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK \
		(0x3f << AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET)
#define AP807_PLL_CR_CPU_CLK_DIV_RATIO			3
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET	0
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK \
		(0x3 << AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_OFFSET)
#define AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET	6
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET	20
#define AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET	3

static const struct cpu_dfs_regs ap807_dfs_regs = {
	.divider_reg = AP807_DEVICE_GENERAL_CONTROL_10_REG_OFFSET,
	.force_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_reg = AP807_DEVICE_GENERAL_CONTROL_11_REG_OFFSET,
	.ratio_state_reg = AP807_DEVICE_GENERAL_STATUS_6_REG_OFFSET,
	.divider_mask = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_MASK,
	.cluster_offset = AP807_CA72MP2_0_PLL_CR_CLUSTER_OFFSET,
	.force_mask = AP807_PLL_CR_0_CPU_CLK_RELOAD_FORCE_MASK,
	.divider_offset = AP807_PLL_CR_0_CPU_CLK_DIV_RATIO_OFFSET,
	.divider_ratio = AP807_PLL_CR_CPU_CLK_DIV_RATIO,
	.ratio_offset = AP807_PLL_CR_0_CPU_CLK_RELOAD_RATIO_OFFSET,
	.ratio_state_offset = AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_OFFSET,
	.ratio_state_cluster_offset =
		AP807_CA72MP2_0_PLL_CLKDIV_RATIO_STABLE_CLUSTER_OFFSET
};

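/*
 * struct ap_cpu_clk - per-cluster CPU clock. Field roles below are
 * descriptive and inferred from the code in this file.
 * @cluster: index of the cluster this clock drives
 * @clk_name: unique clock name generated with ap_cp_unique_name()
 * @dev: the owning platform device
 * @hw: clk_hw handle registered with the common clock framework
 * @pll_cr_base: syscon regmap covering the PLL control/status registers
 * @pll_regs: per-SoC (AP806/AP807) register layout
 */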
struct ap_cpu_clk {
	unsigned int cluster;
	const char *clk_name;
	struct device *dev;
	struct clk_hw hw;
	struct regmap *pll_cr_base;
	const struct cpu_dfs_regs *pll_regs;
};

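/* The CPU rate is the parent (PLL) rate divided by the cluster's divider field */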
static unsigned long ap_cpu_clk_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	unsigned int cpu_clkdiv_reg;
	int cpu_clkdiv_ratio;

	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &cpu_clkdiv_ratio);
	cpu_clkdiv_ratio &= clk->pll_regs->divider_mask;
	cpu_clkdiv_ratio >>= clk->pll_regs->divider_offset;

	return parent_rate / cpu_clkdiv_ratio;
}

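/*
 * Program the new divider, then force and trigger a ratio reload for the
 * cluster, and wait until the hardware reports the new ratio as stable.
 */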
static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct ap_cpu_clk *clk = to_ap_cpu_clk(hw);
	int ret, reg, divider = parent_rate / rate;
	unsigned int cpu_clkdiv_reg, cpu_force_reg, cpu_ratio_reg, stable_bit;

	cpu_clkdiv_reg = clk->pll_regs->divider_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_force_reg = clk->pll_regs->force_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);
	cpu_ratio_reg = clk->pll_regs->ratio_reg +
		(clk->cluster * clk->pll_regs->cluster_offset);

	regmap_read(clk->pll_cr_base, cpu_clkdiv_reg, &reg);
	reg &= ~(clk->pll_regs->divider_mask);
	reg |= (divider << clk->pll_regs->divider_offset);

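	/*
	 * On AP807 a second divider field must also be programmed (to
	 * divider * divider_ratio); AP806 uses a divider_ratio of 0, so
	 * this block is skipped there.
	 */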
	if (clk->pll_regs->divider_ratio) {
		reg &= ~(AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_MASK);
		reg |= ((divider * clk->pll_regs->divider_ratio) <<
			AP807_PLL_CR_1_CPU_CLK_DIV_RATIO_OFFSET);
	}
	regmap_write(clk->pll_cr_base, cpu_clkdiv_reg, reg);

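	/* Set the force-reload bits, then request the ratio reload for this cluster */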
	regmap_update_bits(clk->pll_cr_base, cpu_force_reg,
			   clk->pll_regs->force_mask,
			   clk->pll_regs->force_mask);

	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset),
			   BIT(clk->pll_regs->ratio_offset));

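	/* Wait for the new ratio to be reported stable, then clear the reload request */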
	stable_bit = BIT(clk->pll_regs->ratio_state_offset +
			 clk->cluster *
			 clk->pll_regs->ratio_state_cluster_offset);
	ret = regmap_read_poll_timeout(clk->pll_cr_base,
				       clk->pll_regs->ratio_state_reg, reg,
				       reg & stable_bit, STATUS_POLL_PERIOD_US,
				       STATUS_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	regmap_update_bits(clk->pll_cr_base, cpu_ratio_reg,
			   BIT(clk->pll_regs->ratio_offset), 0);

	return 0;
}

static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	int divider = *parent_rate / rate;

	divider = min(divider, APN806_MAX_DIVIDER);

	return *parent_rate / divider;
}

static const struct clk_ops ap_cpu_clk_ops = {
	.recalc_rate = ap_cpu_clk_recalc_rate,
	.round_rate = ap_cpu_clk_round_rate,
	.set_rate = ap_cpu_clk_set_rate,
};

static int ap_cpu_clock_probe(struct platform_device *pdev)
{
	int ret, nclusters = 0, cluster_index = 0;
	struct device *dev = &pdev->dev;
	struct device_node *dn, *np = dev->of_node;
	struct clk_hw_onecell_data *ap_cpu_data;
	struct ap_cpu_clk *ap_cpu_clk;
	struct regmap *regmap;

	regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(regmap)) {
		pr_err("cannot get pll_cr_base regmap\n");
		return PTR_ERR(regmap);
	}

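	/*
	 * Count the clusters: start from one (cluster 0) and bump the count
	 * to two as soon as a CPU node with the cluster bit set in its "reg"
	 * value is found.
	 */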
	nclusters = 1;
	for_each_of_cpu_node(dn) {
		int cpu, err;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err))
			return err;

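		/* A CPU with the cluster bit set in "reg" lives in cluster 1 */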
		if (cpu & APN806_CLUSTER_NUM_MASK) {
			nclusters = 2;
			break;
		}
	}

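	/* One ap_cpu_clk instance and one clk_hw slot per cluster */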
	ap_cpu_clk = devm_kcalloc(dev, nclusters, sizeof(*ap_cpu_clk),
				  GFP_KERNEL);
	if (!ap_cpu_clk)
		return -ENOMEM;

	ap_cpu_data = devm_kzalloc(dev, sizeof(*ap_cpu_data) +
				   sizeof(struct clk_hw *) * nclusters,
				   GFP_KERNEL);
	if (!ap_cpu_data)
		return -ENOMEM;

	for_each_of_cpu_node(dn) {
		/* Local array, so the cluster digit can be patched in place */
		char clk_name[] = "cpu-cluster-0";
		struct clk_init_data init;
		const char *parent_name;
		struct clk *parent;
		int cpu, err;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err))
			return err;

		cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
		cluster_index >>= APN806_CLUSTER_NUM_OFFSET;

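		/* Register each cluster's clock once, on the first CPU node of that cluster */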
		if (ap_cpu_data->hws[cluster_index])
			continue;

		parent = of_clk_get(np, cluster_index);
		if (IS_ERR(parent)) {
			dev_err(dev, "Could not get the clock parent\n");
			return -EINVAL;
		}
		parent_name = __clk_get_name(parent);
		clk_name[12] += cluster_index;
		ap_cpu_clk[cluster_index].clk_name =
			ap_cp_unique_name(dev, np->parent, clk_name);
		ap_cpu_clk[cluster_index].cluster = cluster_index;
		ap_cpu_clk[cluster_index].pll_cr_base = regmap;
		ap_cpu_clk[cluster_index].hw.init = &init;
		ap_cpu_clk[cluster_index].dev = dev;
		ap_cpu_clk[cluster_index].pll_regs = of_device_get_match_data(&pdev->dev);

		init.name = ap_cpu_clk[cluster_index].clk_name;
		init.ops = &ap_cpu_clk_ops;
		init.num_parents = 1;
		init.parent_names = &parent_name;

		ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw);
		if (ret)
			return ret;
		ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw;
	}

	ap_cpu_data->num = cluster_index + 1;

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, ap_cpu_data);
	if (ret)
		dev_err(dev, "failed to register OF clock provider\n");

	return ret;
}

static const struct of_device_id ap_cpu_clock_of_match[] = {
	{
		.compatible = "marvell,ap806-cpu-clock",
		.data = &ap806_dfs_regs,
	},
	{
		.compatible = "marvell,ap807-cpu-clock",
		.data = &ap807_dfs_regs,
	},
	{ }
};

static struct platform_driver ap_cpu_clock_driver = {
	.probe = ap_cpu_clock_probe,
	.driver = {
		.name = "marvell-ap-cpu-clock",
		.of_match_table = ap_cpu_clock_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(ap_cpu_clock_driver);