This source file includes the following definitions:
- clk_factors_recalc_rate
- clk_factors_determine_rate
- clk_factors_set_rate
- __sunxi_factors_register
- sunxi_factors_register
- sunxi_factors_register_critical
- sunxi_factors_unregister
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "clk-factors.h"
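
/*
 * Adjustable factor-based clock: the output rate is derived from the
 * parent rate and the N, K, M and P factors read from the factor register,
 * unless the driver supplies a custom recalc callback:
 *
 *   rate = (parent_rate * (N + n_start) * (K + 1) >> P) / (M + 1)
 */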
#define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)

#define FACTORS_MAX_PARENTS 5

#define SETMASK(len, pos) (((1U << (len)) - 1) << (pos))
#define CLRMASK(len, pos) (~(SETMASK(len, pos)))
#define FACTOR_GET(bit, len, reg) (((reg) & SETMASK(len, bit)) >> (bit))

#define FACTOR_SET(bit, len, reg, val) \
        (((reg) & CLRMASK(len, bit)) | (val << (bit)))

static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        u8 n = 1, k = 0, p = 0, m = 0;
        u32 reg;
        unsigned long rate;
        struct clk_factors *factors = to_clk_factors(hw);
        const struct clk_factors_config *config = factors->config;

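        /* Fetch the current value of the factor register */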
        reg = readl(factors->reg);

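        /* Extract each factor that applies to this clock */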
        if (config->nwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                n = FACTOR_GET(config->nshift, config->nwidth, reg);
        if (config->kwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                k = FACTOR_GET(config->kshift, config->kwidth, reg);
        if (config->mwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                m = FACTOR_GET(config->mshift, config->mwidth, reg);
        if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                p = FACTOR_GET(config->pshift, config->pwidth, reg);

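        /* Let the driver compute the rate when it provides a custom recalc hook */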
        if (factors->recalc) {
                struct factors_request factors_req = {
                        .parent_rate = parent_rate,
                        .n = n,
                        .k = k,
                        .m = m,
                        .p = p,
                };

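                /* Pass along the selected parent, read from the mux field */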
                if (factors->mux)
                        factors_req.parent_index =
                                (reg >> factors->mux->shift) &
                                factors->mux->mask;

                factors->recalc(&factors_req);

                return factors_req.rate;
        }

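        /* Default formula used by most factor clocks */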
        rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);

        return rate;
}

static int clk_factors_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
{
        struct clk_factors *factors = to_clk_factors(hw);
        struct clk_hw *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;

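        /* Try each parent and keep the one whose achievable rate is closest to, without exceeding, the requested rate */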
        num_parents = clk_hw_get_num_parents(hw);
        for (i = 0; i < num_parents; i++) {
                struct factors_request factors_req = {
                        .rate = req->rate,
                        .parent_index = i,
                };
                parent = clk_hw_get_parent_by_index(hw, i);
                if (!parent)
                        continue;
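                /* Only ask the parent for a new rate if rate propagation is allowed */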
                if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
                        parent_rate = clk_hw_round_rate(parent, req->rate);
                else
                        parent_rate = clk_hw_get_rate(parent);

                factors_req.parent_rate = parent_rate;
                factors->get_factors(&factors_req);
                child_rate = factors_req.rate;

                if (child_rate <= req->rate && child_rate > best_child_rate) {
                        best_parent = parent;
                        best = parent_rate;
                        best_child_rate = child_rate;
                }
        }

        if (!best_parent)
                return -EINVAL;

        req->best_parent_hw = best_parent;
        req->best_parent_rate = best;
        req->rate = best_child_rate;

        return 0;
}

static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
{
        struct factors_request req = {
                .rate = rate,
                .parent_rate = parent_rate,
        };
        u32 reg;
        struct clk_factors *factors = to_clk_factors(hw);
        const struct clk_factors_config *config = factors->config;
        unsigned long flags = 0;

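        /* Ask the driver for the factors matching the requested rate */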
        factors->get_factors(&req);

        if (factors->lock)
                spin_lock_irqsave(factors->lock, flags);

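        /* Fetch the current register value */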
        reg = readl(factors->reg);

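        /* Update the factor fields with the new values */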
        reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
        reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
        reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
        reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);

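        /* Apply the new configuration */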
        writel(reg, factors->reg);

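        /* Give the clock hardware time to stabilize at the new rate */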
        __delay((rate >> 20) * 500 / 2);

        if (factors->lock)
                spin_unlock_irqrestore(factors->lock, flags);

        return 0;
}

static const struct clk_ops clk_factors_ops = {
        .determine_rate = clk_factors_determine_rate,
        .recalc_rate = clk_factors_recalc_rate,
        .set_rate = clk_factors_set_rate,
};

static struct clk *__sunxi_factors_register(struct device_node *node,
                                            const struct factors_data *data,
                                            spinlock_t *lock, void __iomem *reg,
                                            unsigned long flags)
{
        struct clk *clk;
        struct clk_factors *factors;
        struct clk_gate *gate = NULL;
        struct clk_mux *mux = NULL;
        struct clk_hw *gate_hw = NULL;
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
        int ret, i = 0;

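        /* If there is a mux, the clock will have more than one parent */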
        i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);

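        /*
         * Prefer an explicit name from the factors data, then the
         * "clock-output-names" property, falling back to the node name.
         */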
        if (data->name)
                clk_name = data->name;
        else
                of_property_read_string(node, "clock-output-names", &clk_name);

        factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
        if (!factors)
                goto err_factors;

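        /* Set up the factors properties */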
        factors->reg = reg;
        factors->config = data->table;
        factors->get_factors = data->getter;
        factors->recalc = data->recalc;
        factors->lock = lock;

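        /* Add a gate if this clock can gate (data->enable is the gate bit index) */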
        if (data->enable) {
                gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
                if (!gate)
                        goto err_gate;

                factors->gate = gate;

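                /* Set up gate properties */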
                gate->reg = reg;
                gate->bit_idx = data->enable;
                gate->lock = factors->lock;
                gate_hw = &gate->hw;
        }

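        /* Add a mux if this clock can reparent (data->mux is the mux shift) */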
        if (data->mux) {
                mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
                if (!mux)
                        goto err_mux;

                factors->mux = mux;

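                /* Set up mux properties */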
                mux->reg = reg;
                mux->shift = data->mux;
                mux->mask = data->muxmask;
                mux->lock = factors->lock;
                mux_hw = &mux->hw;
        }

        clk = clk_register_composite(NULL, clk_name,
                        parents, i,
                        mux_hw, &clk_mux_ops,
                        &factors->hw, &clk_factors_ops,
                        gate_hw, &clk_gate_ops, flags);
        if (IS_ERR(clk))
                goto err_register;

        ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
        if (ret)
                goto err_provider;

        return clk;

err_provider:
        clk_unregister(clk);
err_register:
        kfree(mux);
err_mux:
        kfree(gate);
err_gate:
        kfree(factors);
err_factors:
        return NULL;
}

struct clk *sunxi_factors_register(struct device_node *node,
                                   const struct factors_data *data,
                                   spinlock_t *lock,
                                   void __iomem *reg)
{
        return __sunxi_factors_register(node, data, lock, reg, 0);
}

struct clk *sunxi_factors_register_critical(struct device_node *node,
                                            const struct factors_data *data,
                                            spinlock_t *lock,
                                            void __iomem *reg)
{
        return __sunxi_factors_register(node, data, lock, reg, CLK_IS_CRITICAL);
}

void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
{
        struct clk_hw *hw = __clk_get_hw(clk);
        struct clk_factors *factors;

        if (!hw)
                return;

        factors = to_clk_factors(hw);

        of_clk_del_provider(node);

        clk_unregister(clk);
        kfree(factors->mux);
        kfree(factors->gate);
        kfree(factors);
}