This source file includes the following definitions:
- ccu_nkm_find_best
- ccu_nkm_disable
- ccu_nkm_enable
- ccu_nkm_is_enabled
- ccu_nkm_recalc_rate
- ccu_nkm_round_rate
- ccu_nkm_determine_rate
- ccu_nkm_set_rate
- ccu_nkm_get_parent
- ccu_nkm_set_parent
#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_nkm.h"

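/*
 * Scratch space for the factor search: the caller fills in the min/max
 * bounds and ccu_nkm_find_best() writes back the chosen n, k and m.
 */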
struct _ccu_nkm {
	unsigned long	n, min_n, max_n;
	unsigned long	k, min_k, max_k;
	unsigned long	m, min_m, max_m;
};

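/*
 * Brute-force search over every allowed (n, k, m) combination for the
 * factors yielding the highest rate that does not exceed the requested
 * one, with rate = parent * n * k / m.
 */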
static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
			      struct _ccu_nkm *nkm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_k = 0, best_m = 0;
	unsigned long _n, _k, _m;

	for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
		for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
			for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
				unsigned long tmp_rate;

				tmp_rate = parent * _n * _k / _m;

				if (tmp_rate > rate)
					continue;
				if ((rate - tmp_rate) < (rate - best_rate)) {
					best_rate = tmp_rate;
					best_n = _n;
					best_k = _k;
					best_m = _m;
				}
			}
		}
	}

	nkm->n = best_n;
	nkm->k = best_k;
	nkm->m = best_m;
}

static void ccu_nkm_disable(struct clk_hw *hw)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_gate_helper_disable(&nkm->common, nkm->enable);
}

static int ccu_nkm_enable(struct clk_hw *hw)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_gate_helper_enable(&nkm->common, nkm->enable);
}

static int ccu_nkm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_gate_helper_is_enabled(&nkm->common, nkm->enable);
}

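/*
 * Decode the N, K and M fields from the control register, apply each
 * field's offset (clamping a zero factor to 1), and compute the output
 * rate, honouring an optional fixed post-divider.
 */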
static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
	unsigned long n, m, k, rate;
	u32 reg;

	reg = readl(nkm->common.base + nkm->common.reg);

	n = reg >> nkm->n.shift;
	n &= (1 << nkm->n.width) - 1;
	n += nkm->n.offset;
	if (!n)
		n++;

	k = reg >> nkm->k.shift;
	k &= (1 << nkm->k.width) - 1;
	k += nkm->k.offset;
	if (!k)
		k++;

	m = reg >> nkm->m.shift;
	m &= (1 << nkm->m.width) - 1;
	m += nkm->m.offset;
	if (!m)
		m++;

	rate = parent_rate * n * k / m;

	if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nkm->fixed_post_div;

	return rate;
}

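/*
 * Round-rate callback used by the mux helper below: derive the search
 * bounds from the factor descriptions, scale the target by the fixed
 * post-divider if one is present, and return the closest reachable rate
 * for the given parent.
 */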
static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
					struct clk_hw *hw,
					unsigned long *parent_rate,
					unsigned long rate,
					void *data)
{
	struct ccu_nkm *nkm = data;
	struct _ccu_nkm _nkm;

	_nkm.min_n = nkm->n.min ?: 1;
	_nkm.max_n = nkm->n.max ?: 1 << nkm->n.width;
	_nkm.min_k = nkm->k.min ?: 1;
	_nkm.max_k = nkm->k.max ?: 1 << nkm->k.width;
	_nkm.min_m = 1;
	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;

	if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nkm->fixed_post_div;

	ccu_nkm_find_best(*parent_rate, rate, &_nkm);

	rate = *parent_rate * _nkm.n * _nkm.k / _nkm.m;

	if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nkm->fixed_post_div;

	return rate;
}

static int ccu_nkm_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_mux_helper_determine_rate(&nkm->common, &nkm->mux,
					     req, ccu_nkm_round_rate, nkm);
}

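/*
 * Program the factors chosen by ccu_nkm_find_best(): clear and refill
 * the N, K and M bit fields in a read-modify-write sequence under the
 * CCU spinlock, then wait for the PLL lock bit.
 */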
static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
	struct _ccu_nkm _nkm;
	unsigned long flags;
	u32 reg;

	if (nkm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nkm->fixed_post_div;

	_nkm.min_n = nkm->n.min ?: 1;
	_nkm.max_n = nkm->n.max ?: 1 << nkm->n.width;
	_nkm.min_k = nkm->k.min ?: 1;
	_nkm.max_k = nkm->k.max ?: 1 << nkm->k.width;
	_nkm.min_m = 1;
	_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;

	ccu_nkm_find_best(parent_rate, rate, &_nkm);

	spin_lock_irqsave(nkm->common.lock, flags);

	reg = readl(nkm->common.base + nkm->common.reg);
	reg &= ~GENMASK(nkm->n.width + nkm->n.shift - 1, nkm->n.shift);
	reg &= ~GENMASK(nkm->k.width + nkm->k.shift - 1, nkm->k.shift);
	reg &= ~GENMASK(nkm->m.width + nkm->m.shift - 1, nkm->m.shift);

	reg |= (_nkm.n - nkm->n.offset) << nkm->n.shift;
	reg |= (_nkm.k - nkm->k.offset) << nkm->k.shift;
	reg |= (_nkm.m - nkm->m.offset) << nkm->m.shift;
	writel(reg, nkm->common.base + nkm->common.reg);

	spin_unlock_irqrestore(nkm->common.lock, flags);

	ccu_helper_wait_for_lock(&nkm->common, nkm->lock);

	return 0;
}

static u8 ccu_nkm_get_parent(struct clk_hw *hw)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_mux_helper_get_parent(&nkm->common, &nkm->mux);
}

static int ccu_nkm_set_parent(struct clk_hw *hw, u8 index)
{
	struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);

	return ccu_mux_helper_set_parent(&nkm->common, &nkm->mux, index);
}

const struct clk_ops ccu_nkm_ops = {
	.disable	= ccu_nkm_disable,
	.enable		= ccu_nkm_enable,
	.is_enabled	= ccu_nkm_is_enabled,

	.get_parent	= ccu_nkm_get_parent,
	.set_parent	= ccu_nkm_set_parent,

	.determine_rate	= ccu_nkm_determine_rate,
	.recalc_rate	= ccu_nkm_recalc_rate,
	.set_rate	= ccu_nkm_set_rate,
};
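
As a rough, self-contained illustration of the factor search above (not part of ccu_nkm.c), the sketch below reimplements the loop from ccu_nkm_find_best() as ordinary userspace C. The parent rate, target rate and the N/K/M bounds are made-up values chosen only to show how the search picks the closest rate not above the target; real limits come from each SoC's clock description.

/* Standalone illustration only -- not part of the driver. */
#include <stdio.h>

struct _ccu_nkm {
	unsigned long n, min_n, max_n;
	unsigned long k, min_k, max_k;
	unsigned long m, min_m, max_m;
};

/* Same brute-force search as ccu_nkm_find_best() above. */
static void find_best(unsigned long parent, unsigned long rate,
		      struct _ccu_nkm *nkm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_k = 0, best_m = 0;
	unsigned long _n, _k, _m;

	for (_k = nkm->min_k; _k <= nkm->max_k; _k++) {
		for (_n = nkm->min_n; _n <= nkm->max_n; _n++) {
			for (_m = nkm->min_m; _m <= nkm->max_m; _m++) {
				unsigned long tmp = parent * _n * _k / _m;

				if (tmp > rate)
					continue;
				if ((rate - tmp) < (rate - best_rate)) {
					best_rate = tmp;
					best_n = _n;
					best_k = _k;
					best_m = _m;
				}
			}
		}
	}

	nkm->n = best_n;
	nkm->k = best_k;
	nkm->m = best_m;
}

int main(void)
{
	/* Hypothetical bounds: a 5-bit N, 2-bit K and 2-bit M field. */
	struct _ccu_nkm nkm = {
		.min_n = 1, .max_n = 32,
		.min_k = 1, .max_k = 4,
		.min_m = 1, .max_m = 4,
	};

	/* 24 MHz parent, 297 MHz target -- arbitrary example numbers. */
	find_best(24000000UL, 297000000UL, &nkm);

	/* Prints the chosen factors and the resulting rate. */
	printf("n=%lu k=%lu m=%lu rate=%lu\n",
	       nkm.n, nkm.k, nkm.m, 24000000UL * nkm.n * nkm.k / nkm.m);
	return 0;
}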