This source file includes the following definitions.
- rpmhpd_send_corner
- to_active_sleep
- rpmhpd_aggregate_corner
- rpmhpd_power_on
- rpmhpd_power_off
- rpmhpd_set_performance_state
- rpmhpd_get_performance_state
- rpmhpd_update_level_mapping
- rpmhpd_probe
- rpmhpd_init
1
2
3
4 #include <linux/err.h>
5 #include <linux/init.h>
6 #include <linux/kernel.h>
7 #include <linux/mutex.h>
8 #include <linux/pm_domain.h>
9 #include <linux/slab.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <soc/qcom/cmd-db.h>
15 #include <soc/qcom/rpmh.h>
16 #include <dt-bindings/power/qcom-rpmpd.h>
17
18 #define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
19
20 #define RPMH_ARC_MAX_LEVELS 16
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/**
 * struct rpmhpd - RPMh power domain resource
 * @dev:           device of the providing platform driver (set in probe)
 * @pd:            generic_pm_domain embedded for this power domain
 * @parent:        optional parent domain's genpd (registered as subdomain parent)
 * @peer:          optional companion domain sharing the same RPMh resource
 *                 (active-only / full pairing, e.g. cx and cx_ao)
 * @active_only:   true if this domain only votes for the active set
 * @corner:        currently requested corner (index into @level)
 * @active_corner: corner last sent for the active set
 * @level:         corner-index (hlvl) to level-value (vlvl) map read from cmd-db
 * @level_count:   number of valid entries in @level
 * @enabled:       true while the genpd is powered on
 * @res_name:      RPMh resource name used for cmd-db lookups
 * @addr:          RPMh resource address resolved from @res_name
 */
struct rpmhpd {
	struct device *dev;
	struct generic_pm_domain pd;
	struct generic_pm_domain *parent;
	struct rpmhpd *peer;
	const bool active_only;
	unsigned int corner;
	unsigned int active_corner;
	u32 level[RPMH_ARC_MAX_LEVELS];
	size_t level_count;
	bool enabled;
	const char *res_name;
	u32 addr;
};
52
/* Per-SoC description: the set of power domains exposed by the provider. */
struct rpmhpd_desc {
	struct rpmhpd **rpmhpds;
	size_t num_pds;
};
57
/* Serializes corner aggregation and enable/corner state across all domains. */
static DEFINE_MUTEX(rpmhpd_lock);
59
60
61
/* SDM845 RPMh power domains */
static struct rpmhpd sdm845_ebi = {
	.pd = { .name = "ebi", },
	.res_name = "ebi.lvl",
};

static struct rpmhpd sdm845_lmx = {
	.pd = { .name = "lmx", },
	.res_name = "lmx.lvl",
};

static struct rpmhpd sdm845_lcx = {
	.pd = { .name = "lcx", },
	.res_name = "lcx.lvl",
};

static struct rpmhpd sdm845_gfx = {
	.pd = { .name = "gfx", },
	.res_name = "gfx.lvl",
};

static struct rpmhpd sdm845_mss = {
	.pd = { .name = "mss", },
	.res_name = "mss.lvl",
};

/* mx and mx_ao are peers voting on the same "mx.lvl" resource. */
static struct rpmhpd sdm845_mx_ao;
static struct rpmhpd sdm845_mx = {
	.pd = { .name = "mx", },
	.peer = &sdm845_mx_ao,
	.res_name = "mx.lvl",
};

static struct rpmhpd sdm845_mx_ao = {
	.pd = { .name = "mx_ao", },
	.active_only = true,
	.peer = &sdm845_mx,
	.res_name = "mx.lvl",
};

/* cx and cx_ao are peers on "cx.lvl"; each is a subdomain of its mx variant. */
static struct rpmhpd sdm845_cx_ao;
static struct rpmhpd sdm845_cx = {
	.pd = { .name = "cx", },
	.peer = &sdm845_cx_ao,
	.parent = &sdm845_mx.pd,
	.res_name = "cx.lvl",
};

static struct rpmhpd sdm845_cx_ao = {
	.pd = { .name = "cx_ao", },
	.active_only = true,
	.peer = &sdm845_cx,
	.parent = &sdm845_mx_ao.pd,
	.res_name = "cx.lvl",
};

/* Indexed by the SDM845_* dt-binding constants used in #power-domain-cells. */
static struct rpmhpd *sdm845_rpmhpds[] = {
	[SDM845_EBI] = &sdm845_ebi,
	[SDM845_MX] = &sdm845_mx,
	[SDM845_MX_AO] = &sdm845_mx_ao,
	[SDM845_CX] = &sdm845_cx,
	[SDM845_CX_AO] = &sdm845_cx_ao,
	[SDM845_LMX] = &sdm845_lmx,
	[SDM845_LCX] = &sdm845_lcx,
	[SDM845_GFX] = &sdm845_gfx,
	[SDM845_MSS] = &sdm845_mss,
};
128
static const struct rpmhpd_desc sdm845_desc = {
	.rpmhpds = sdm845_rpmhpds,
	.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};

/* Match table selecting the per-SoC domain description from the compatible. */
static const struct of_device_id rpmhpd_match_table[] = {
	{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
	{ }
};
138
139 static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
140 unsigned int corner, bool sync)
141 {
142 struct tcs_cmd cmd = {
143 .addr = pd->addr,
144 .data = corner,
145 };
146
147
148
149
150
151 if (sync)
152 return rpmh_write(pd->dev, state, &cmd, 1);
153 else
154 return rpmh_write_async(pd->dev, state, &cmd, 1);
155 }
156
157 static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
158 unsigned int *active, unsigned int *sleep)
159 {
160 *active = corner;
161
162 if (pd->active_only)
163 *sleep = 0;
164 else
165 *sleep = *active;
166 }
167
/*
 * Aggregate the corner votes of @pd and its peer (if any) and forward the
 * result to RPMh.  The active-set vote is always sent; for paired domains
 * the wake-set and sleep-set votes are sent as well, so that RPMh can
 * restore/drop the level around system sleep.
 */
static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
{
	int ret;
	struct rpmhpd *peer = pd->peer;
	unsigned int active_corner, sleep_corner;
	unsigned int this_active_corner = 0, this_sleep_corner = 0;
	unsigned int peer_active_corner = 0, peer_sleep_corner = 0;

	to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);

	/* Only an enabled peer contributes its corner to the aggregate. */
	if (peer && peer->enabled)
		to_active_sleep(peer, peer->corner, &peer_active_corner,
				&peer_sleep_corner);

	active_corner = max(this_active_corner, peer_active_corner);

	/*
	 * Use a synchronous write only when raising the corner, so the
	 * level is guaranteed before the caller proceeds; lowering may
	 * complete asynchronously.
	 */
	ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
				 active_corner > pd->active_corner);
	if (ret)
		return ret;

	pd->active_corner = active_corner;

	if (peer) {
		/* Both peers vote on one resource; keep their views in sync. */
		peer->active_corner = active_corner;

		ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
					 active_corner, false);
		if (ret)
			return ret;

		sleep_corner = max(this_sleep_corner, peer_sleep_corner);

		return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
					  false);
	}

	return ret;
}
216
217 static int rpmhpd_power_on(struct generic_pm_domain *domain)
218 {
219 struct rpmhpd *pd = domain_to_rpmhpd(domain);
220 int ret = 0;
221
222 mutex_lock(&rpmhpd_lock);
223
224 if (pd->corner)
225 ret = rpmhpd_aggregate_corner(pd, pd->corner);
226
227 if (!ret)
228 pd->enabled = true;
229
230 mutex_unlock(&rpmhpd_lock);
231
232 return ret;
233 }
234
235 static int rpmhpd_power_off(struct generic_pm_domain *domain)
236 {
237 struct rpmhpd *pd = domain_to_rpmhpd(domain);
238 int ret = 0;
239
240 mutex_lock(&rpmhpd_lock);
241
242 ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
243
244 if (!ret)
245 pd->enabled = false;
246
247 mutex_unlock(&rpmhpd_lock);
248
249 return ret;
250 }
251
252 static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
253 unsigned int level)
254 {
255 struct rpmhpd *pd = domain_to_rpmhpd(domain);
256 int ret = 0, i;
257
258 mutex_lock(&rpmhpd_lock);
259
260 for (i = 0; i < pd->level_count; i++)
261 if (level <= pd->level[i])
262 break;
263
264
265
266
267
268 if (i == pd->level_count)
269 i--;
270
271 if (pd->enabled) {
272 ret = rpmhpd_aggregate_corner(pd, i);
273 if (ret)
274 goto out;
275 }
276
277 pd->corner = i;
278 out:
279 mutex_unlock(&rpmhpd_lock);
280
281 return ret;
282 }
283
/* The OPP table's "level" value is used directly as the performance state. */
static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	unsigned int level = dev_pm_opp_get_level(opp);

	return level;
}
289
/*
 * Read the corner-index (hlvl) to level-value (vlvl) map for this resource
 * from command DB and cache it in rpmhpd->level[].
 *
 * Returns 0 on success, the cmd-db error if the aux data cannot be read,
 * or -EINVAL when the table exceeds RPMH_ARC_MAX_LEVELS entries.
 */
static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
{
	int i;
	const u16 *buf;

	buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* The returned length is in bytes; each level entry is a u16. */
	rpmhpd->level_count >>= 1;

	if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
		return -EINVAL;

	for (i = 0; i < rpmhpd->level_count; i++) {
		rpmhpd->level[i] = buf[i];

		/*
		 * The aux data may be zero padded; a 0 entry past the first
		 * slot marks the end of the valid map, so truncate there.
		 */
		if (i > 0 && rpmhpd->level[i] == 0) {
			rpmhpd->level_count = i;
			break;
		}
		pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
			 rpmhpd->level[i]);
	}

	return 0;
}
322
/*
 * Probe: resolve every described power domain against command DB, register
 * each as a genpd, wire up parent/subdomain links and expose the set as an
 * of_genpd onecell provider.
 */
static int rpmhpd_probe(struct platform_device *pdev)
{
	int i, ret;
	size_t num_pds;
	struct device *dev = &pdev->dev;
	struct genpd_onecell_data *data;
	struct rpmhpd **rpmhpds;
	const struct rpmhpd_desc *desc;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	rpmhpds = desc->rpmhpds;
	num_pds = desc->num_pds;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	data->num_domains = num_pds;

	for (i = 0; i < num_pds; i++) {
		/* Holes in the table are tolerated (unsupported indices). */
		if (!rpmhpds[i]) {
			dev_warn(dev, "rpmhpds[%d] is empty\n", i);
			continue;
		}

		rpmhpds[i]->dev = dev;
		rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
		if (!rpmhpds[i]->addr) {
			dev_err(dev, "Could not find RPMh address for resource %s\n",
				rpmhpds[i]->res_name);
			return -ENODEV;
		}

		/* Only ARC (voltage level) resources are valid here. */
		ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
		if (ret != CMD_DB_HW_ARC) {
			dev_err(dev, "RPMh slave ID mismatch\n");
			return -EINVAL;
		}

		ret = rpmhpd_update_level_mapping(rpmhpds[i]);
		if (ret)
			return ret;

		rpmhpds[i]->pd.power_off = rpmhpd_power_off;
		rpmhpds[i]->pd.power_on = rpmhpd_power_on;
		rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
		rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
		/* Domains start off; genpd powers them on at first use. */
		pm_genpd_init(&rpmhpds[i]->pd, NULL, true);

		data->domains[i] = &rpmhpds[i]->pd;
	}

	/*
	 * Second pass: link subdomains only after every genpd exists.
	 * NOTE(review): pm_genpd_add_subdomain() can fail but the return
	 * value is ignored here — consider propagating it.
	 */
	for (i = 0; i < num_pds; i++) {
		if (!rpmhpds[i])
			continue;
		if (rpmhpds[i]->parent)
			pm_genpd_add_subdomain(rpmhpds[i]->parent,
					       &rpmhpds[i]->pd);
	}

	return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}
394
static struct platform_driver rpmhpd_driver = {
	.driver = {
		.name = "qcom-rpmhpd",
		.of_match_table = rpmhpd_match_table,
		/* Power domains must not be unbound at runtime. */
		.suppress_bind_attrs = true,
	},
	.probe = rpmhpd_probe,
};
403
static int __init rpmhpd_init(void)
{
	return platform_driver_register(&rpmhpd_driver);
}
/* Registered early (core_initcall) so consumers can attach during boot. */
core_initcall(rpmhpd_init);