This source file includes the following definitions.
- spm_register_write
- spm_register_write_sync
- spm_register_read
- spm_set_low_power_mode
- qcom_pm_collapse
- qcom_cpu_spc
- qcom_idle_enter
- qcom_cpuidle_init
- spm_get_drv
- spm_dev_probe
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>

#define MAX_PMIC_DATA		2
#define MAX_SEQ_DATA		64
#define SPM_CTL_INDEX		0x7f
#define SPM_CTL_INDEX_SHIFT	4
#define SPM_CTL_EN		BIT(0)

enum pm_sleep_mode {
	PM_SLEEP_MODE_STBY,
	PM_SLEEP_MODE_RET,
	PM_SLEEP_MODE_SPC,
	PM_SLEEP_MODE_PC,
	PM_SLEEP_MODE_NR,
};

enum spm_reg {
	SPM_REG_CFG,
	SPM_REG_SPM_CTL,
	SPM_REG_DLY,
	SPM_REG_PMIC_DLY,
	SPM_REG_PMIC_DATA_0,
	SPM_REG_PMIC_DATA_1,
	SPM_REG_VCTL,
	SPM_REG_SEQ_ENTRY,
	SPM_REG_SPM_STS,
	SPM_REG_PMIC_STS,
	SPM_REG_NR,
};

struct spm_reg_data {
	const u8 *reg_offset;
	u32 spm_cfg;
	u32 spm_dly;
	u32 pmic_dly;
	u32 pmic_data[MAX_PMIC_DATA];
	u8 seq[MAX_SEQ_DATA];
	u8 start_index[PM_SLEEP_MODE_NR];
};

struct spm_driver_data {
	void __iomem *reg_base;
	const struct spm_reg_data *reg_data;
};

static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
	[SPM_REG_CFG]		= 0x08,
	[SPM_REG_SPM_CTL]	= 0x30,
	[SPM_REG_DLY]		= 0x34,
	[SPM_REG_SEQ_ENTRY]	= 0x80,
};

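/* SPM register data for 8974, 8084 */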
static const struct spm_reg_data spm_reg_8974_8084_cpu = {
	.reg_offset = spm_reg_offset_v2_1,
	.spm_cfg = 0x1,
	.spm_dly = 0x3C102800,
	.seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
		0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
		0x0F },
	.start_index[PM_SLEEP_MODE_STBY] = 0,
	.start_index[PM_SLEEP_MODE_SPC] = 3,
};

static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
	[SPM_REG_CFG]		= 0x08,
	[SPM_REG_SPM_CTL]	= 0x20,
	[SPM_REG_PMIC_DLY]	= 0x24,
	[SPM_REG_PMIC_DATA_0]	= 0x28,
	[SPM_REG_PMIC_DATA_1]	= 0x2C,
	[SPM_REG_SEQ_ENTRY]	= 0x80,
};

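/* SPM register data for 8064 */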
static const struct spm_reg_data spm_reg_8064_cpu = {
	.reg_offset = spm_reg_offset_v1_1,
	.spm_cfg = 0x1F,
	.pmic_dly = 0x02020004,
	.pmic_data[0] = 0x0084009C,
	.pmic_data[1] = 0x00A4001C,
	.seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
		0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
	.start_index[PM_SLEEP_MODE_STBY] = 0,
	.start_index[PM_SLEEP_MODE_SPC] = 2,
};

static DEFINE_PER_CPU(struct spm_driver_data *, cpu_spm_drv);

typedef int (*idle_fn)(void);
static DEFINE_PER_CPU(idle_fn*, qcom_idle_ops);

static inline void spm_register_write(struct spm_driver_data *drv,
		enum spm_reg reg, u32 val)
{
	if (drv->reg_data->reg_offset[reg])
		writel_relaxed(val, drv->reg_base +
				drv->reg_data->reg_offset[reg]);
}

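/* Ensure a guaranteed write before returning */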
static inline void spm_register_write_sync(struct spm_driver_data *drv,
		enum spm_reg reg, u32 val)
{
	u32 ret;

	if (!drv->reg_data->reg_offset[reg])
		return;

	do {
		writel_relaxed(val, drv->reg_base +
				drv->reg_data->reg_offset[reg]);
		ret = readl_relaxed(drv->reg_base +
				drv->reg_data->reg_offset[reg]);
		if (ret == val)
			break;
		cpu_relax();
	} while (1);
}

static inline u32 spm_register_read(struct spm_driver_data *drv,
		enum spm_reg reg)
{
	return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
}

static void spm_set_low_power_mode(struct spm_driver_data *drv,
		enum pm_sleep_mode mode)
{
	u32 start_index;
	u32 ctl_val;

	start_index = drv->reg_data->start_index[mode];

	ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
	ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
	ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
	ctl_val |= SPM_CTL_EN;
	spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
}

static int qcom_pm_collapse(unsigned long int unused)
{
	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);

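	/*
	 * Returns here only if there was a pending interrupt and we did not
	 * power down as a result.
	 */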
	return -1;
}

static int qcom_cpu_spc(void)
{
	int ret;
	struct spm_driver_data *drv = __this_cpu_read(cpu_spm_drv);

	spm_set_low_power_mode(drv, PM_SLEEP_MODE_SPC);
	ret = cpu_suspend(0, qcom_pm_collapse);
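	/*
	 * ARM common code executes WFI without calling into our driver and
	 * if the SPM mode is not reset, then we may accidentally power down
	 * the cpu when we intended only to gate the cpu clock.
	 * Ensure the state is set to standby before returning.
	 */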
	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);

	return ret;
}

static int qcom_idle_enter(unsigned long index)
{
	return __this_cpu_read(qcom_idle_ops)[index]();
}

static const struct of_device_id qcom_idle_state_match[] __initconst = {
	{ .compatible = "qcom,idle-state-spc", .data = qcom_cpu_spc },
	{ },
};

static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	const struct of_device_id *match_id;
	struct device_node *state_node;
	int i;
	int state_count = 1;
	idle_fn idle_fns[CPUIDLE_STATE_MAX];
	idle_fn *fns;
	cpumask_t mask;
	bool use_scm_power_down = false;

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node))
			continue;

		if (i == CPUIDLE_STATE_MAX) {
			pr_warn("%s: cpuidle states reached max possible\n",
					__func__);
			break;
		}

		match_id = of_match_node(qcom_idle_state_match, state_node);
		if (!match_id)
			return -ENODEV;

		idle_fns[state_count] = match_id->data;

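		/* Check if any of the states allow power down */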
		if (match_id->data == qcom_cpu_spc)
			use_scm_power_down = true;

		state_count++;
	}

	if (state_count == 1)
		goto check_spm;

	fns = devm_kcalloc(get_cpu_device(cpu), state_count, sizeof(*fns),
			GFP_KERNEL);
	if (!fns)
		return -ENOMEM;

	for (i = 1; i < state_count; i++)
		fns[i] = idle_fns[i];

	if (use_scm_power_down) {
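		/* We have at least one power down mode */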
		cpumask_clear(&mask);
		cpumask_set_cpu(cpu, &mask);
		qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask);
	}

	per_cpu(qcom_idle_ops, cpu) = fns;

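	/*
	 * SPM probe for the cpu should have happened by now; if the SPM
	 * device does not exist, return -ENXIO to indicate that this cpu
	 * does not support idle states.
	 */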
check_spm:
	return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
}

static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
	.suspend = qcom_idle_enter,
	.init = qcom_cpuidle_init,
};

CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);

static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
		int *spm_cpu)
{
	struct spm_driver_data *drv = NULL;
	struct device_node *cpu_node, *saw_node;
	int cpu;
	bool found = false;

	for_each_possible_cpu(cpu) {
		cpu_node = of_cpu_device_node_get(cpu);
		if (!cpu_node)
			continue;
		saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
		found = (saw_node == pdev->dev.of_node);
		of_node_put(saw_node);
		of_node_put(cpu_node);
		if (found)
			break;
	}

	if (found) {
		drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
		if (drv)
			*spm_cpu = cpu;
	}

	return drv;
}

static const struct of_device_id spm_match_table[] = {
	{ .compatible = "qcom,msm8974-saw2-v2.1-cpu",
	  .data = &spm_reg_8974_8084_cpu },
	{ .compatible = "qcom,apq8084-saw2-v2.1-cpu",
	  .data = &spm_reg_8974_8084_cpu },
	{ .compatible = "qcom,apq8064-saw2-v1.1-cpu",
	  .data = &spm_reg_8064_cpu },
	{ },
};

static int spm_dev_probe(struct platform_device *pdev)
{
	struct spm_driver_data *drv;
	struct resource *res;
	const struct of_device_id *match_id;
	void __iomem *addr;
	int cpu;

	drv = spm_get_drv(pdev, &cpu);
	if (!drv)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drv->reg_base))
		return PTR_ERR(drv->reg_base);

	match_id = of_match_node(spm_match_table, pdev->dev.of_node);
	if (!match_id)
		return -ENODEV;

	drv->reg_data = match_id->data;

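	/* Write the SPM sequences first.. */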
	addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
	__iowrite32_copy(addr, drv->reg_data->seq,
			ARRAY_SIZE(drv->reg_data->seq) / 4);

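	/* ..and then the control registers */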
	spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
	spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
	spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
	spm_register_write(drv, SPM_REG_PMIC_DATA_0,
				drv->reg_data->pmic_data[0]);
	spm_register_write(drv, SPM_REG_PMIC_DATA_1,
				drv->reg_data->pmic_data[1]);

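	/* Set up Standby as the default low power mode */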
	spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);

	per_cpu(cpu_spm_drv, cpu) = drv;

	return 0;
}

static struct platform_driver spm_driver = {
	.probe = spm_dev_probe,
	.driver = {
		.name = "saw",
		.of_match_table = spm_match_table,
	},
};

builtin_platform_driver(spm_driver);