This source file includes following definitions.
- qcom_scm_clk_enable
- qcom_scm_clk_disable
- qcom_scm_set_cold_boot_addr
- qcom_scm_set_warm_boot_addr
- qcom_scm_cpu_power_down
- qcom_scm_hdcp_available
- qcom_scm_hdcp_req
- qcom_scm_pas_supported
- qcom_scm_pas_init_image
- qcom_scm_pas_mem_setup
- qcom_scm_pas_auth_and_reset
- qcom_scm_pas_shutdown
- qcom_scm_pas_reset_assert
- qcom_scm_pas_reset_deassert
- qcom_scm_restore_sec_cfg
- qcom_scm_iommu_secure_ptbl_size
- qcom_scm_iommu_secure_ptbl_init
- qcom_scm_io_readl
- qcom_scm_io_writel
- qcom_scm_set_download_mode
- qcom_scm_find_dload_address
- qcom_scm_is_available
- qcom_scm_set_remote_state
- qcom_scm_assign_mem
- qcom_scm_probe
- qcom_scm_shutdown
- qcom_scm_init
1
2
3
4
5
6
7
8 #include <linux/platform_device.h>
9 #include <linux/init.h>
10 #include <linux/cpumask.h>
11 #include <linux/export.h>
12 #include <linux/dma-direct.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/qcom_scm.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_platform.h>
20 #include <linux/clk.h>
21 #include <linux/reset-controller.h>
22
23 #include "qcom_scm.h"
24
/* Whether to leave the platform in download (crash-dump) mode; overridable
 * on the kernel command line via the "download_mode" module parameter. */
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

/* Per-SoC OF match data: which interface clocks the SCM firmware requires */
#define SCM_HAS_CORE_CLK BIT(0)
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)

/* Driver state: device handle, optional interface clocks and the MSS reset
 * controller exposed through qcom_scm_pas_reset_ops. */
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	/* TCSR register address used to request download mode; 0 if the
	 * "qcom,dload-mode" DT property is absent. */
	u64 dload_mode_addr;
};

/* Destination VM/permission entry passed to the assign-memory SCM call;
 * layout is fixed by the secure world ABI (little-endian). */
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

/* Memory region descriptor for the assign-memory SCM call (little-endian). */
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/* Singleton set at the end of qcom_scm_probe(); NULL until probe succeeds,
 * which is what qcom_scm_is_available() reports. */
static struct qcom_scm *__scm;
56
/*
 * Enable the core, iface and bus clocks ahead of an SCM call.
 *
 * Any of the clocks may be NULL (optional per SoC, see qcom_scm_probe());
 * clk_prepare_enable(NULL) is a no-op. On failure, clocks enabled so far
 * are rolled back in reverse order via the goto chain.
 */
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}
82
/*
 * Disable the SCM interface clocks after an SCM call; counterpart of
 * qcom_scm_clk_enable(). NULL clocks are ignored by the clk API.
 */
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
89
90
91
92
93
94
95
96
97
/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Thin wrapper around the SCM backend; see __qcom_scm_set_cold_boot_addr()
 * for the actual firmware call.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
103
104
105
106
107
108
109
110
111
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Thin wrapper around the SCM backend; unlike the cold boot variant this
 * needs the SCM device for the firmware call.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
117
118
119
120
121
122
123
124
125
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags forwarded to the SCM backend (cache-flush behavior —
 *         see __qcom_scm_cpu_power_down() for their meaning)
 *
 * Thin wrapper; the backend call does not return on successful power down.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
131
132
133
134
135
136
137 bool qcom_scm_hdcp_available(void)
138 {
139 int ret = qcom_scm_clk_enable();
140
141 if (ret)
142 return ret;
143
144 ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
145 QCOM_SCM_CMD_HDCP);
146
147 qcom_scm_clk_disable();
148
149 return ret > 0 ? true : false;
150 }
151 EXPORT_SYMBOL(qcom_scm_hdcp_available);
152
153
154
155
156
157
158
159
160
161 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
162 {
163 int ret = qcom_scm_clk_enable();
164
165 if (ret)
166 return ret;
167
168 ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
169 qcom_scm_clk_disable();
170 return ret;
171 }
172 EXPORT_SYMBOL(qcom_scm_hdcp_req);
173
174
175
176
177
178
179
180
181 bool qcom_scm_pas_supported(u32 peripheral)
182 {
183 int ret;
184
185 ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
186 QCOM_SCM_PAS_IS_SUPPORTED_CMD);
187 if (ret <= 0)
188 return false;
189
190 return __qcom_scm_pas_supported(__scm->dev, peripheral);
191 }
192 EXPORT_SYMBOL(qcom_scm_pas_supported);
193
194
195
196
197
198
199
200
201
202
203
204
205
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication with
 *			       the given firmware metadata.
 * @peripheral: peripheral id
 * @metadata: pointer to the firmware metadata blob
 * @size: size of the metadata blob
 *
 * The metadata is copied into a DMA-coherent bounce buffer so the secure
 * world sees a physically contiguous, non-cached copy, and the buffer is
 * freed again once the call returns.
 *
 * Return: 0 on success, negative errno on failure.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
239
240
241
242
243
244
245
246
247
248
249 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
250 {
251 int ret;
252
253 ret = qcom_scm_clk_enable();
254 if (ret)
255 return ret;
256
257 ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
258 qcom_scm_clk_disable();
259
260 return ret;
261 }
262 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
263
264
265
266
267
268
269
270
271 int qcom_scm_pas_auth_and_reset(u32 peripheral)
272 {
273 int ret;
274
275 ret = qcom_scm_clk_enable();
276 if (ret)
277 return ret;
278
279 ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
280 qcom_scm_clk_disable();
281
282 return ret;
283 }
284 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
285
286
287
288
289
290
291
292 int qcom_scm_pas_shutdown(u32 peripheral)
293 {
294 int ret;
295
296 ret = qcom_scm_clk_enable();
297 if (ret)
298 return ret;
299
300 ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
301 qcom_scm_clk_disable();
302
303 return ret;
304 }
305 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
306
307 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
308 unsigned long idx)
309 {
310 if (idx != 0)
311 return -EINVAL;
312
313 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
314 }
315
316 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
317 unsigned long idx)
318 {
319 if (idx != 0)
320 return -EINVAL;
321
322 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
323 }
324
/* Reset controller ops for the MSS reset, registered in qcom_scm_probe(). */
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
329
/* Thin wrapper: restore the secure configuration of @device_id. */
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
335
/* Thin wrapper: query the size needed for the secure IOMMU page tables. */
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
341
/* Thin wrapper: hand the secure IOMMU page table region to the firmware. */
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
347
/* Thin wrapper: read a register at physical @addr through the secure world. */
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	return __qcom_scm_io_readl(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl);
353
/* Thin wrapper: write a register at physical @addr through the secure world. */
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	return __qcom_scm_io_writel(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
359
360 static void qcom_scm_set_download_mode(bool enable)
361 {
362 bool avail;
363 int ret = 0;
364
365 avail = __qcom_scm_is_call_available(__scm->dev,
366 QCOM_SCM_SVC_BOOT,
367 QCOM_SCM_SET_DLOAD_MODE);
368 if (avail) {
369 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
370 } else if (__scm->dload_mode_addr) {
371 ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
372 enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
373 } else {
374 dev_err(__scm->dev,
375 "No available mechanism for setting download mode\n");
376 }
377
378 if (ret)
379 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
380 }
381
382 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
383 {
384 struct device_node *tcsr;
385 struct device_node *np = dev->of_node;
386 struct resource res;
387 u32 offset;
388 int ret;
389
390 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
391 if (!tcsr)
392 return 0;
393
394 ret = of_address_to_resource(tcsr, 0, &res);
395 of_node_put(tcsr);
396 if (ret)
397 return ret;
398
399 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
400 if (ret < 0)
401 return ret;
402
403 *addr = res.start + offset;
404
405 return 0;
406 }
407
408
409
410
/**
 * qcom_scm_is_available() - Checks if SCM is available
 *
 * True once qcom_scm_probe() has completed and set the __scm singleton.
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);
416
/* Thin wrapper: notify the secure world of a remote processor state change. */
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
422
423
424
425
426
427
428
429
430
431
432
433
434
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: physical base of the region to reassign
 * @mem_sz: size of the region
 * @srcvm: in: bitmask of current owner VMIDs; out: bitmask of new owners
 * @newvm: array of new owner VM/permission pairs
 * @dest_cnt: number of entries in @newvm
 *
 * Builds three firmware-visible tables (source VMID list, memory map entry,
 * destination permission list) in a single DMA-coherent allocation, each
 * sub-buffer aligned to 64 bytes, then issues the assign call.
 *
 * Return: 0 on success (with *srcvm updated), negative errno on failure.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	dma_addr_t ptr_dma;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	/* One allocation holds all three tables, each 64-byte aligned. */
	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	/* Firmware wants a physical address, not the DMA handle. */
	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);

	/* Table 1: list of current owner VMIDs, one __le32 per set bit. */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Table 2: the single memory region being reassigned. */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;

	/* Table 3: new owners and their permissions; also accumulate the
	 * new-owner bitmask reported back through *srcvm. */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	/* Only update the caller's ownership view after a successful call. */
	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
504
505 static int qcom_scm_probe(struct platform_device *pdev)
506 {
507 struct qcom_scm *scm;
508 unsigned long clks;
509 int ret;
510
511 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
512 if (!scm)
513 return -ENOMEM;
514
515 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
516 if (ret < 0)
517 return ret;
518
519 clks = (unsigned long)of_device_get_match_data(&pdev->dev);
520
521 scm->core_clk = devm_clk_get(&pdev->dev, "core");
522 if (IS_ERR(scm->core_clk)) {
523 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
524 return PTR_ERR(scm->core_clk);
525
526 if (clks & SCM_HAS_CORE_CLK) {
527 dev_err(&pdev->dev, "failed to acquire core clk\n");
528 return PTR_ERR(scm->core_clk);
529 }
530
531 scm->core_clk = NULL;
532 }
533
534 scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
535 if (IS_ERR(scm->iface_clk)) {
536 if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
537 return PTR_ERR(scm->iface_clk);
538
539 if (clks & SCM_HAS_IFACE_CLK) {
540 dev_err(&pdev->dev, "failed to acquire iface clk\n");
541 return PTR_ERR(scm->iface_clk);
542 }
543
544 scm->iface_clk = NULL;
545 }
546
547 scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
548 if (IS_ERR(scm->bus_clk)) {
549 if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
550 return PTR_ERR(scm->bus_clk);
551
552 if (clks & SCM_HAS_BUS_CLK) {
553 dev_err(&pdev->dev, "failed to acquire bus clk\n");
554 return PTR_ERR(scm->bus_clk);
555 }
556
557 scm->bus_clk = NULL;
558 }
559
560 scm->reset.ops = &qcom_scm_pas_reset_ops;
561 scm->reset.nr_resets = 1;
562 scm->reset.of_node = pdev->dev.of_node;
563 ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
564 if (ret)
565 return ret;
566
567
568 ret = clk_set_rate(scm->core_clk, INT_MAX);
569 if (ret)
570 return ret;
571
572 __scm = scm;
573 __scm->dev = &pdev->dev;
574
575 __qcom_scm_init();
576
577
578
579
580
581
582 if (download_mode)
583 qcom_scm_set_download_mode(true);
584
585 return 0;
586 }
587
static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown: clear download mode so a normal restart follows. */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}
594
/* OF match table; .data carries the SCM_HAS_*_CLK flags for each SoC. */
static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* NOTE(review): no clock flags here — confirm whether apq8064
	   * requires the core clk like msm8660/msm8960 do. */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
618
/* Platform driver; .shutdown clears download mode on clean reboots. */
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};
627
/* Registered at subsys_initcall time so SCM is up before its consumers. */
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);