This source file includes the following definitions:
- qcom_scm_command_to_response
- qcom_scm_get_command_buffer
- qcom_scm_get_response_buffer
- smc
- qcom_scm_call
- qcom_scm_call_atomic1
- qcom_scm_call_atomic2
- qcom_scm_get_version
- __qcom_scm_set_cold_boot_addr
- __qcom_scm_set_warm_boot_addr
- __qcom_scm_cpu_power_down
- __qcom_scm_is_call_available
- __qcom_scm_hdcp_req
- __qcom_scm_init
- __qcom_scm_pas_supported
- __qcom_scm_pas_init_image
- __qcom_scm_pas_mem_setup
- __qcom_scm_pas_auth_and_reset
- __qcom_scm_pas_shutdown
- __qcom_scm_pas_mss_reset
- __qcom_scm_set_dload_mode
- __qcom_scm_set_remote_state
- __qcom_scm_assign_mem
- __qcom_scm_restore_sec_cfg
- __qcom_scm_iommu_secure_ptbl_size
- __qcom_scm_iommu_secure_ptbl_init
- __qcom_scm_io_readl
- __qcom_scm_io_writel
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

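/**
 * struct qcom_scm_command - one SCM command buffer
 * @len:             total available memory for command and response
 * @buf_offset:      start of the command buffer, relative to the start of
 *                   this structure
 * @resp_hdr_offset: start of the response header, relative to the start of
 *                   this structure
 * @id:              command to execute, encoded as (svc_id << 10) | cmd_id
 * @buf:             start of the command buffer
 *
 * The command header is followed in the same allocation by the command
 * buffer, the response header (struct qcom_scm_response) and the response
 * buffer; qcom_scm_call() fills in the offsets accordingly.
 */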
struct qcom_scm_command {
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];
};

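/**
 * struct qcom_scm_response - one SCM response buffer
 * @len:         total available memory for the response
 * @buf_offset:  start of the response data, relative to the start of this
 *               structure
 * @is_complete: non-zero once the secure world has finished processing the
 *               command
 */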
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};

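/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to the response header that follows the command buffer.
 */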
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

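/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */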
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	return (void *)cmd->buf;
}

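/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to the response buffer of a response.
 */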
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

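/*
 * Issue the legacy SMC with the physical address of a command buffer in r2,
 * retrying for as long as the secure world reports that the call was
 * interrupted (QCOM_SCM_INTERRUPTED).
 */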
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3", "r12");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}

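/**
 * qcom_scm_call() - Send an SCM command
 * @dev: device backing the DMA mapping
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing,
 * then copies the response data back into @resp_buf.
 */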
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
			 size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
	dma_addr_t cmd_phys;

	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->len = cpu_to_le32(alloc_len);
	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	rsp = qcom_scm_command_to_response(cmd);

	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cmd_phys)) {
		kfree(cmd);
		return -ENOMEM;
	}

	mutex_lock(&qcom_scm_lock);
	ret = smc(cmd_phys);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	do {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
					sizeof(*rsp), DMA_FROM_DEVICE);
	} while (!rsp->is_complete);

	if (resp_buf) {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
					le32_to_cpu(rsp->buf_offset),
					resp_len, DMA_FROM_DEVICE);
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
		       resp_len);
	}
out:
	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
	kfree(cmd);
	return ret;
}

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				(n & 0xf))

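/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */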
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3", "r12");
	return r0;
}

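/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */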
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;
	register u32 r3 asm("r3") = arg2;

	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
			__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
			: "r12");
	return r0;
}

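/**
 * qcom_scm_get_version() - Get the SCM version
 *
 * Queries the secure world for its version and caches the result, so the
 * SMC is only issued once.
 */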
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3", "r12");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

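/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for CPUs
 * @entry: entry point for the CPUs
 * @cpus: cpumask of CPUs that will use the entry point
 *
 * Sets the cold boot address of the requested CPUs. Any CPU beyond the
 * supported range is cleared from the present mask instead.
 */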
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
				     flags, virt_to_phys(entry));
}

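/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for CPUs
 * @dev: device used for the SCM call
 * @entry: entry point the CPUs resume at when coming out of a power down
 * @cpus: cpumask of CPUs that will use the entry point
 *
 * Sets the warm boot address of the requested CPUs.
 */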
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
				  const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

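	/*
	 * Reassign only when the requested entry point differs from the one
	 * already recorded for a CPU.
	 */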
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

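	/* No change in entry function for any of the requested CPUs */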
	if (!flags)
		return 0;

	cmd.addr = cpu_to_le32(virt_to_phys(entry));
	cmd.flags = cpu_to_le32(flags);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			    &cmd, sizeof(cmd), NULL, 0);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}

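/**
 * __qcom_scm_cpu_power_down() - Power down the calling CPU
 * @flags: cache flush flags, masked with QCOM_SCM_FLUSH_FLAG_MASK
 *
 * Issues the secure-world call that terminates execution on the calling CPU;
 * the CPU resumes at its warm boot entry point when it is next brought up.
 */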
void __qcom_scm_cpu_power_down(u32 flags)
{
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			      flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

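/**
 * __qcom_scm_is_call_available() - Check if an SCM command is implemented
 * @dev: device used for the SCM call
 * @svc_id: service identifier
 * @cmd_id: command identifier
 *
 * Asks the secure world whether the given service/command pair is available
 * and returns its answer (non-zero if available), or a negative error code.
 */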
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
	int ret;
	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
	__le32 ret_val = 0;

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
			    &svc_cmd, sizeof(svc_cmd), &ret_val,
			    sizeof(ret_val));
	if (ret)
		return ret;

	return le32_to_cpu(ret_val);
}

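/**
 * __qcom_scm_hdcp_req() - Send HDCP requests to the secure world
 * @dev: device used for the SCM call
 * @req: array of HDCP requests
 * @req_cnt: number of requests, at most QCOM_SCM_HDCP_MAX_REQ_CNT
 * @resp: response value returned by the secure world
 */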
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
			u32 req_cnt, u32 *resp)
{
	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
			     req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}

void __qcom_scm_init(void)
{
}

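/*
 * Peripheral Authentication Service (PAS) helpers: each call hands the
 * peripheral identifier (and, where needed, the image address or memory
 * region) to the secure world and returns either the transport error or
 * the value reported back by the firmware.
 */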
bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_IS_SUPPORTED_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? false : !!out;
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
			      dma_addr_t metadata_phys)
{
	__le32 scm_ret;
	int ret;
	struct {
		__le32 proc;
		__le32 image_addr;
	} request;

	request.proc = cpu_to_le32(peripheral);
	request.image_addr = cpu_to_le32(metadata_phys);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_INIT_IMAGE_CMD,
			    &request, sizeof(request),
			    &scm_ret, sizeof(scm_ret));

	return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
			     phys_addr_t addr, phys_addr_t size)
{
	__le32 scm_ret;
	int ret;
	struct {
		__le32 proc;
		__le32 addr;
		__le32 len;
	} request;

	request.proc = cpu_to_le32(peripheral);
	request.addr = cpu_to_le32(addr);
	request.len = cpu_to_le32(size);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_MEM_SETUP_CMD,
			    &request, sizeof(request),
			    &scm_ret, sizeof(scm_ret));

	return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_SHUTDOWN_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	__le32 out;
	__le32 in = cpu_to_le32(reset);
	int ret;

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}

int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
				     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
}

int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
	struct {
		__le32 state;
		__le32 id;
	} req;
	__le32 scm_ret = 0;
	int ret;

	req.state = cpu_to_le32(state);
	req.id = cpu_to_le32(id);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
			    &req, sizeof(req), &scm_ret, sizeof(scm_ret));

	return ret ? : le32_to_cpu(scm_ret);
}

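/*
 * The following SCM calls are not implemented by this backend and simply
 * report -ENODEV to the caller.
 */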
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
			  size_t mem_sz, phys_addr_t src, size_t src_sz,
			  phys_addr_t dest, size_t dest_sz)
{
	return -ENODEV;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
			       u32 spare)
{
	return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
				      size_t *size)
{
	return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
				      u32 spare)
{
	return -ENODEV;
}

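/*
 * Secure I/O helpers: route single register reads and writes at a physical
 * address through the secure world.
 */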
int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
			unsigned int *val)
{
	int ret;

	ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
	if (ret >= 0)
		*val = ret;

	return ret < 0 ? ret : 0;
}

int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
				     addr, val);
}