This source file includes following definitions.
- rproc_crash_to_string
- rproc_iommu_fault
- rproc_enable_iommu
- rproc_disable_iommu
- rproc_va_to_pa
- rproc_da_to_va
- rproc_find_carveout_by_name
- rproc_check_carveout_da
- rproc_alloc_vring
- rproc_parse_vring
- rproc_free_vring
- rproc_vdev_do_start
- rproc_vdev_do_stop
- rproc_rvdev_release
- rproc_handle_vdev
- rproc_vdev_release
- rproc_handle_trace
- rproc_handle_devmem
- rproc_alloc_carveout
- rproc_release_carveout
- rproc_handle_carveout
- rproc_add_carveout
- rproc_mem_entry_init
- rproc_of_resm_mem_entry_init
- rproc_handle_resources
- rproc_prepare_subdevices
- rproc_start_subdevices
- rproc_stop_subdevices
- rproc_unprepare_subdevices
- rproc_alloc_registered_carveouts
- rproc_coredump_cleanup
- rproc_resource_cleanup
- rproc_start
- rproc_fw_boot
- rproc_auto_boot_callback
- rproc_trigger_auto_boot
- rproc_stop
- rproc_coredump_add_segment
- rproc_coredump_add_custom_segment
- rproc_coredump
- rproc_trigger_recovery
- rproc_crash_handler_work
- rproc_boot
- rproc_shutdown
- rproc_get_by_phandle
- rproc_get_by_phandle
- rproc_add
- rproc_type_release
- rproc_alloc
- rproc_free
- rproc_put
- rproc_del
- rproc_add_subdev
- rproc_remove_subdev
- rproc_get_by_child
- rproc_report_crash
- remoteproc_init
- remoteproc_exit
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 #define pr_fmt(fmt) "%s: " fmt, __func__
18
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/device.h>
22 #include <linux/slab.h>
23 #include <linux/mutex.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/firmware.h>
26 #include <linux/string.h>
27 #include <linux/debugfs.h>
28 #include <linux/devcoredump.h>
29 #include <linux/remoteproc.h>
30 #include <linux/iommu.h>
31 #include <linux/idr.h>
32 #include <linux/elf.h>
33 #include <linux/crc32.h>
34 #include <linux/of_reserved_mem.h>
35 #include <linux/virtio_ids.h>
36 #include <linux/virtio_ring.h>
37 #include <asm/byteorder.h>
38 #include <linux/platform_device.h>
39
40 #include "remoteproc_internal.h"
41
42 #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL
43
44 static DEFINE_MUTEX(rproc_list_mutex);
45 static LIST_HEAD(rproc_list);
46
47 typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
48 struct resource_table *table, int len);
49 typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
50 void *, int offset, int avail);
51
52 static int rproc_alloc_carveout(struct rproc *rproc,
53 struct rproc_mem_entry *mem);
54 static int rproc_release_carveout(struct rproc *rproc,
55 struct rproc_mem_entry *mem);
56
57
58 static DEFINE_IDA(rproc_dev_index);
59
60 static const char * const rproc_crash_names[] = {
61 [RPROC_MMUFAULT] = "mmufault",
62 [RPROC_WATCHDOG] = "watchdog",
63 [RPROC_FATAL_ERROR] = "fatal error",
64 };
65
66
67 static const char *rproc_crash_to_string(enum rproc_crash_type type)
68 {
69 if (type < ARRAY_SIZE(rproc_crash_names))
70 return rproc_crash_names[type];
71 return "unknown";
72 }
73
74
75
76
77
78
79
80
81
/*
 * IOMMU fault handler for the remote processor's domain.
 *
 * Logs the faulting device address and flags, then reports an MMU-fault
 * crash so the recovery machinery can kick in.  Returning -ENOSYS tells
 * the IOMMU core that the fault was not resolved here.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
			     unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just use it as a hook for recovery.
	 */
	return -ENOSYS;
}
97
/*
 * Allocate an IOMMU domain for the remote processor, install the fault
 * handler and attach the rproc's parent device to it.
 *
 * A remote processor without an IOMMU (!rproc->has_iommu) is not an
 * error; the function simply returns 0.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	if (!rproc->has_iommu) {
		dev_dbg(dev, "iommu not present\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	/* report MMU faults through the rproc crash machinery */
	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}
131
132 static void rproc_disable_iommu(struct rproc *rproc)
133 {
134 struct iommu_domain *domain = rproc->domain;
135 struct device *dev = rproc->dev.parent;
136
137 if (!domain)
138 return;
139
140 iommu_detach_device(domain, dev);
141 iommu_domain_free(domain);
142 }
143
/*
 * rproc_va_to_pa() - translate a kernel virtual address to a physical address
 * @cpu_addr: kernel virtual address (lowmem or vmalloc space)
 *
 * vmalloc memory is not physically contiguous, so those addresses are
 * resolved via the backing page plus the in-page offset; anything else
 * is assumed to be in the linear map and goes through virt_to_phys()
 * (with a WARN if the address is not valid for that).
 */
phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr)) {
		return page_to_phys(vmalloc_to_page(cpu_addr)) +
				    offset_in_page(cpu_addr);
	}

	/* virt_to_phys is only meaningful for linear-map addresses */
	WARN_ON(!virt_addr_valid(cpu_addr));
	return virt_to_phys(cpu_addr);
}
EXPORT_SYMBOL(rproc_va_to_pa);
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
/*
 * rproc_da_to_va() - translate a remote-processor device address to a
 * kernel virtual address
 * @rproc: handle of the remote processor
 * @da: device address to translate
 * @len: length of the region that will be accessed through the result
 *
 * A platform-specific ops->da_to_va() hook, if provided, gets first shot
 * at the translation.  Otherwise the registered carveouts are scanned for
 * one whose allocated range fully contains [da, da + len).
 *
 * Return: kernel virtual address on success, NULL if no mapping covers
 * the requested range.
 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	if (rproc->ops->da_to_va) {
		ptr = rproc->ops->da_to_va(rproc, da, len);
		if (ptr)
			goto out;
	}

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/* skip carveouts that have not been allocated yet */
		if (!carveout->va)
			continue;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243 struct rproc_mem_entry *
244 rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
245 {
246 va_list args;
247 char _name[32];
248 struct rproc_mem_entry *carveout, *mem = NULL;
249
250 if (!name)
251 return NULL;
252
253 va_start(args, name);
254 vsnprintf(_name, sizeof(_name), name, args);
255 va_end(args);
256
257 list_for_each_entry(carveout, &rproc->carveouts, node) {
258
259 if (!strcmp(carveout->name, _name)) {
260 mem = carveout;
261 break;
262 }
263 }
264
265 return mem;
266 }
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
/*
 * rproc_check_carveout_da() - verify that a registered carveout can satisfy
 * a resource-table request for a given device address and length
 * @rproc: handle of the remote processor
 * @mem: the pre-registered carveout to check against
 * @da: requested device address (FW_RSC_ADDR_ANY means "don't care")
 * @len: requested length
 *
 * Return: 0 if the request fits inside the carveout, -EINVAL otherwise.
 */
static int rproc_check_carveout_da(struct rproc *rproc,
				   struct rproc_mem_entry *mem, u32 da, u32 len)
{
	struct device *dev = &rproc->dev;
	int delta;

	/* requested length can never exceed the carveout */
	if (len > mem->len) {
		dev_err(dev, "Registered carveout doesn't fit len request\n");
		return -EINVAL;
	}

	if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
		/* firmware wants a fixed da but the carveout has none */
		return -EINVAL;
	} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
		delta = da - mem->da;

		/* requested da must not start before the carveout */
		if (delta < 0) {
			dev_err(dev,
				"Registered carveout doesn't fit da request\n");
			return -EINVAL;
		}

		/* and [da, da+len) must end inside the carveout */
		if (delta + len > mem->len) {
			dev_err(dev,
				"Registered carveout doesn't fit len request\n");
			return -EINVAL;
		}
	}

	return 0;
}
316
/*
 * rproc_alloc_vring() - set up the memory and notify id for vring @i of @rvdev
 * @rvdev: the remote virtio device owning the vring
 * @i: index of the vring inside rvdev->vring[]
 *
 * Reuses a firmware-registered "vdev%dvring%d" carveout when one exists
 * (after validating it against the resource-table da), otherwise registers
 * a new carveout to be allocated later.  Also allocates a unique notifyid
 * via the rproc IDR and publishes it in the resource table so the remote
 * side can use it.
 *
 * Return: 0 on success, negative errno on failure.
 */
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	int ret, size, notifyid;
	struct rproc_mem_entry *mem;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for pre-registered carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register carveout in list; memory is allocated later */
		mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*
	 * Assign an rproc-wide unique index for this vring.  Start at 0 so
	 * notify ids stay small; they may be communicated to the remote side.
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	rvring->notifyid = notifyid;

	/* Let the rproc know the notifyid of this vring. */
	rsc->vring[i].notifyid = notifyid;
	return 0;
}
374
/*
 * rproc_parse_vring() - validate vring @i of a vdev resource and cache its
 * parameters (queue size, alignment, owning vdev) in rvdev->vring[i].
 *
 * Return: 0 on success, -EINVAL if the queue size or alignment is zero.
 */
static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
		i, vring->da, vring->num, vring->align);

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
			vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}
399
/*
 * rproc_free_vring() - release a vring's notifyid and scrub its resource
 * table entry so a (possibly crashed) remote processor cannot keep using
 * stale vring information on the next boot.
 */
void rproc_free_vring(struct rproc_vring *rvring)
{
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	idr_remove(&rproc->notifyids, rvring->notifyid);

	/* reset resource entry info */
	rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
	rsc->vring[idx].da = 0;
	rsc->vring[idx].notifyid = -1;
}
413
414 static int rproc_vdev_do_start(struct rproc_subdev *subdev)
415 {
416 struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
417
418 return rproc_add_virtio_dev(rvdev, rvdev->id);
419 }
420
/*
 * Subdevice stop hook: unregister every virtio device hanging off this
 * remote vdev.  Failure is only logged since the stop path cannot abort.
 */
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	int ret;

	ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
}
430
431
432
433
434
435
/*
 * Device release callback for a remote vdev: drop any reserved-memory
 * region assigned to the device, then free the rvdev itself.  Called by
 * the driver core when the last reference to rvdev->dev is dropped.
 */
static void rproc_rvdev_release(struct device *dev)
{
	struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);

	of_reserved_mem_device_release(dev);

	kfree(rvdev);
}
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
/*
 * rproc_handle_vdev() - handle a VDEV entry in the firmware resource table
 * @rproc: the remote processor
 * @rsc: the vdev resource descriptor
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for sanity checking the image)
 *
 * Validates the descriptor, creates and registers a struct rproc_vdev with
 * its own child device (used for DMA allocations on behalf of the virtio
 * device), parses and allocates the vrings, and registers the vdev as an
 * rproc subdevice so it is started/stopped with the remote processor.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			     int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;
	char name[16];

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
			+ rsc->config_len > avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	kref_init(&rvdev->refcount);

	rvdev->id = rsc->id;
	rvdev->rproc = rproc;
	rvdev->index = rproc->nb_vdev++;

	/* Initialise vdev subdevice */
	snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
	rvdev->dev.parent = &rproc->dev;
	rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
	rvdev->dev.release = rproc_rvdev_release;
	dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
	dev_set_drvdata(&rvdev->dev, rvdev);

	ret = device_register(&rvdev->dev);
	if (ret) {
		/* after device_register(), release must go through put_device() */
		put_device(&rvdev->dev);
		return ret;
	}

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(&rvdev->dev,
					   dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev,
			 "Failed to set DMA mask %llx. Trying to continue... %x\n",
			 dma_get_mask(rproc->dev.parent), ret);
	}

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset*/
	rvdev->rsc_offset = offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	return 0;

unwind_vring_allocations:
	/* only free the vrings allocated before the failure */
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);
free_rvdev:
	device_unregister(&rvdev->dev);
	return ret;
}
569
/*
 * rproc_vdev_release() - kref release callback for a remote vdev
 *
 * Frees every vring slot, detaches the vdev from the rproc's subdevice
 * and rvdev lists, and unregisters the child device (which ultimately
 * frees the rvdev via rproc_rvdev_release()).
 */
void rproc_vdev_release(struct kref *ref)
{
	struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
	struct rproc_vring *rvring;
	struct rproc *rproc = rvdev->rproc;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	list_del(&rvdev->node);
	device_unregister(&rvdev->dev);
}
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
/*
 * rproc_handle_trace() - handle a TRACE entry in the firmware resource table
 * @rproc: the remote processor
 * @rsc: the trace resource descriptor
 * @offset: offset of @rsc within the resource table (unused here)
 * @avail: size of available data (for sanity checking the image)
 *
 * Records the trace buffer's device address and length and creates a
 * debugfs file ("trace0", "trace1", ...) through which it can be read.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
			      int offset, int avail)
{
	struct rproc_debug_trace *trace;
	struct device *dev = &rproc->dev;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set trace buffer da and len from the resource entry */
	trace->trace_mem.len = rsc->len;
	trace->trace_mem.da = rsc->da;

	/* set pointer on rproc device */
	trace->rproc = rproc;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->tfile = rproc_create_trace_file(name, rproc, trace);
	if (!trace->tfile) {
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
		name, rsc->da, rsc->len);

	return 0;
}
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
/*
 * rproc_handle_devmem() - handle a DEVMEM entry in the firmware resource table
 * @rproc: the remote processor
 * @rsc: the devmem resource descriptor
 * @offset: offset of @rsc within the resource table (unused here)
 * @avail: size of available data (for sanity checking the image)
 *
 * Maps a physically-contiguous region into the remote processor's IOMMU
 * at the device address the firmware asked for, and tracks the mapping so
 * it can be unmapped in rproc_resource_cleanup().  Only meaningful for
 * processors behind an IOMMU, hence the -EINVAL when no domain exists.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
			       int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * Only da and len are recorded: that is all the cleanup path needs
	 * in order to iommu_unmap() this region later on.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
		rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}
729
730
731
732
733
734
735
736
737
/*
 * rproc_alloc_carveout() - allocate DMA-coherent memory for a carveout
 * @rproc: rproc handle
 * @mem: the carveout entry to back with memory
 *
 * Allocates mem->len bytes of coherent memory, then:
 *  - without an IOMMU, warns if the DMA address does not match a fixed
 *    device address the firmware requested (we cannot remap in that case);
 *  - with an IOMMU and a fixed device address, maps the allocation at
 *    mem->da and records the mapping for later cleanup;
 *  - for FW_RSC_ADDR_ANY, publishes the (32-bit truncated) DMA address
 *    as the carveout's device address.
 *
 * On success mem->va/mem->dma are filled in.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%x\n", mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
		va, &dma, mem->len);

	if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
		/*
		 * Without an IOMMU we cannot place the allocation at the
		 * requested device address; all we can do is warn when the
		 * DMA address the allocator picked doesn't match.
		 */
		if (mem->da != (u32)dma)
			dev_warn(dev->parent,
				 "Allocated carveout doesn't fit device address request\n");
	}

	/*
	 * With an IOMMU, map the allocation at the exact device address the
	 * firmware requested, and remember the mapping so the cleanup path
	 * can iommu_unmap() it.
	 */
	if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/* only da and len are needed to undo the mapping later */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	}

	if (mem->da == FW_RSC_ADDR_ANY) {
		/* Update device address as undefined by requester */
		if ((u64)dma & HIGH_BITS_MASK)
			dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");

		mem->da = (u32)dma;
	}

	mem->dma = dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}
834
835
836
837
838
839
840
841
842
843 static int rproc_release_carveout(struct rproc *rproc,
844 struct rproc_mem_entry *mem)
845 {
846 struct device *dev = &rproc->dev;
847
848
849 dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma);
850 return 0;
851 }
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
/*
 * rproc_handle_carveout() - handle a CARVEOUT entry in the resource table
 * @rproc: rproc handle
 * @rsc: the carveout resource descriptor
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for image validation)
 *
 * If a carveout with the same name was pre-registered by the platform
 * driver, associate it with this resource-table entry (after checking it
 * can satisfy the requested da/len).  Otherwise register a new carveout
 * whose memory will be allocated by rproc_alloc_registered_carveouts().
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_handle_carveout(struct rproc *rproc,
				 struct fw_rsc_carveout *rsc,
				 int offset, int avail)
{
	struct rproc_mem_entry *carveout;
	struct device *dev = &rproc->dev;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	/*
	 * Check carveout rsc already part of a registered carveout,
	 * Search by name, then check the da and length
	 */
	carveout = rproc_find_carveout_by_name(rproc, rsc->name);

	if (carveout) {
		/* a carveout may be tied to at most one rsc entry */
		if (carveout->rsc_offset != FW_RSC_ADDR_ANY) {
			dev_err(dev,
				"Carveout already associated to resource table\n");
			return -ENOMEM;
		}

		if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len))
			return -ENOMEM;

		/* Update memory carveout with resource table info */
		carveout->rsc_offset = offset;
		carveout->flags = rsc->flags;

		return 0;
	}

	/* Register carveout in list; memory is allocated later */
	carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da,
					rproc_alloc_carveout,
					rproc_release_carveout, rsc->name);
	if (!carveout) {
		dev_err(dev, "Can't allocate memory entry structure\n");
		return -ENOMEM;
	}

	carveout->flags = rsc->flags;
	carveout->rsc_offset = offset;
	rproc_add_carveout(rproc, carveout);

	return 0;
}
930
931
932
933
934
935
936
937
938
/**
 * rproc_add_carveout() - register a carveout memory entry with @rproc
 * @rproc: rproc handle
 * @mem: the memory entry to register
 *
 * Appends @mem to the rproc's carveout list; allocation (if any) happens
 * later in rproc_alloc_registered_carveouts().
 */
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	list_add_tail(&mem->node, &rproc->carveouts);
}
EXPORT_SYMBOL(rproc_add_carveout);
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
/**
 * rproc_mem_entry_init() - allocate and initialize an rproc_mem_entry
 * @dev: device owning the entry (used only by callers for messages)
 * @va: pre-existing virtual address, or NULL if allocated later
 * @dma: pre-existing DMA address, or 0 if allocated later
 * @len: length of the region in bytes
 * @da: device address, or FW_RSC_ADDR_ANY
 * @alloc: allocation callback invoked before boot (may be NULL)
 * @release: release callback invoked on cleanup (may be NULL)
 * @name: printf format for the entry name, followed by its arguments
 *
 * The entry starts unassociated with any resource table entry
 * (rsc_offset = FW_RSC_ADDR_ANY) and with no reserved-memory index
 * (of_resm_idx = -1).  The formatted name is truncated to the size of
 * the entry's name field.
 *
 * Return: the new entry, or NULL on allocation failure.  Caller owns it.
 */
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
		     void *va, dma_addr_t dma, int len, u32 da,
		     int (*alloc)(struct rproc *, struct rproc_mem_entry *),
		     int (*release)(struct rproc *, struct rproc_mem_entry *),
		     const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->va = va;
	mem->dma = dma;
	mem->da = da;
	mem->len = len;
	mem->alloc = alloc;
	mem->release = release;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = -1;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_mem_entry_init);
989
990
991
992
993
994
995
996
997
998
999
1000
1001
/**
 * rproc_of_resm_mem_entry_init() - allocate an rproc_mem_entry backed by a
 * reserved-memory region
 * @dev: device owning the entry (unused here, kept for API symmetry)
 * @of_resm_idx: index of the reserved-memory region in the device tree
 * @len: length of the region in bytes
 * @da: device address, or FW_RSC_ADDR_ANY
 * @name: printf format for the entry name, followed by its arguments
 *
 * Unlike rproc_mem_entry_init(), no alloc/release callbacks are set: the
 * memory comes from the DT reserved-memory region identified by
 * @of_resm_idx.
 *
 * Return: the new entry, or NULL on allocation failure.  Caller owns it.
 */
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
			     u32 da, const char *name, ...)
{
	struct rproc_mem_entry *mem;
	va_list args;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return mem;

	mem->da = da;
	mem->len = len;
	mem->rsc_offset = FW_RSC_ADDR_ANY;
	mem->of_resm_idx = of_resm_idx;

	va_start(args, name);
	vsnprintf(mem->name, sizeof(mem->name), name, args);
	va_end(args);

	return mem;
}
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
1025
1026
1027
1028
1029
1030 static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
1031 [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
1032 [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
1033 [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
1034 [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
1035 };
1036
1037
/*
 * rproc_handle_resources() - walk the cached resource table and dispatch
 * each entry to the matching handler in @handlers
 * @rproc: the remote processor
 * @handlers: per-type handler table (indexed by resource type)
 *
 * Vendor-specific resources (RSC_VENDOR_START..RSC_VENDOR_END) are routed
 * through rproc_handle_rsc(); RSC_HANDLED means consumed, a negative value
 * aborts the walk, anything else is logged and skipped.  Unknown standard
 * types and types without a handler are silently skipped.
 *
 * Return: 0 on success, first handler error otherwise.
 */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_VENDOR_START &&
		    hdr->type <= RSC_VENDOR_END) {
			ret = rproc_handle_rsc(rproc, hdr->type, rsc,
					       offset + sizeof(*hdr), avail);
			if (ret == RSC_HANDLED)
				continue;
			else if (ret < 0)
				break;

			dev_warn(dev, "unsupported vendor resource %d\n",
				 hdr->type);
			continue;
		}

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
1092
/*
 * Call the optional prepare() hook of every registered subdevice, in
 * registration order.  On failure, unprepare the subdevices that were
 * already prepared (in reverse order) and return the error.
 */
static int rproc_prepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->prepare) {
			ret = subdev->prepare(subdev);
			if (ret)
				goto unroll_preparation;
		}
	}

	return 0;

unroll_preparation:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}

	return ret;
}
1116
/*
 * Call the optional start() hook of every registered subdevice, in
 * registration order.  On failure, stop the subdevices that were already
 * started (in reverse order, with crashed=true) and return the error.
 */
static int rproc_start_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->start) {
			ret = subdev->start(subdev);
			if (ret)
				goto unroll_registration;
		}
	}

	return 0;

unroll_registration:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, true);
	}

	return ret;
}
1140
1141 static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
1142 {
1143 struct rproc_subdev *subdev;
1144
1145 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1146 if (subdev->stop)
1147 subdev->stop(subdev, crashed);
1148 }
1149 }
1150
1151 static void rproc_unprepare_subdevices(struct rproc *rproc)
1152 {
1153 struct rproc_subdev *subdev;
1154
1155 list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
1156 if (subdev->unprepare)
1157 subdev->unprepare(subdev);
1158 }
1159 }
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
/*
 * rproc_alloc_registered_carveouts() - allocate every registered carveout
 * @rproc: rproc handle
 *
 * For each carveout in the list, run its alloc() callback (if any), and
 * for carveouts associated with a resource-table entry, write the final
 * pa/da/len back into the table so the remote processor sees the real
 * addresses.  Physical addresses are truncated to 32 bits to match the
 * resource-table format (with a warning if high bits are lost).
 *
 * Return: 0 on success, -ENOMEM if any carveout allocation fails.
 */
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct fw_rsc_carveout *rsc;
	struct device *dev = &rproc->dev;
	u64 pa;
	int ret;

	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->alloc) {
			ret = entry->alloc(rproc, entry);
			if (ret) {
				dev_err(dev, "Unable to allocate carveout %s: %d\n",
					entry->name, ret);
				return -ENOMEM;
			}
		}

		if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
			/* update resource table with the allocated addresses */
			rsc = (void *)rproc->table_ptr + entry->rsc_offset;

			/*
			 * Prefer the CPU-translated physical address when a
			 * virtual address exists; fall back to the DMA address
			 * otherwise.
			 */
			if (entry->va)
				pa = (u64)rproc_va_to_pa(entry->va);
			else
				pa = (u64)entry->dma;

			if (((u64)pa) & HIGH_BITS_MASK)
				dev_warn(dev,
					 "Physical address cast in 32bit to fit resource table format\n");

			rsc->pa = (u32)pa;
			rsc->da = entry->da;
			rsc->len = entry->len;
		}
	}

	return 0;
}
1230
1231
1232
1233
1234
1235 static void rproc_coredump_cleanup(struct rproc *rproc)
1236 {
1237 struct rproc_dump_segment *entry, *tmp;
1238
1239 list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
1240 list_del(&entry->node);
1241 kfree(entry);
1242 }
1243 }
1244
1245
1246
1247
1248
1249
1250
1251
/*
 * rproc_resource_cleanup() - release all resources acquired while handling
 * the firmware's resource table
 * @rproc: rproc handle
 *
 * Order matters: trace files first, then IOMMU mappings, then carveouts
 * (via their release callbacks), then remote vdevs (by dropping the kref),
 * and finally any coredump segment list.
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct rproc_debug_trace *trace, *ttmp;
	struct rproc_vdev *rvdev, *rvtmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
		rproc_remove_trace_file(trace->tfile);
		rproc->num_traces--;
		list_del(&trace->node);
		kfree(trace);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
				unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		kref_put(&rvdev->refcount, rproc_vdev_release);

	rproc_coredump_cleanup(rproc);
}
1296
/*
 * rproc_start() - load the firmware segments and bring the processor up
 * @rproc: the remote processor
 * @fw: the firmware image
 *
 * Loads the program segments, installs the cached resource table at the
 * device-addressable location (if the firmware defines one), prepares and
 * starts all subdevices around the platform start() hook, and marks the
 * rproc RPROC_RUNNING.  Unwinds everything in reverse order on failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
	struct resource_table *loaded_table;
	struct device *dev = &rproc->dev;
	int ret;

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		return ret;
	}

	/*
	 * The starting device is using the cached resource table until this
	 * point.  If the loaded firmware exposes a device-addressable copy,
	 * sync the cached table into it and switch table_ptr over, so the
	 * remote processor and the kernel share one live table.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table) {
		memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
		rproc->table_ptr = loaded_table;
	}

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto reset_table_ptr;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
reset_table_ptr:
	rproc->table_ptr = rproc->cached_table;

	return ret;
}
1361
1362
1363
1364
/*
 * rproc_fw_boot() - take the firmware image and boot the remote processor
 * @rproc: the remote processor
 * @fw: the firmware image
 *
 * Sanity-checks the image, enables the IOMMU (if any), parses the
 * firmware's resource table, handles its resources, allocates all
 * registered carveouts, and finally starts the processor.  On any failure
 * after resource handling began, all acquired resources are released.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* Load resource table, core dump segment list etc from the firmware */
	ret = rproc_parse_fw(rproc, fw);
	if (ret)
		goto disable_iommu;

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
disable_iommu:
	rproc_disable_iommu(rproc);
	return ret;
}
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439 static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
1440 {
1441 struct rproc *rproc = context;
1442
1443 rproc_boot(rproc);
1444
1445 release_firmware(fw);
1446 }
1447
/*
 * rproc_trigger_auto_boot() - kick off an asynchronous auto-boot
 * @rproc: the remote processor
 *
 * Requests the firmware asynchronously so rproc_add() does not block on
 * userspace (the firmware might not even be present yet); the actual boot
 * happens in rproc_auto_boot_callback().
 *
 * Return: 0 if the request was queued, negative errno otherwise.
 */
static int rproc_trigger_auto_boot(struct rproc *rproc)
{
	int ret;

	/*
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_auto_boot_callback);
	if (ret < 0)
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);

	return ret;
}
1464
/*
 * rproc_stop() - stop the remote processor
 * @rproc: the remote processor
 * @crashed: true if stopping due to a crash (passed on to subdevice hooks)
 *
 * Stops subdevices, switches back to the cached resource table (the loaded
 * one goes away with the processor), invokes the platform stop() hook,
 * unprepares subdevices and marks the rproc RPROC_OFFLINE.
 *
 * Return: 0 on success, the platform stop() error otherwise.
 */
static int rproc_stop(struct rproc *rproc, bool crashed)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, crashed);

	/* the installed resource table is about to disappear; fall back */
	rproc->table_ptr = rproc->cached_table;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		dev_err(dev, "can't stop rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

	return 0;
}
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503 int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
1504 {
1505 struct rproc_dump_segment *segment;
1506
1507 segment = kzalloc(sizeof(*segment), GFP_KERNEL);
1508 if (!segment)
1509 return -ENOMEM;
1510
1511 segment->da = da;
1512 segment->size = size;
1513
1514 list_add_tail(&segment->node, &rproc->dump_segments);
1515
1516 return 0;
1517 }
1518 EXPORT_SYMBOL(rproc_coredump_add_segment);
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534 int rproc_coredump_add_custom_segment(struct rproc *rproc,
1535 dma_addr_t da, size_t size,
1536 void (*dumpfn)(struct rproc *rproc,
1537 struct rproc_dump_segment *segment,
1538 void *dest),
1539 void *priv)
1540 {
1541 struct rproc_dump_segment *segment;
1542
1543 segment = kzalloc(sizeof(*segment), GFP_KERNEL);
1544 if (!segment)
1545 return -ENOMEM;
1546
1547 segment->da = da;
1548 segment->size = size;
1549 segment->priv = priv;
1550 segment->dump = dumpfn;
1551
1552 list_add_tail(&segment->node, &rproc->dump_segments);
1553
1554 return 0;
1555 }
1556 EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
1557
1558
1559
1560
1561
1562
1563
1564
/*
 * rproc_coredump() - build an ELF32 core file from the registered dump
 * segments and hand it to the devcoredump framework
 * @rproc: rproc handle
 *
 * Each registered segment becomes one PT_LOAD program header; segment
 * contents come from the segment's dump() callback if set, otherwise from
 * rproc_da_to_va().  Unreadable segments are filled with 0xff.  The buffer
 * ownership passes to dev_coredumpv(), which frees it.
 */
static void rproc_coredump(struct rproc *rproc)
{
	struct rproc_dump_segment *segment;
	struct elf32_phdr *phdr;
	struct elf32_hdr *ehdr;
	size_t data_size;
	size_t offset;
	void *data;
	void *ptr;
	int phnum = 0;

	if (list_empty(&rproc->dump_segments))
		return;

	/* total size: ELF header + one phdr + payload per segment */
	data_size = sizeof(*ehdr);
	list_for_each_entry(segment, &rproc->dump_segments, node) {
		data_size += sizeof(*phdr) + segment->size;

		phnum++;
	}

	data = vmalloc(data_size);
	if (!data)
		return;

	ehdr = data;

	/* fill in the ELF32 little-endian core-file header */
	memset(ehdr, 0, sizeof(*ehdr));
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS32;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = EM_NONE;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_entry = rproc->bootaddr;
	ehdr->e_phoff = sizeof(*ehdr);
	ehdr->e_ehsize = sizeof(*ehdr);
	ehdr->e_phentsize = sizeof(*phdr);
	ehdr->e_phnum = phnum;

	/* segment data starts right after the program header table */
	phdr = data + ehdr->e_phoff;
	offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
	list_for_each_entry(segment, &rproc->dump_segments, node) {
		memset(phdr, 0, sizeof(*phdr));
		phdr->p_type = PT_LOAD;
		phdr->p_offset = offset;
		phdr->p_vaddr = segment->da;
		phdr->p_paddr = segment->da;
		phdr->p_filesz = segment->size;
		phdr->p_memsz = segment->size;
		phdr->p_flags = PF_R | PF_W | PF_X;
		phdr->p_align = 0;

		if (segment->dump) {
			segment->dump(rproc, segment, data + offset);
		} else {
			ptr = rproc_da_to_va(rproc, segment->da, segment->size);
			if (!ptr) {
				dev_err(&rproc->dev,
					"invalid coredump segment (%pad, %zu)\n",
					&segment->da, segment->size);
				/* poison unreadable segments instead of aborting */
				memset(data + offset, 0xff, segment->size);
			} else {
				memcpy(data + offset, ptr, segment->size);
			}
		}

		offset += phdr->p_filesz;
		phdr++;
	}

	/* dev_coredumpv takes ownership of 'data' and vfrees it */
	dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
}
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: rproc handle
 *
 * Recovers the remote processor by stopping it (marked as crashed),
 * generating a coredump of the registered segments, re-requesting the
 * firmware image, and starting the processor again.
 *
 * Returns 0 on success, or a negative errno from locking, stopping,
 * firmware loading or restarting.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev = &rproc->dev;
	int ret;

	dev_err(dev, "recovering %s\n", rproc->name);

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret)
		return ret;

	ret = rproc_stop(rproc, true);
	if (ret)
		goto unlock_mutex;

	/* generate coredump */
	rproc_coredump(rproc);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto unlock_mutex;
	}

	/* boot the remote processor up again */
	ret = rproc_start(rproc, firmware_p);

	release_firmware(firmware_p);

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
1686
1687
1688
1689
1690
1691
1692
1693 static void rproc_crash_handler_work(struct work_struct *work)
1694 {
1695 struct rproc *rproc = container_of(work, struct rproc, crash_handler);
1696 struct device *dev = &rproc->dev;
1697
1698 dev_dbg(dev, "enter %s\n", __func__);
1699
1700 mutex_lock(&rproc->lock);
1701
1702 if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
1703
1704 mutex_unlock(&rproc->lock);
1705 return;
1706 }
1707
1708 rproc->state = RPROC_CRASHED;
1709 dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
1710 rproc->name);
1711
1712 mutex_unlock(&rproc->lock);
1713
1714 if (!rproc->recovery_disabled)
1715 rproc_trigger_recovery(rproc);
1716 }
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * The rproc->power refcount counts the users: if the processor is already
 * powered up, only the refcount is bumped and the function returns
 * successfully. Booting a handle in the RPROC_DELETED state is refused.
 *
 * Returns 0 on success, and an appropriate negative errno otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state == RPROC_DELETED) {
		ret = -ENODEV;
		dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	/* on failure, undo the power refcount increment taken above */
	if (ret)
		atomic_dec(&rproc->power);
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Every call to rproc_boot() must (eventually) be balanced by a call
 * here. The processor is only really powered off once the rproc->power
 * refcount drops to zero; until then this only decrements the refcount.
 *
 * If the platform stop handler fails, the refcount is restored so a
 * later retry stays balanced.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	ret = rproc_stop(rproc, false);
	if (ret) {
		/* restore the refcount so the stop can be retried */
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
}
EXPORT_SYMBOL(rproc_shutdown);
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
#ifdef CONFIG_OF
/**
 * rproc_get_by_phandle() - find a remote processor by phandle
 * @phandle: phandle of the remote processor's DT node
 *
 * Finds an rproc handle by scanning the global rproc list for an entry
 * whose parent device matches the node behind @phandle. On success a
 * reference is taken on the rproc device and a module reference on its
 * parent driver; both are released by rproc_put().
 *
 * Returns the rproc handle on success, and NULL on failure.
 */
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	mutex_lock(&rproc_list_mutex);
	list_for_each_entry(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			/* prevent the underlying driver from being unloaded */
			if (!try_module_get(r->dev.parent->driver->owner)) {
				dev_err(&r->dev, "can't get owner\n");
				break;
			}

			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	mutex_unlock(&rproc_list_mutex);

	of_node_put(np);

	return rproc;
}
#else
/* stub for kernels built without device-tree support */
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 int rproc_add(struct rproc *rproc)
1907 {
1908 struct device *dev = &rproc->dev;
1909 int ret;
1910
1911 ret = device_add(dev);
1912 if (ret < 0)
1913 return ret;
1914
1915 dev_info(dev, "%s is available\n", rproc->name);
1916
1917
1918 rproc_create_debug_dir(rproc);
1919
1920
1921 if (rproc->auto_boot) {
1922 ret = rproc_trigger_auto_boot(rproc);
1923 if (ret < 0)
1924 return ret;
1925 }
1926
1927
1928 mutex_lock(&rproc_list_mutex);
1929 list_add(&rproc->node, &rproc_list);
1930 mutex_unlock(&rproc_list_mutex);
1931
1932 return 0;
1933 }
1934 EXPORT_SYMBOL(rproc_add);
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
/*
 * Device release callback: frees everything rproc_alloc() set up. Runs
 * once the last reference on rproc->dev is dropped (via rproc_free() or
 * rproc_put()).
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	idr_destroy(&rproc->notifyids);

	/* index is negative when ida_simple_get() failed in rproc_alloc() */
	if (rproc->index >= 0)
		ida_simple_remove(&rproc_dev_index, rproc->index);

	kfree(rproc->firmware);
	kfree(rproc->ops);
	kfree(rproc);
}

/* device type shared by all rproc devices; also used by rproc_get_by_child() */
static const struct device_type rproc_type = {
	.name = "remoteproc",
	.release = rproc_type_release,
};
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989 struct rproc *rproc_alloc(struct device *dev, const char *name,
1990 const struct rproc_ops *ops,
1991 const char *firmware, int len)
1992 {
1993 struct rproc *rproc;
1994 char *p, *template = "rproc-%s-fw";
1995 int name_len;
1996
1997 if (!dev || !name || !ops)
1998 return NULL;
1999
2000 if (!firmware) {
2001
2002
2003
2004
2005 name_len = strlen(name) + strlen(template) - 2 + 1;
2006 p = kmalloc(name_len, GFP_KERNEL);
2007 if (!p)
2008 return NULL;
2009 snprintf(p, name_len, template, name);
2010 } else {
2011 p = kstrdup(firmware, GFP_KERNEL);
2012 if (!p)
2013 return NULL;
2014 }
2015
2016 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
2017 if (!rproc) {
2018 kfree(p);
2019 return NULL;
2020 }
2021
2022 rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
2023 if (!rproc->ops) {
2024 kfree(p);
2025 kfree(rproc);
2026 return NULL;
2027 }
2028
2029 rproc->firmware = p;
2030 rproc->name = name;
2031 rproc->priv = &rproc[1];
2032 rproc->auto_boot = true;
2033
2034 device_initialize(&rproc->dev);
2035 rproc->dev.parent = dev;
2036 rproc->dev.type = &rproc_type;
2037 rproc->dev.class = &rproc_class;
2038 rproc->dev.driver_data = rproc;
2039
2040
2041 rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
2042 if (rproc->index < 0) {
2043 dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
2044 put_device(&rproc->dev);
2045 return NULL;
2046 }
2047
2048 dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
2049
2050 atomic_set(&rproc->power, 0);
2051
2052
2053 if (!rproc->ops->load) {
2054 rproc->ops->load = rproc_elf_load_segments;
2055 rproc->ops->parse_fw = rproc_elf_load_rsc_table;
2056 rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
2057 rproc->ops->sanity_check = rproc_elf_sanity_check;
2058 rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
2059 }
2060
2061 mutex_init(&rproc->lock);
2062
2063 idr_init(&rproc->notifyids);
2064
2065 INIT_LIST_HEAD(&rproc->carveouts);
2066 INIT_LIST_HEAD(&rproc->mappings);
2067 INIT_LIST_HEAD(&rproc->traces);
2068 INIT_LIST_HEAD(&rproc->rvdevs);
2069 INIT_LIST_HEAD(&rproc->subdevs);
2070 INIT_LIST_HEAD(&rproc->dump_segments);
2071
2072 INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
2073
2074 rproc->state = RPROC_OFFLINE;
2075
2076 return rproc;
2077 }
2078 EXPORT_SYMBOL(rproc_alloc);
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
/**
 * rproc_free() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * Drops the reference held since rproc_alloc(); the actual freeing
 * happens in the device release callback (rproc_type_release()).
 */
void rproc_free(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
/**
 * rproc_put() - release a reference obtained with rproc_get_by_phandle()
 * @rproc: the remote processor handle
 *
 * Releases the module reference taken on the parent driver and the
 * device reference taken in rproc_get_by_phandle().
 */
void rproc_put(struct rproc *rproc)
{
	module_put(rproc->dev.parent->driver->owner);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Reverses rproc_add(): shuts down an auto-booted processor, marks the
 * state RPROC_DELETED (so rproc_boot() refuses new users), removes the
 * debugfs entries, takes the rproc off the global list, and deletes the
 * device. The rproc memory itself is freed later via rproc_free() /
 * rproc_put() through the release callback.
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_del(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* if rproc is marked always-on, rproc_add() booted it */
	if (rproc->auto_boot)
		rproc_shutdown(rproc);

	mutex_lock(&rproc->lock);
	rproc->state = RPROC_DELETED;
	mutex_unlock(&rproc->lock);

	rproc_delete_debug_dir(rproc);

	/* remove from the list so rproc_get_by_phandle() can't find it */
	mutex_lock(&rproc_list_mutex);
	list_del(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);
2152
2153
2154
2155
2156
2157
2158
2159
/**
 * rproc_add_subdev() - add a subdevice to a remoteproc
 * @rproc: rproc handle to add the subdevice to
 * @subdev: subdev handle to register
 *
 * The subdevice is appended to rproc->subdevs; its callbacks are invoked
 * by the subdevice prepare/start/stop/unprepare helpers during boot and
 * shutdown.
 */
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_add_tail(&subdev->node, &rproc->subdevs);
}
EXPORT_SYMBOL(rproc_add_subdev);
2165
2166
2167
2168
2169
2170
/**
 * rproc_remove_subdev() - remove a subdevice from a remoteproc
 * @rproc: rproc handle to remove the subdevice from (unused, kept for
 *	   API symmetry with rproc_add_subdev())
 * @subdev: subdev handle, previously registered with rproc_add_subdev()
 */
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev)
{
	list_del(&subdev->node);
}
EXPORT_SYMBOL(rproc_remove_subdev);
2176
2177
2178
2179
2180
2181
2182
2183 struct rproc *rproc_get_by_child(struct device *dev)
2184 {
2185 for (dev = dev->parent; dev; dev = dev->parent) {
2186 if (dev->type == &rproc_type)
2187 return dev->driver_data;
2188 }
2189
2190 return NULL;
2191 }
2192 EXPORT_SYMBOL(rproc_get_by_child);
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * Platform drivers call this when a crash of the remote processor is
 * detected. The heavy lifting is deferred to the crash handler work
 * item via schedule_work(), so this function returns quickly and does
 * not take rproc->lock itself.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* create a new task to handle the error */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
2219
/* framework-wide init: set up the remoteproc sysfs class and debugfs root */
static int __init remoteproc_init(void)
{
	rproc_init_sysfs();
	rproc_init_debugfs();

	return 0;
}
subsys_initcall(remoteproc_init);
2228
/* framework-wide teardown: mirror of remoteproc_init() */
static void __exit remoteproc_exit(void)
{
	/* release the index allocator's bookkeeping */
	ida_destroy(&rproc_dev_index);

	rproc_exit_debugfs();
	rproc_exit_sysfs();
}
module_exit(remoteproc_exit);
2237
2238 MODULE_LICENSE("GPL v2");
2239 MODULE_DESCRIPTION("Generic Remote Processor Framework");