This source file includes the following definitions:
- phdr_to_last_uncached_entry
- phdr_to_first_cached_entry
- phdr_to_last_cached_entry
- phdr_to_first_uncached_entry
- uncached_entry_next
- cached_entry_next
- uncached_entry_to_item
- cached_entry_to_item
- qcom_smem_alloc_private
- qcom_smem_alloc_global
- qcom_smem_alloc
- qcom_smem_get_global
- qcom_smem_get_private
- qcom_smem_get
- qcom_smem_get_free_space
- qcom_smem_virt_to_phys
- qcom_smem_get_sbl_version
- qcom_smem_get_ptable
- qcom_smem_get_item_count
- qcom_smem_partition_header
- qcom_smem_set_global_partition
- qcom_smem_enumerate_partitions
- qcom_smem_map_memory
- qcom_smem_probe
- qcom_smem_remove
- qcom_smem_init
- qcom_smem_exit

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the
 * SoC, with a table of contents data structure (@smem_header) at the
 * beginning of the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 * In addition to this global heap, a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that
 * identifies the partition and holds properties for the two internal memory
 * regions. The two regions are cached and non-cached memory respectively.
 * Each region contains a linked list of allocation headers
 * (@smem_private_entry) followed by their data.
 *
 * Items in the non-cached region are allocated from the start of the
 * partition while items in the cached region are allocated from the end. The
 * free area is hence the region between the cached and non-cached offsets.
 * The header of cached items comes after the data.
 *
 * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
 * for the global heap: a global partition is created from the global heap
 * region, identified by the special host SMEM_GLOBAL_HOST, and the maximum
 * smem item count is stored alongside the partition table.
 *
 * To synchronize allocations in the shared memory heaps, a remote spinlock
 * (hwspinlock) must be held.
 */

/*
 * The version member of the smem header contains an array of versions for
 * the various software components in the SoC. The SBL entry at index
 * SMEM_MASTER_SBL_VERSION_INDEX carries the layout version of SMEM itself in
 * its upper 16 bits.
 */
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_GLOBAL_HEAP_VERSION	11
#define SMEM_GLOBAL_PART_VERSION	12

/*
 * The first 8 items are fixed entries set up by the boot chain and may not
 * be allocated at runtime.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Processor/host identifier for the global partition */
#define SMEM_GLOBAL_HOST	0xfffe

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		11

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	__le32 command;
	__le32 status;
	__le32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region; bits 0 and 1 are reserved
 */
struct smem_global_entry {
	__le32 allocated;
	__le32 offset;
	__le32 size;
	__le32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found at beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	__le32 version[32];
	__le32 initialized;
	__le32 free_offset;
	__le32 available;
	__le32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @cacheline:	alignment for "cached" entries
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	__le32 offset;
	__le32 size;
	__le32 flags;
	__le16 host0;
	__le16 host1;
	__le32 cacheline;
	__le32 reserved[7];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	reserved entries for later use
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u8 magic[4];
	__le32 version;
	__le32 num_entries;
	__le32 reserved[5];
	struct smem_ptable_entry entry[];
};

static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	reserved entries for later use
 */
struct smem_partition_header {
	u8 magic[4];
	__le16 host0;
	__le16 host1;
	__le32 size;
	__le32 offset_free_uncached;
	__le32 offset_free_cached;
	__le32 reserved[3];
};

static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	reserved entry for later use
 */
struct smem_private_entry {
	u16 canary; /* bytes are the same so no swapping needed */
	__le16 item;
	__le32 size; /* includes padding bytes */
	__le16 padding_data;
	__le16 padding_hdr;
	__le32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_info - smem region info located after the partition table
 * @magic:	magic number, must be SMEM_INFO_MAGIC
 * @size:	size of the smem region
 * @base_addr:	base address of the smem region
 * @reserved:	reserved entry for later use
 * @num_items:	highest accepted item number
 */
struct smem_info {
	u8 magic[4];
	__le32 size;
	__le32 base_addr;
	__le32 reserved;
	__le16 num_items;
};

static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* "SIII" */

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @global_partition:	pointer to the global partition when in use
 * @global_cacheline:	cacheline size for the global partition
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @cacheline:	list of cacheline sizes for each host
 * @item_count:	max accepted item number
 * @socinfo:	platform device pointer
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *global_partition;
	size_t global_cacheline;
	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
	size_t cacheline[SMEM_HOST_COUNT];
	u32 item_count;
	struct platform_device *socinfo;

	unsigned num_regions;
	struct smem_region regions[];
};
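
/*
 * Illustration (not part of the original source): layout of a private
 * partition, as implied by the entry helpers below. Uncached entries grow
 * from the partition header toward the end, cached entries grow from the end
 * toward the header, and the free area is whatever lies between the two free
 * offsets. Note that a cached entry's header comes *after* its data:
 *
 *	+---------------------------------+ <- start of partition
 *	| struct smem_partition_header    |
 *	+---------------------------------+
 *	| uncached entry 0: hdr, data     |
 *	| uncached entry 1: hdr, data     |
 *	| ...                             | <- offset_free_uncached
 *	+---------------------------------+    (increases on alloc)
 *	|            free space           |
 *	+---------------------------------+ <- offset_free_cached
 *	| ...                             |
 *	| cached entry 1: data, hdr       |
 *	| cached entry 0: data, hdr       |
 *	+---------------------------------+ <- start + size
 */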

static void *
phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_uncached);
}

static struct smem_private_entry *
phdr_to_first_cached_entry(struct smem_partition_header *phdr,
			   size_t cacheline)
{
	void *p = phdr;
	struct smem_private_entry *e;

	return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);
}

static void *
phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + le32_to_cpu(phdr->offset_free_cached);
}

static struct smem_private_entry *
phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
	void *p = phdr;

	return p + sizeof(*phdr);
}

static struct smem_private_entry *
uncached_entry_next(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
		le32_to_cpu(e->size);
}

static struct smem_private_entry *
cached_entry_next(struct smem_private_entry *e, size_t cacheline)
{
	void *p = e;

	return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
}

static void *uncached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}

static void *cached_entry_to_item(struct smem_private_entry *e)
{
	void *p = e;

	return p - le32_to_cpu(e->size);
}

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   unsigned item,
				   size_t size)
{
	struct smem_private_entry *hdr, *end;
	size_t alloc_size;
	void *cached;

	hdr = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);
	cached = phdr_to_last_cached_entry(phdr);

	while (hdr < end) {
		if (hdr->canary != SMEM_PRIVATE_CANARY)
			goto bad_canary;
		if (le16_to_cpu(hdr->item) == item)
			return -EEXIST;

		hdr = uncached_entry_next(hdr);
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if ((void *)hdr + alloc_size > cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = cpu_to_le16(item);
	hdr->size = cpu_to_le32(ALIGN(size, 8));
	hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	le32_add_cpu(&phdr->offset_free_uncached, alloc_size);

	return 0;
bad_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return -EINVAL;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_global_entry *entry;
	struct smem_header *header;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > le32_to_cpu(header->available)))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = cpu_to_le32(size);

	/*
	 * Ensure the entry is fully written before marking it as allocated,
	 * so that remote processors observe a consistent entry.
	 */
	wmb();
	entry->allocated = cpu_to_le32(1);

	le32_add_cpu(&header->free_offset, size);
	le32_add_cpu(&header->available, -size);

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	if (WARN_ON(item >= __smem->item_count))
		return -EINVAL;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = qcom_smem_alloc_private(__smem, phdr, item, size);
	} else {
		ret = qcom_smem_alloc_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
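
/*
 * Usage sketch (hypothetical client code, not part of this file): a client
 * typically allocates its item once and tolerates -EEXIST, since another
 * processor or an earlier boot stage may have allocated it already:
 *
 *	ret = qcom_smem_alloc(remote_pid, MY_SMEM_ITEM_ID, sizeof(struct foo));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 *
 * MY_SMEM_ITEM_ID, remote_pid and struct foo are placeholders; item numbers
 * must be >= SMEM_ITEM_LAST_FIXED and below the system's item count.
 */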

static void *qcom_smem_get_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t *size)
{
	struct smem_header *header;
	struct smem_region *region;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return ERR_PTR(-ENXIO);

	aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

	for (i = 0; i < smem->num_regions; i++) {
		region = &smem->regions[i];

		if (region->aux_base == aux_base || !aux_base) {
			if (size != NULL)
				*size = le32_to_cpu(entry->size);
			return region->virt_base + le32_to_cpu(entry->offset);
		}
	}

	return ERR_PTR(-ENOENT);
}

static void *qcom_smem_get_private(struct qcom_smem *smem,
				   struct smem_partition_header *phdr,
				   size_t cacheline,
				   unsigned item,
				   size_t *size)
{
	struct smem_private_entry *e, *end;

	e = phdr_to_first_uncached_entry(phdr);
	end = phdr_to_last_uncached_entry(phdr);

	while (e < end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return uncached_entry_to_item(e);
		}

		e = uncached_entry_next(e);
	}

	/* Item was not found in the uncached list, search the cached list */

	e = phdr_to_first_cached_entry(phdr, cacheline);
	end = phdr_to_last_cached_entry(phdr);

	while (e > end) {
		if (e->canary != SMEM_PRIVATE_CANARY)
			goto invalid_canary;

		if (le16_to_cpu(e->item) == item) {
			if (size != NULL)
				*size = le32_to_cpu(e->size) -
					le16_to_cpu(e->padding_data);

			return cached_entry_to_item(e);
		}

		e = cached_entry_next(e, cacheline);
	}

	return ERR_PTR(-ENOENT);

invalid_canary:
	dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
		le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));

	return ERR_PTR(-EINVAL);
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the smem
 * item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	struct smem_partition_header *phdr;
	unsigned long flags;
	size_t cacheln;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	if (WARN_ON(item >= __smem->item_count))
		return ERR_PTR(-EINVAL);

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		cacheln = __smem->cacheline[host];
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		cacheln = __smem->global_cacheline;
		ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
	} else {
		ptr = qcom_smem_get_global(__smem, item, size);
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
EXPORT_SYMBOL(qcom_smem_get);
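
/*
 * Usage sketch (hypothetical client code, not part of this file): the
 * returned pointer references the shared item in place, so the size reported
 * via @size should be validated before the item is interpreted:
 *
 *	size_t size;
 *	struct foo *ptr;
 *
 *	ptr = qcom_smem_get(remote_pid, MY_SMEM_ITEM_ID, &size);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	if (size < sizeof(*ptr))
 *		return -EINVAL;
 *
 * MY_SMEM_ITEM_ID, remote_pid and struct foo are placeholders.
 */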

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else if (__smem->global_partition) {
		phdr = __smem->global_partition;
		ret = le32_to_cpu(phdr->offset_free_cached) -
		      le32_to_cpu(phdr->offset_free_uncached);
	} else {
		header = __smem->regions[0].virt_base;
		ret = le32_to_cpu(header->available);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
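
/*
 * Usage sketch (hypothetical, not part of this file): since the heaps are
 * allocate-only, a client can poll the free space as a cheap way to detect
 * that new items have appeared in a partition:
 *
 *	int free = qcom_smem_get_free_space(remote_pid);
 *
 *	if (free >= 0 && free != last_seen_free)
 *		scan_for_new_items();
 *
 * remote_pid, last_seen_free and scan_for_new_items() are placeholders.
 */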

/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 * @p:	the virtual address to convert
 *
 * Returns 0 if the pointer provided is not within any smem region.
 */
phys_addr_t qcom_smem_virt_to_phys(void *p)
{
	unsigned i;

	for (i = 0; i < __smem->num_regions; i++) {
		struct smem_region *region = &__smem->regions[i];

		if (p < region->virt_base)
			continue;
		if (p < region->virt_base + region->size) {
			u64 offset = p - region->virt_base;

			return (phys_addr_t)region->aux_base + offset;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_smem_virt_to_phys);
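
/*
 * Usage sketch (hypothetical, not part of this file): useful when the
 * physical address of an smem item must be handed to a remote entity:
 *
 *	void *ptr = qcom_smem_get(remote_pid, MY_SMEM_ITEM_ID, NULL);
 *	phys_addr_t phys;
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	phys = qcom_smem_virt_to_phys(ptr);
 *	if (!phys)
 *		return -EINVAL;
 *
 * MY_SMEM_ITEM_ID and remote_pid are placeholders; a return of 0 means the
 * pointer did not fall inside any smem region.
 */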

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	struct smem_header *header;
	__le32 *versions;

	header = smem->regions[0].virt_base;
	versions = header->version;

	return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}

static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	u32 version;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
		return ERR_PTR(-ENOENT);

	version = le32_to_cpu(ptable->version);
	if (version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n", version);
		return ERR_PTR(-EINVAL);
	}
	return ptable;
}

static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
{
	struct smem_ptable *ptable;
	struct smem_info *info;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR_OR_NULL(ptable))
		return SMEM_ITEM_COUNT;

	info = (struct smem_info *)&ptable->entry[ptable->num_entries];
	if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
		return SMEM_ITEM_COUNT;

	return le16_to_cpu(info->num_items);
}

/*
 * Validate the partition header for a partition whose partition table entry
 * is supplied. Returns a pointer to its header if valid, or a null pointer
 * otherwise.
 */
static struct smem_partition_header *
qcom_smem_partition_header(struct qcom_smem *smem,
		struct smem_ptable_entry *entry, u16 host0, u16 host1)
{
	struct smem_partition_header *header;
	u32 size;

	header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

	if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
		dev_err(smem->dev, "bad partition magic %02x %02x %02x %02x\n",
			header->magic[0], header->magic[1],
			header->magic[2], header->magic[3]);
		return NULL;
	}

	if (host0 != le16_to_cpu(header->host0)) {
		dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
			host0, le16_to_cpu(header->host0));
		return NULL;
	}
	if (host1 != le16_to_cpu(header->host1)) {
		dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
			host1, le16_to_cpu(header->host1));
		return NULL;
	}

	size = le32_to_cpu(header->size);
	if (size != le32_to_cpu(entry->size)) {
		dev_err(smem->dev, "bad partition size (%u != %u)\n",
			size, le32_to_cpu(entry->size));
		return NULL;
	}

	if (le32_to_cpu(header->offset_free_uncached) > size) {
		dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
			le32_to_cpu(header->offset_free_uncached), size);
		return NULL;
	}

	return header;
}

static int qcom_smem_set_global_partition(struct qcom_smem *smem)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	bool found = false;
	int i;

	if (smem->global_partition) {
		dev_err(smem->dev, "Already found the global partition\n");
		return -EINVAL;
	}

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
			continue;

		if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_err(smem->dev, "Missing entry for global partition\n");
		return -EINVAL;
	}

	header = qcom_smem_partition_header(smem, entry,
				SMEM_GLOBAL_HOST, SMEM_GLOBAL_HOST);
	if (!header)
		return -EINVAL;

	smem->global_partition = header;
	smem->global_cacheline = le32_to_cpu(entry->cacheline);

	return 0;
}

static int
qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned int remote_host;
	u16 host0, host1;
	int i;

	ptable = qcom_smem_get_ptable(smem);
	if (IS_ERR(ptable))
		return PTR_ERR(ptable);

	for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
		entry = &ptable->entry[i];
		if (!le32_to_cpu(entry->offset))
			continue;
		if (!le32_to_cpu(entry->size))
			continue;

		host0 = le16_to_cpu(entry->host0);
		host1 = le16_to_cpu(entry->host1);
		if (host0 == local_host)
			remote_host = host1;
		else if (host1 == local_host)
			remote_host = host0;
		else
			continue;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev, "bad host %u\n", remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev, "duplicate host %u\n", remote_host);
			return -EINVAL;
		}

		header = qcom_smem_partition_header(smem, entry, host0, host1);
		if (!header)
			return -EINVAL;

		smem->partitions[remote_host] = header;
		smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
	}

	return 0;
}

static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
				const char *name, int i)
{
	struct device_node *np;
	struct resource r;
	resource_size_t size;
	int ret;

	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		dev_err(dev, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;
	size = resource_size(&r);

	smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
	if (!smem->regions[i].virt_base)
		return -ENOMEM;
	smem->regions[i].aux_base = (u32)r.start;
	smem->regions[i].size = size;

	return 0;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct qcom_smem *smem;
	size_t array_size;
	int num_regions;
	int hwlock_id;
	u32 version;
	int ret;

	num_regions = 1;
	if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
		num_regions++;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
	if (ret)
		return ret;

	if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
					"qcom,rpm-msg-ram", 1)))
		return ret;

	header = smem->regions[0].virt_base;
	if (le32_to_cpu(header->initialized) != 1 ||
	    le32_to_cpu(header->reserved)) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	switch (version >> 16) {
	case SMEM_GLOBAL_PART_VERSION:
		ret = qcom_smem_set_global_partition(smem);
		if (ret < 0)
			return ret;
		smem->item_count = qcom_smem_get_item_count(smem);
		break;
	case SMEM_GLOBAL_HEAP_VERSION:
		smem->item_count = SMEM_ITEM_COUNT;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	BUILD_BUG_ON(SMEM_HOST_APPS >= SMEM_HOST_COUNT);
	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0 && ret != -ENOENT)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		if (hwlock_id != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
						      PLATFORM_DEVID_NONE, NULL,
						      0);
	if (IS_ERR(smem->socinfo))
		dev_dbg(&pdev->dev, "failed to register socinfo device\n");

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	platform_device_unregister(__smem->socinfo);

	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
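
/*
 * Illustrative devicetree node (hypothetical, based on the properties this
 * driver reads above: "memory-region", the optional "qcom,rpm-msg-ram", and
 * a hwspinlock reference):
 *
 *	smem {
 *		compatible = "qcom,smem";
 *		memory-region = <&smem_region>;
 *		hwlocks = <&tcsr_mutex 3>;
 *	};
 *
 * The phandle targets and the hwlock index are placeholders; consult the
 * platform's binding documentation for actual values.
 */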

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");