This source file includes the following definitions:
- of_node_name_eq
- of_node_name_prefix
- __of_node_is_type
- of_n_addr_cells
- of_n_size_cells
- of_node_to_nid
- __of_free_phandle_cache
- of_free_phandle_cache
- __of_free_phandle_cache_entry
- of_populate_phandle_cache
- of_core_init
- __of_find_property
- of_find_property
- __of_find_all_nodes
- of_find_all_nodes
- __of_get_property
- of_get_property
- arch_match_cpu_phys_id
- __of_find_n_match_cpu_property
- arch_find_n_match_cpu_physical_id
- of_get_cpu_node
- of_cpu_node_to_id
- __of_device_is_compatible
- of_device_is_compatible
- of_device_compatible_match
- of_machine_is_compatible
- __of_device_is_available
- of_device_is_available
- of_device_is_big_endian
- of_get_parent
- of_get_next_parent
- __of_get_next_child
- of_get_next_child
- of_get_next_available_child
- of_get_next_cpu_node
- of_get_compatible_child
- of_get_child_by_name
- __of_find_node_by_path
- __of_find_node_by_full_path
- of_find_node_opts_by_path
- of_find_node_by_name
- of_find_node_by_type
- of_find_compatible_node
- of_find_node_with_property
- __of_match_node
- of_match_node
- of_find_matching_node_and_match
- of_modalias_node
- of_find_node_by_phandle
- of_print_phandle_args
- of_phandle_iterator_init
- of_phandle_iterator_next
- of_phandle_iterator_args
- __of_parse_phandle_with_args
- of_parse_phandle
- of_parse_phandle_with_args
- of_parse_phandle_with_args_map
- of_parse_phandle_with_fixed_args
- of_count_phandle_with_args
- __of_add_property
- of_add_property
- __of_remove_property
- of_remove_property
- __of_update_property
- of_update_property
- of_alias_add
- of_alias_scan
- of_alias_get_id
- of_alias_get_alias_list
- of_alias_get_highest_id
- of_console_check
- of_find_next_cache_node
- of_find_last_cache_level
- of_map_rid
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Procedures for creating, accessing and interpreting the device tree.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 *
11 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
12 *
13 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
14 * Grant Likely.
15 */
16
17 #define pr_fmt(fmt) "OF: " fmt
18
19 #include <linux/bitmap.h>
20 #include <linux/console.h>
21 #include <linux/ctype.h>
22 #include <linux/cpu.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/of_device.h>
26 #include <linux/of_graph.h>
27 #include <linux/spinlock.h>
28 #include <linux/slab.h>
29 #include <linux/string.h>
30 #include <linux/proc_fs.h>
31
32 #include "of_private.h"
33
34 LIST_HEAD(aliases_lookup);
35
36 struct device_node *of_root;
37 EXPORT_SYMBOL(of_root);
38 struct device_node *of_chosen;
39 struct device_node *of_aliases;
40 struct device_node *of_stdout;
41 static const char *of_stdout_options;
42
43 struct kset *of_kset;
44
45 /*
46 * of_mutex protects modifications of the live tree (property add, remove
47 * and update) and sysfs registration of nodes, and guards walks of the
48 * aliases_lookup list.
49 */
50
51 DEFINE_MUTEX(of_mutex);
52
53 /* use when traversing tree through the child, sibling,
54 * or parent members of struct device_node.
55 */
56 DEFINE_RAW_SPINLOCK(devtree_lock);
57
58 bool of_node_name_eq(const struct device_node *np, const char *name)
59 {
60 const char *node_name;
61 size_t len;
62
63 if (!np)
64 return false;
65
66 node_name = kbasename(np->full_name);
67 len = strchrnul(node_name, '@') - node_name;
68
69 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
70 }
71 EXPORT_SYMBOL(of_node_name_eq);
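/*
 * Illustrative sketch (editorial example, not part of this file): the
 * comparison above stops at the unit address, so for a node whose
 * full_name is "serial@e0000000":
 *
 *	of_node_name_eq(np, "serial");			// true
 *	of_node_name_eq(np, "serial@e0000000");		// false, '@...' is not compared
 */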
72
73 bool of_node_name_prefix(const struct device_node *np, const char *prefix)
74 {
75 if (!np)
76 return false;
77
78 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
79 }
80 EXPORT_SYMBOL(of_node_name_prefix);
81
82 static bool __of_node_is_type(const struct device_node *np, const char *type)
83 {
84 const char *match = __of_get_property(np, "device_type", NULL);
85
86 return np && match && type && !strcmp(match, type);
87 }
88
89 int of_n_addr_cells(struct device_node *np)
90 {
91 u32 cells;
92
93 do {
94 if (np->parent)
95 np = np->parent;
96 if (!of_property_read_u32(np, "#address-cells", &cells))
97 return cells;
98 } while (np->parent);
99
100 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
101 }
102 EXPORT_SYMBOL(of_n_addr_cells);
103
104 int of_n_size_cells(struct device_node *np)
105 {
106 u32 cells;
107
108 do {
109 if (np->parent)
110 np = np->parent;
111 if (!of_property_read_u32(np, "#size-cells", &cells))
112 return cells;
113 } while (np->parent);
114
115 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
116 }
117 EXPORT_SYMBOL(of_n_size_cells);
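/*
 * Illustrative sketch (editorial example, not part of this file): both
 * helpers start the lookup at the *parent*, since #address-cells and
 * #size-cells describe the children of the node that carries them:
 *
 *	soc {
 *		#address-cells = <2>;
 *		#size-cells = <1>;
 *		uart@f1000000 { ... };
 *	};
 *
 * For the uart node, of_n_addr_cells() returns 2 and of_n_size_cells()
 * returns 1.
 */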
118
119 #ifdef CONFIG_NUMA
120 int __weak of_node_to_nid(struct device_node *np)
121 {
122 return NUMA_NO_NODE;
123 }
124 #endif
125
126 /*
127 * The phandle cache maps a phandle value (masked with phandle_cache_mask)
128 * to the device_node that carries it, letting of_find_node_by_phandle()
129 * usually avoid walking the whole tree. Cached nodes hold a reference
130 * (of_node_get()); the cache is rebuilt by of_populate_phandle_cache()
131 * and torn down by of_free_phandle_cache(), and all accesses to it are
132 * made under devtree_lock.
133 */
134
135 static struct device_node **phandle_cache;
136 static u32 phandle_cache_mask;
137
138 /*
139 * Caller must hold devtree_lock.
140 */
141 static void __of_free_phandle_cache(void)
142 {
143 u32 cache_entries = phandle_cache_mask + 1;
144 u32 k;
145
146 if (!phandle_cache)
147 return;
148
149 for (k = 0; k < cache_entries; k++)
150 of_node_put(phandle_cache[k]);
151
152 kfree(phandle_cache);
153 phandle_cache = NULL;
154 }
155
156 int of_free_phandle_cache(void)
157 {
158 unsigned long flags;
159
160 raw_spin_lock_irqsave(&devtree_lock, flags);
161
162 __of_free_phandle_cache();
163
164 raw_spin_unlock_irqrestore(&devtree_lock, flags);
165
166 return 0;
167 }
168 #if !defined(CONFIG_MODULES)
169 late_initcall_sync(of_free_phandle_cache);
170 #endif
171
172 /*
173 * Caller must hold devtree_lock.
174 */
175 void __of_free_phandle_cache_entry(phandle handle)
176 {
177 phandle masked_handle;
178 struct device_node *np;
179
180 if (!handle)
181 return;
182
183 masked_handle = handle & phandle_cache_mask;
184
185 if (phandle_cache) {
186 np = phandle_cache[masked_handle];
187 if (np && handle == np->phandle) {
188 of_node_put(np);
189 phandle_cache[masked_handle] = NULL;
190 }
191 }
192 }
193
194 void of_populate_phandle_cache(void)
195 {
196 unsigned long flags;
197 u32 cache_entries;
198 struct device_node *np;
199 u32 phandles = 0;
200
201 raw_spin_lock_irqsave(&devtree_lock, flags);
202
203 __of_free_phandle_cache();
204
205 for_each_of_allnodes(np)
206 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
207 phandles++;
208
209 if (!phandles)
210 goto out;
211
212 cache_entries = roundup_pow_of_two(phandles);
213 phandle_cache_mask = cache_entries - 1;
214
215 phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
216 GFP_ATOMIC);
217 if (!phandle_cache)
218 goto out;
219
220 for_each_of_allnodes(np)
221 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
222 of_node_get(np);
223 phandle_cache[np->phandle & phandle_cache_mask] = np;
224 }
225
226 out:
227 raw_spin_unlock_irqrestore(&devtree_lock, flags);
228 }
229
230 void __init of_core_init(void)
231 {
232 struct device_node *np;
233
234 of_populate_phandle_cache();
235
236 /* Create the kset, and register existing nodes */
237 mutex_lock(&of_mutex);
238 of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
239 if (!of_kset) {
240 mutex_unlock(&of_mutex);
241 pr_err("failed to register existing nodes\n");
242 return;
243 }
244 for_each_of_allnodes(np)
245 __of_attach_node_sysfs(np);
246 mutex_unlock(&of_mutex);
247
248 /* Symlink in /proc as required by userspace ABI */
249 if (of_root)
250 proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
251 }
252
253 static struct property *__of_find_property(const struct device_node *np,
254 const char *name, int *lenp)
255 {
256 struct property *pp;
257
258 if (!np)
259 return NULL;
260
261 for (pp = np->properties; pp; pp = pp->next) {
262 if (of_prop_cmp(pp->name, name) == 0) {
263 if (lenp)
264 *lenp = pp->length;
265 break;
266 }
267 }
268
269 return pp;
270 }
271
272 struct property *of_find_property(const struct device_node *np,
273 const char *name,
274 int *lenp)
275 {
276 struct property *pp;
277 unsigned long flags;
278
279 raw_spin_lock_irqsave(&devtree_lock, flags);
280 pp = __of_find_property(np, name, lenp);
281 raw_spin_unlock_irqrestore(&devtree_lock, flags);
282
283 return pp;
284 }
285 EXPORT_SYMBOL(of_find_property);
286
287 struct device_node *__of_find_all_nodes(struct device_node *prev)
288 {
289 struct device_node *np;
290 if (!prev) {
291 np = of_root;
292 } else if (prev->child) {
293 np = prev->child;
294 } else {
295 /* Walk back up looking for a sibling */
296 np = prev;
297 while (np->parent && !np->sibling)
298 np = np->parent;
299 np = np->sibling;
300 }
301 return np;
302 }
303
304 /**
305 * of_find_all_nodes - Get next node in global list
306 * @prev: Previous node or NULL to start iteration;
307 * of_node_put() will be called on it
308 *
309 * Returns a node pointer with refcount incremented, use
310 * of_node_put() on it when done.
311 */
312 struct device_node *of_find_all_nodes(struct device_node *prev)
313 {
314 struct device_node *np;
315 unsigned long flags;
316
317 raw_spin_lock_irqsave(&devtree_lock, flags);
318 np = __of_find_all_nodes(prev);
319 of_node_get(np);
320 of_node_put(prev);
321 raw_spin_unlock_irqrestore(&devtree_lock, flags);
322 return np;
323 }
324 EXPORT_SYMBOL(of_find_all_nodes);
325
326 /*
327 * Find a property with a given name for a given node
328 * and return the value. Lockless variant; caller holds devtree_lock.
329 */
330 const void *__of_get_property(const struct device_node *np,
331 const char *name, int *lenp)
332 {
333 struct property *pp = __of_find_property(np, name, lenp);
334
335 return pp ? pp->value : NULL;
336 }
337
338 /*
339 * Find a property with a given name for a given node
340 * and return the value.
341 */
342 const void *of_get_property(const struct device_node *np, const char *name,
343 int *lenp)
344 {
345 struct property *pp = of_find_property(np, name, lenp);
346
347 return pp ? pp->value : NULL;
348 }
349 EXPORT_SYMBOL(of_get_property);
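/*
 * Illustrative sketch (editorial example, not part of this file): reading a
 * property value and its length. The returned pointer aliases the live
 * tree, so the caller must not modify or free it:
 *
 *	int len;
 *	const char *compat = of_get_property(np, "compatible", &len);
 *
 *	if (compat)
 *		pr_info("first compatible: %s (%d bytes)\n", compat, len);
 */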
350
351 /**
352 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
353 *
354 * @cpu: logical cpu index of a core/thread
355 * @phys_id: physical identifier of a core/thread
356 *
357 * CPU logical to physical index mapping is architecture specific.
358 * However this __weak function provides a default match of physical
359 * id to logical cpu index. phys_id provided here is usually values read
360 * from the device tree which must match the hardware internal registers.
361 *
362 * Returns true if the physical identifier and the logical cpu index
363 * correspond to the same core/thread, false otherwise.
364 */
365 bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
366 {
367 return (u32)phys_id == cpu;
368 }
369
370 /**
371 * Checks if the given "prop_name" property holds the physical id of the
372 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
373 * NULL, the local thread number within the core is returned in it.
374 */
375 static bool __of_find_n_match_cpu_property(struct device_node *cpun,
376 const char *prop_name, int cpu, unsigned int *thread)
377 {
378 const __be32 *cell;
379 int ac, prop_len, tid;
380 u64 hwid;
381
382 ac = of_n_addr_cells(cpun);
383 cell = of_get_property(cpun, prop_name, &prop_len);
384 if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
385 return true;
386 if (!cell || !ac)
387 return false;
388 prop_len /= sizeof(*cell) * ac;
389 for (tid = 0; tid < prop_len; tid++) {
390 hwid = of_read_number(cell, ac);
391 if (arch_match_cpu_phys_id(cpu, hwid)) {
392 if (thread)
393 *thread = tid;
394 return true;
395 }
396 cell += ac;
397 }
398 return false;
399 }
400
401 /*
402 * arch_find_n_match_cpu_physical_id - See if the given device node is
403 * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
404 * else false. If 'thread' is non-NULL, the local thread number within the
405 * core is returned in it.
406 */
407 bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
408 int cpu, unsigned int *thread)
409 {
410 /* Check for the non-standard "ibm,ppc-interrupt-server#s" property
411 * for thread ids on PowerPC. If it doesn't exist, fall back to the
412 * "reg" property of the CPU node, per the standard cpus binding.
413 */
414 if (IS_ENABLED(CONFIG_PPC) &&
415 __of_find_n_match_cpu_property(cpun,
416 "ibm,ppc-interrupt-server#s",
417 cpu, thread))
418 return true;
419
420 return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
421 }
422
423 /**
424 * of_get_cpu_node - Get device node associated with the given logical CPU
425 *
426 * @cpu: CPU number(index) corresponding to the given logical CPU
427 * @thread: if not NULL, local thread number within the physical core is
428 * returned
429 *
430 * The main purpose of this function is to retrieve the device node for the
431 * given logical CPU index. It should be used to initialize the of_node in
432 * the cpu device. Once of_node in the cpu device is populated, all the
433 * further references can use that instead.
434 *
435 * CPU logical to physical index mapping is architecture specific and is built
436 * before booting secondary cores. This function uses arch_match_cpu_phys_id
437 * which can be overridden by an architecture specific implementation.
438 *
439 * Returns a node pointer for the logical cpu with refcount incremented, use
440 * of_node_put() on it when done. Returns NULL if not found.
441 */
442 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
443 {
444 struct device_node *cpun;
445
446 for_each_of_cpu_node(cpun) {
447 if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
448 return cpun;
449 }
450 return NULL;
451 }
452 EXPORT_SYMBOL(of_get_cpu_node);
453
454 /**
455 * of_cpu_node_to_id: Get the logical CPU number for a given device_node
456 *
457 * @cpu_node: Pointer to the device_node for CPU.
458 *
459 * Returns the logical CPU number of the given CPU device_node.
460 * Returns -ENODEV if the CPU is not found.
461 */
462 int of_cpu_node_to_id(struct device_node *cpu_node)
463 {
464 int cpu;
465 bool found = false;
466 struct device_node *np;
467
468 for_each_possible_cpu(cpu) {
469 np = of_cpu_device_node_get(cpu);
470 found = (cpu_node == np);
471 of_node_put(np);
472 if (found)
473 return cpu;
474 }
475
476 return -ENODEV;
477 }
478 EXPORT_SYMBOL(of_cpu_node_to_id);
479
480 /**
481 * __of_device_is_compatible() - Check if the node matches given constraints
482 * @device: pointer to node
483 * @compat: required compatible string, NULL or "" for any match
484 * @type: required device_type value, NULL or "" for any match
485 * @name: required node name, NULL or "" for any match
486 *
487 * Checks if the given @compat, @type and @name strings match the
488 * properties of the given @device. A constraint can be skipped by
489 * passing NULL or an empty string as the constraint.
490 *
491 * Returns 0 for no match, and a positive integer on match. The return
492 * value is a relative score with larger values indicating better
493 * matches. The score is weighted for the most specific compatible value
494 * to get the highest score. Matching type is next, followed by matching
495 * name. Practically speaking, this results in the following priority
496 * order for matches:
497 *
498 * 1. specific compatible && type && name
499 * 2. specific compatible && type
500 * 3. specific compatible && name
501 * 4. specific compatible
502 * 5. general compatible && type && name
503 * 6. general compatible && type
504 * 7. general compatible && name
505 * 8. general compatible
506 * 9. type && name
507 * 10. type
508 * 11. name
509 */
510 static int __of_device_is_compatible(const struct device_node *device,
511 const char *compat, const char *type, const char *name)
512 {
513 struct property *prop;
514 const char *cp;
515 int index = 0, score = 0;
516
517 /* Compatible match has highest priority */
518 if (compat && compat[0]) {
519 prop = __of_find_property(device, "compatible", NULL);
520 for (cp = of_prop_next_string(prop, NULL); cp;
521 cp = of_prop_next_string(prop, cp), index++) {
522 if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
523 score = INT_MAX/2 - (index << 2);
524 break;
525 }
526 }
527 if (!score)
528 return 0;
529 }
530
531 /* Matching type is better than matching name */
532 if (type && type[0]) {
533 if (!__of_node_is_type(device, type))
534 return 0;
535 score += 2;
536 }
537
538 /* Matching name is a bit better than not */
539 if (name && name[0]) {
540 if (!of_node_name_eq(device, name))
541 return 0;
542 score++;
543 }
544
545 return score;
546 }
547
548 /** Checks if the given "compat" string matches one of the strings in
549 * the device's "compatible" property
550 */
551 int of_device_is_compatible(const struct device_node *device,
552 const char *compat)
553 {
554 unsigned long flags;
555 int res;
556
557 raw_spin_lock_irqsave(&devtree_lock, flags);
558 res = __of_device_is_compatible(device, compat, NULL, NULL);
559 raw_spin_unlock_irqrestore(&devtree_lock, flags);
560 return res;
561 }
562 EXPORT_SYMBOL(of_device_is_compatible);
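/*
 * Illustrative sketch (editorial example, not part of this file): the return
 * value is a score, not a boolean, so earlier (more specific) entries of a
 * node's compatible list score higher. With compatible = "vendor,chip",
 * "generic":
 *
 *	of_device_is_compatible(np, "vendor,chip");	// large score (index 0)
 *	of_device_is_compatible(np, "generic");		// smaller score (index 1)
 *	of_device_is_compatible(np, "other");		// 0, no match
 */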
563
564 /** Checks if the device is compatible with any of the entries in
565 * a NULL terminated array of strings. Returns the best match
566 * score or 0.
567 */
568 int of_device_compatible_match(struct device_node *device,
569 const char *const *compat)
570 {
571 unsigned int tmp, score = 0;
572
573 if (!compat)
574 return 0;
575
576 while (*compat) {
577 tmp = of_device_is_compatible(device, *compat);
578 if (tmp > score)
579 score = tmp;
580 compat++;
581 }
582
583 return score;
584 }
585
586 /**
587 * of_machine_is_compatible - Test root of device tree for a given compatible value
588 * @compat: compatible string to look for in root node's compatible property.
589 *
590 * Returns a positive integer if the root node has the given value in its
591 * compatible property.
592 */
593 int of_machine_is_compatible(const char *compat)
594 {
595 struct device_node *root;
596 int rc = 0;
597
598 root = of_find_node_by_path("/");
599 if (root) {
600 rc = of_device_is_compatible(root, compat);
601 of_node_put(root);
602 }
603 return rc;
604 }
605 EXPORT_SYMBOL(of_machine_is_compatible);
606
607 /**
608 * __of_device_is_available - check if a device is available for use
609 *
610 * @device: Node to check for availability, with locks already held
611 *
612 * Returns true if the status property is absent or set to "okay" or "ok",
613 * false otherwise
614 */
615 static bool __of_device_is_available(const struct device_node *device)
616 {
617 const char *status;
618 int statlen;
619
620 if (!device)
621 return false;
622
623 status = __of_get_property(device, "status", &statlen);
624 if (status == NULL)
625 return true;
626
627 if (statlen > 0) {
628 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
629 return true;
630 }
631
632 return false;
633 }
634
635 /**
636 * of_device_is_available - check if a device is available for use
637 *
638 * @device: Node to check for availability
639 *
640 * Returns true if the status property is absent or set to "okay" or "ok",
641 * false otherwise
642 */
643 bool of_device_is_available(const struct device_node *device)
644 {
645 unsigned long flags;
646 bool res;
647
648 raw_spin_lock_irqsave(&devtree_lock, flags);
649 res = __of_device_is_available(device);
650 raw_spin_unlock_irqrestore(&devtree_lock, flags);
651 return res;
652
653 }
654 EXPORT_SYMBOL(of_device_is_available);
655
656 /**
657 * of_device_is_big_endian - check if a device has BE registers
658 *
659 * @device: Node to check for endianness
660 *
661 * Returns true if the device has a "big-endian" property, or if the kernel
662 * was compiled for BE *and* the device has a "native-endian" property.
663 * Returns false otherwise.
664 *
665 * Callers would nominally use ioread32be/iowrite32be if
666 * of_device_is_big_endian() == true, or readl/writel otherwise.
667 */
668 bool of_device_is_big_endian(const struct device_node *device)
669 {
670 if (of_property_read_bool(device, "big-endian"))
671 return true;
672 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
673 of_property_read_bool(device, "native-endian"))
674 return true;
675 return false;
676 }
677 EXPORT_SYMBOL(of_device_is_big_endian);
678
679 /**
680 * of_get_parent - Get a node's parent if any
681 * @node: Node to get parent of
682 *
683 * Returns a node pointer with refcount incremented, use
684 * of_node_put() on it when done.
685 */
686 struct device_node *of_get_parent(const struct device_node *node)
687 {
688 struct device_node *np;
689 unsigned long flags;
690
691 if (!node)
692 return NULL;
693
694 raw_spin_lock_irqsave(&devtree_lock, flags);
695 np = of_node_get(node->parent);
696 raw_spin_unlock_irqrestore(&devtree_lock, flags);
697 return np;
698 }
699 EXPORT_SYMBOL(of_get_parent);
700
701 /**
702 * of_get_next_parent - Iterate to a node's parent
703 * @node: Node to get parent of
704 *
705 * This is like of_get_parent() except that it drops the
706 * refcount on the passed node, making it suitable for iterating
707 * through a node's parents.
708 *
709 * Returns a node pointer with refcount incremented, use
710 * of_node_put() on it when done.
711 */
712 struct device_node *of_get_next_parent(struct device_node *node)
713 {
714 struct device_node *parent;
715 unsigned long flags;
716
717 if (!node)
718 return NULL;
719
720 raw_spin_lock_irqsave(&devtree_lock, flags);
721 parent = of_node_get(node->parent);
722 of_node_put(node);
723 raw_spin_unlock_irqrestore(&devtree_lock, flags);
724 return parent;
725 }
726 EXPORT_SYMBOL(of_get_next_parent);
727
728 static struct device_node *__of_get_next_child(const struct device_node *node,
729 struct device_node *prev)
730 {
731 struct device_node *next;
732
733 if (!node)
734 return NULL;
735
736 next = prev ? prev->sibling : node->child;
737 for (; next; next = next->sibling)
738 if (of_node_get(next))
739 break;
740 of_node_put(prev);
741 return next;
742 }
743 #define __for_each_child_of_node(parent, child) \
744 for (child = __of_get_next_child(parent, NULL); child != NULL; \
745 child = __of_get_next_child(parent, child))
746
747 /**
748 * of_get_next_child - Iterate over a node's children
749 * @node: parent node
750 * @prev: previous child of the parent node, or NULL to get first
751 *
752 * Returns a node pointer with refcount incremented, use of_node_put()
753 * on it when done. Returns NULL when prev is the last child. Decrements
754 * the refcount of prev.
755 */
756 struct device_node *of_get_next_child(const struct device_node *node,
757 struct device_node *prev)
758 {
759 struct device_node *next;
760 unsigned long flags;
761
762 raw_spin_lock_irqsave(&devtree_lock, flags);
763 next = __of_get_next_child(node, prev);
764 raw_spin_unlock_irqrestore(&devtree_lock, flags);
765 return next;
766 }
767 EXPORT_SYMBOL(of_get_next_child);
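/*
 * Illustrative sketch (editorial example, not part of this file): callers
 * normally consume this through the for_each_child_of_node() wrapper;
 * breaking out of the loop early leaves a reference held on the current
 * child, which must be dropped when done:
 *
 *	struct device_node *child;
 *
 *	for_each_child_of_node(parent, child)
 *		if (of_node_name_eq(child, "port"))
 *			break;			// child still holds a reference
 *	...
 *	of_node_put(child);			// drop it when finished
 */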
768
769 /**
770 * of_get_next_available_child - Find the next available child node
771 * @node: parent node
772 * @prev: previous child of the parent node, or NULL to get first
773 *
774 * This function is like of_get_next_child(), except that it
775 * automatically skips any disabled nodes (i.e. status = "disabled").
776 */
777 struct device_node *of_get_next_available_child(const struct device_node *node,
778 struct device_node *prev)
779 {
780 struct device_node *next;
781 unsigned long flags;
782
783 if (!node)
784 return NULL;
785
786 raw_spin_lock_irqsave(&devtree_lock, flags);
787 next = prev ? prev->sibling : node->child;
788 for (; next; next = next->sibling) {
789 if (!__of_device_is_available(next))
790 continue;
791 if (of_node_get(next))
792 break;
793 }
794 of_node_put(prev);
795 raw_spin_unlock_irqrestore(&devtree_lock, flags);
796 return next;
797 }
798 EXPORT_SYMBOL(of_get_next_available_child);
799
800 /**
801 * of_get_next_cpu_node - Iterate on cpu nodes
802 * @prev: previous child of the /cpus node, or NULL to get first
803 *
804 * Returns a cpu node pointer with refcount incremented, use of_node_put()
805 * on it when done. Returns NULL when prev is the last child. Decrements
806 * the refcount of prev.
807 */
808 struct device_node *of_get_next_cpu_node(struct device_node *prev)
809 {
810 struct device_node *next = NULL;
811 unsigned long flags;
812 struct device_node *node;
813
814 if (!prev)
815 node = of_find_node_by_path("/cpus");
816
817 raw_spin_lock_irqsave(&devtree_lock, flags);
818 if (prev)
819 next = prev->sibling;
820 else if (node) {
821 next = node->child;
822 of_node_put(node);
823 }
824 for (; next; next = next->sibling) {
825 if (!(of_node_name_eq(next, "cpu") ||
826 __of_node_is_type(next, "cpu")))
827 continue;
828 if (of_node_get(next))
829 break;
830 }
831 of_node_put(prev);
832 raw_spin_unlock_irqrestore(&devtree_lock, flags);
833 return next;
834 }
835 EXPORT_SYMBOL(of_get_next_cpu_node);
836
837 /**
838 * of_get_compatible_child - Find compatible child node
839 * @parent: parent node
840 * @compatible: compatible string
841 *
842 * Lookup child node whose compatible property matches @compatible string.
843 * Note that this searches only the direct children of @parent.
844 *
845 * Returns a node pointer with refcount incremented, use of_node_put() on
846 * it when done; or NULL if not found.
847 */
848 struct device_node *of_get_compatible_child(const struct device_node *parent,
849 const char *compatible)
850 {
851 struct device_node *child;
852
853 for_each_child_of_node(parent, child) {
854 if (of_device_is_compatible(child, compatible))
855 break;
856 }
857
858 return child;
859 }
860 EXPORT_SYMBOL(of_get_compatible_child);
861
862 /**
863 * of_get_child_by_name - Find the child node by name for a given parent
864 * @node: parent node
865 * @name: child name to look for.
866 *
867 * This function looks for a child node with the given matching name.
868 *
869 * Returns a node pointer if found, with refcount incremented, use
870 * of_node_put() on it when done.
871 * Returns NULL if the node is not found.
872 */
873 struct device_node *of_get_child_by_name(const struct device_node *node,
874 const char *name)
875 {
876 struct device_node *child;
877
878 for_each_child_of_node(node, child)
879 if (of_node_name_eq(child, name))
880 break;
881 return child;
882 }
883 EXPORT_SYMBOL(of_get_child_by_name);
884
885 struct device_node *__of_find_node_by_path(struct device_node *parent,
886 const char *path)
887 {
888 struct device_node *child;
889 int len;
890
891 len = strcspn(path, "/:");
892 if (!len)
893 return NULL;
894
895 __for_each_child_of_node(parent, child) {
896 const char *name = kbasename(child->full_name);
897 if (strncmp(path, name, len) == 0 && (strlen(name) == len))
898 return child;
899 }
900 return NULL;
901 }
902
903 struct device_node *__of_find_node_by_full_path(struct device_node *node,
904 const char *path)
905 {
906 const char *separator = strchr(path, ':');
907
908 while (node && *path == '/') {
909 struct device_node *tmp = node;
910
911 path++;
912 node = __of_find_node_by_path(node, path);
913 of_node_put(tmp);
914 path = strchrnul(path, '/');
915 if (separator && separator < path)
916 break;
917 }
918 return node;
919 }
920
921 /**
922 * of_find_node_opts_by_path - Find a node matching a full OF path
923 * @path: Either the full path to match, or if the path does not
924 * start with '/', the name of a property of the /aliases
925 * node (an alias). In the case of an alias, the node
926 * matching the alias' value will be returned.
927 * @opts: Address of a pointer into which to store the start of
928 * an options string appended to the end of the path with
929 * a ':' separator.
930 *
931 * Valid paths:
932 * /foo/bar - Full path
933 * foo - Valid alias
934 * foo/bar - Valid alias + relative path
935 *
936 * Returns a node pointer with refcount incremented, use
937 * of_node_put() on it when done.
938 */
939 struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
940 {
941 struct device_node *np = NULL;
942 struct property *pp;
943 unsigned long flags;
944 const char *separator = strchr(path, ':');
945
946 if (opts)
947 *opts = separator ? separator + 1 : NULL;
948
949 if (strcmp(path, "/") == 0)
950 return of_node_get(of_root);
951
952 /* The path could begin with an alias */
953 if (*path != '/') {
954 int len;
955 const char *p = separator;
956
957 if (!p)
958 p = strchrnul(path, '/');
959 len = p - path;
960
961 /* of_aliases must not be NULL */
962 if (!of_aliases)
963 return NULL;
964
965 for_each_property_of_node(of_aliases, pp) {
966 if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
967 np = of_find_node_by_path(pp->value);
968 break;
969 }
970 }
971 if (!np)
972 return NULL;
973 path = p;
974 }
975
976 /* Step down the tree matching path components */
977 raw_spin_lock_irqsave(&devtree_lock, flags);
978 if (!np)
979 np = of_node_get(of_root);
980 np = __of_find_node_by_full_path(np, path);
981 raw_spin_unlock_irqrestore(&devtree_lock, flags);
982 return np;
983 }
984 EXPORT_SYMBOL(of_find_node_opts_by_path);
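/*
 * Illustrative sketch (editorial example, not part of this file): a
 * stdout-path style value such as "serial0:115200n8" resolves through the
 * /aliases node, and everything after ':' is returned as options:
 *
 *	const char *opts;
 *	struct device_node *np;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	// np -> node referenced by /aliases/serial0, opts -> "115200n8"
 */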
985
986 /**
987 * of_find_node_by_name - Find a node by its "name" property
988 * @from: The node to start searching from or NULL; the node
989 * you pass will not be searched, only the next one
990 * will. Typically, you pass what the previous call
991 * returned. of_node_put() will be called on @from.
992 * @name: The name string to match against
993 *
994 * Returns a node pointer with refcount incremented, use
995 * of_node_put() on it when done.
996 */
997 struct device_node *of_find_node_by_name(struct device_node *from,
998 const char *name)
999 {
1000 struct device_node *np;
1001 unsigned long flags;
1002
1003 raw_spin_lock_irqsave(&devtree_lock, flags);
1004 for_each_of_allnodes_from(from, np)
1005 if (of_node_name_eq(np, name) && of_node_get(np))
1006 break;
1007 of_node_put(from);
1008 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1009 return np;
1010 }
1011 EXPORT_SYMBOL(of_find_node_by_name);
1012
1013 /**
1014 * of_find_node_by_type - Find a node by its "device_type" property
1015 * @from: The node to start searching from, or NULL to start searching
1016 * the entire device tree. The node you pass will not be
1017 * searched, only the next one will; typically, you pass
1018 * what the previous call returned. of_node_put() will be
1019 * called on @from.
1020 * @type: The type string to match against
1021 *
1022 * Returns a node pointer with refcount incremented, use
1023 * of_node_put() on it when done.
1024 */
1025 struct device_node *of_find_node_by_type(struct device_node *from,
1026 const char *type)
1027 {
1028 struct device_node *np;
1029 unsigned long flags;
1030
1031 raw_spin_lock_irqsave(&devtree_lock, flags);
1032 for_each_of_allnodes_from(from, np)
1033 if (__of_node_is_type(np, type) && of_node_get(np))
1034 break;
1035 of_node_put(from);
1036 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1037 return np;
1038 }
1039 EXPORT_SYMBOL(of_find_node_by_type);
1040
1041 /**
1042 * of_find_compatible_node - Find a node based on type and one of the
1043 * tokens in its "compatible" property
1044 * @from: The node to start searching from or NULL, the node
1045 * you pass will not be searched, only the next one
1046 * will; typically, you pass what the previous call
1047 * returned. of_node_put() will be called on it
1048 * @type: The type string to match "device_type" or NULL to ignore
1049 * @compatible: The string to match to one of the tokens in the device
1050 * "compatible" list.
1051 *
1052 * Returns a node pointer with refcount incremented, use
1053 * of_node_put() on it when done.
1054 */
1055 struct device_node *of_find_compatible_node(struct device_node *from,
1056 const char *type, const char *compatible)
1057 {
1058 struct device_node *np;
1059 unsigned long flags;
1060
1061 raw_spin_lock_irqsave(&devtree_lock, flags);
1062 for_each_of_allnodes_from(from, np)
1063 if (__of_device_is_compatible(np, compatible, type, NULL) &&
1064 of_node_get(np))
1065 break;
1066 of_node_put(from);
1067 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1068 return np;
1069 }
1070 EXPORT_SYMBOL(of_find_compatible_node);
1071
1072 /**
1073 * of_find_node_with_property - Find a node which has a property with
1074 * the given name.
1075 * @from: The node to start searching from or NULL, the node
1076 * you pass will not be searched, only the next one
1077 * will; typically, you pass what the previous call
1078 * returned. of_node_put() will be called on it
1079 * @prop_name: The name of the property to look for.
1080 *
1081 * Returns a node pointer with refcount incremented, use
1082 * of_node_put() on it when done.
1083 */
1084 struct device_node *of_find_node_with_property(struct device_node *from,
1085 const char *prop_name)
1086 {
1087 struct device_node *np;
1088 struct property *pp;
1089 unsigned long flags;
1090
1091 raw_spin_lock_irqsave(&devtree_lock, flags);
1092 for_each_of_allnodes_from(from, np) {
1093 for (pp = np->properties; pp; pp = pp->next) {
1094 if (of_prop_cmp(pp->name, prop_name) == 0) {
1095 of_node_get(np);
1096 goto out;
1097 }
1098 }
1099 }
1100 out:
1101 of_node_put(from);
1102 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1103 return np;
1104 }
1105 EXPORT_SYMBOL(of_find_node_with_property);
1106
1107 static
1108 const struct of_device_id *__of_match_node(const struct of_device_id *matches,
1109 const struct device_node *node)
1110 {
1111 const struct of_device_id *best_match = NULL;
1112 int score, best_score = 0;
1113
1114 if (!matches)
1115 return NULL;
1116
1117 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
1118 score = __of_device_is_compatible(node, matches->compatible,
1119 matches->type, matches->name);
1120 if (score > best_score) {
1121 best_match = matches;
1122 best_score = score;
1123 }
1124 }
1125
1126 return best_match;
1127 }
1128
1129 /**
1130 * of_match_node - Tell if a device_node has a matching of_match structure
1131 * @matches: array of of device match structures to search in
1132 * @node: the of device structure to match against
1133 *
1134 * Low level utility function used by device matching.
1135 */
1136 const struct of_device_id *of_match_node(const struct of_device_id *matches,
1137 const struct device_node *node)
1138 {
1139 const struct of_device_id *match;
1140 unsigned long flags;
1141
1142 raw_spin_lock_irqsave(&devtree_lock, flags);
1143 match = __of_match_node(matches, node);
1144 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1145 return match;
1146 }
1147 EXPORT_SYMBOL(of_match_node);
1148
1149 /**
1150 * of_find_matching_node_and_match - Find a node based on an of_device_id
1151 * match table.
1152 * @from: The node to start searching from or NULL, the node
1153 * you pass will not be searched, only the next one
1154 * will; typically, you pass what the previous call
1155 * returned. of_node_put() will be called on it
1156 * @matches: array of of device match structures to search in
1157 * @match: Updated to point at the matches entry which matched
1158 *
1159 * Returns a node pointer with refcount incremented, use
1160 * of_node_put() on it when done.
1161 */
1162 struct device_node *of_find_matching_node_and_match(struct device_node *from,
1163 const struct of_device_id *matches,
1164 const struct of_device_id **match)
1165 {
1166 struct device_node *np;
1167 const struct of_device_id *m;
1168 unsigned long flags;
1169
1170 if (match)
1171 *match = NULL;
1172
1173 raw_spin_lock_irqsave(&devtree_lock, flags);
1174 for_each_of_allnodes_from(from, np) {
1175 m = __of_match_node(matches, np);
1176 if (m && of_node_get(np)) {
1177 if (match)
1178 *match = m;
1179 break;
1180 }
1181 }
1182 of_node_put(from);
1183 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1184 return np;
1185 }
1186 EXPORT_SYMBOL(of_find_matching_node_and_match);
1187
1188 /**
1189 * of_modalias_node - Lookup appropriate modalias for a device node
1190 * @node: pointer to a device tree node
1191 * @modalias: Pointer to buffer that modalias value will be copied into
1192 * @len: Length of modalias value
1193 *
1194 * Based on the value of the compatible property, this routine will attempt
1195 * to choose an appropriate modalias value for a particular device tree node.
1196 * It does this by stripping the manufacturer prefix (as delimited by a ',')
1197 * from the first entry in the compatible list property.
1198 *
1199 * This routine returns 0 on success, <0 on failure.
1200 */
1201 int of_modalias_node(struct device_node *node, char *modalias, int len)
1202 {
1203 const char *compatible, *p;
1204 int cplen;
1205
1206 compatible = of_get_property(node, "compatible", &cplen);
1207 if (!compatible || strlen(compatible) > cplen)
1208 return -ENODEV;
1209 p = strchr(compatible, ',');
1210 strlcpy(modalias, p ? p + 1 : compatible, len);
1211 return 0;
1212 }
1213 EXPORT_SYMBOL_GPL(of_modalias_node);
1214
1215 /**
1216 * of_find_node_by_phandle - Find a node given a phandle
1217 * @handle: phandle of the node to find
1218 *
1219 * Returns a node pointer with refcount incremented, use
1220 * of_node_put() on it when done.
1221 */
1222 struct device_node *of_find_node_by_phandle(phandle handle)
1223 {
1224 struct device_node *np = NULL;
1225 unsigned long flags;
1226 phandle masked_handle;
1227
1228 if (!handle)
1229 return NULL;
1230
1231 raw_spin_lock_irqsave(&devtree_lock, flags);
1232
1233 masked_handle = handle & phandle_cache_mask;
1234
1235 if (phandle_cache) {
1236 if (phandle_cache[masked_handle] &&
1237 handle == phandle_cache[masked_handle]->phandle)
1238 np = phandle_cache[masked_handle];
1239 if (np && of_node_check_flag(np, OF_DETACHED)) {
1240 WARN_ON(1);
1241 of_node_put(np);
1242 phandle_cache[masked_handle] = NULL;
1243 np = NULL;
1244 }
1245 }
1246
1247 if (!np) {
1248 for_each_of_allnodes(np)
1249 if (np->phandle == handle &&
1250 !of_node_check_flag(np, OF_DETACHED)) {
1251 if (phandle_cache) {
1252
1253 of_node_get(np);
1254 phandle_cache[masked_handle] = np;
1255 }
1256 break;
1257 }
1258 }
1259
1260 of_node_get(np);
1261 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1262 return np;
1263 }
1264 EXPORT_SYMBOL(of_find_node_by_phandle);
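/*
 * Illustrative sketch (editorial example, not part of this file): resolving
 * a phandle read out of a property by hand; of_parse_phandle() below wraps
 * this pattern:
 *
 *	u32 ph;
 *	struct device_node *target;
 *
 *	if (!of_property_read_u32(np, "some-ref", &ph)) {
 *		target = of_find_node_by_phandle(ph);
 *		...
 *		of_node_put(target);
 *	}
 */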
1265
1266 void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1267 {
1268 int i;
1269 printk("%s %pOF", msg, args->np);
1270 for (i = 0; i < args->args_count; i++) {
1271 const char delim = i ? ',' : ':';
1272
1273 pr_cont("%c%08x", delim, args->args[i]);
1274 }
1275 pr_cont("\n");
1276 }
1277
1278 int of_phandle_iterator_init(struct of_phandle_iterator *it,
1279 const struct device_node *np,
1280 const char *list_name,
1281 const char *cells_name,
1282 int cell_count)
1283 {
1284 const __be32 *list;
1285 int size;
1286
1287 memset(it, 0, sizeof(*it));
1288
1289 /*
1290 * one of cell_count or cells_name must be provided to determine the
1291 * argument length.
1292 */
1293 if (cell_count < 0 && !cells_name)
1294 return -EINVAL;
1295
1296 list = of_get_property(np, list_name, &size);
1297 if (!list)
1298 return -ENOENT;
1299
1300 it->cells_name = cells_name;
1301 it->cell_count = cell_count;
1302 it->parent = np;
1303 it->list_end = list + size / sizeof(*list);
1304 it->phandle_end = list;
1305 it->cur = list;
1306
1307 return 0;
1308 }
1309 EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1310
1311 int of_phandle_iterator_next(struct of_phandle_iterator *it)
1312 {
1313 uint32_t count = 0;
1314
1315 if (it->node) {
1316 of_node_put(it->node);
1317 it->node = NULL;
1318 }
1319
1320 if (!it->cur || it->phandle_end >= it->list_end)
1321 return -ENOENT;
1322
1323 it->cur = it->phandle_end;
1324
1325 /* If phandle is 0, then it is an empty entry with no arguments. */
1326 it->phandle = be32_to_cpup(it->cur++);
1327
1328 if (it->phandle) {
1329
1330 /*
1331 * Find the provider node and parse the #*-cells property to
1332 * determine the argument length.
1333 */
1334 it->node = of_find_node_by_phandle(it->phandle);
1335
1336 if (it->cells_name) {
1337 if (!it->node) {
1338 pr_err("%pOF: could not find phandle\n",
1339 it->parent);
1340 goto err;
1341 }
1342
1343 if (of_property_read_u32(it->node, it->cells_name,
1344 &count)) {
1345 /*
1346 * If both cell_count and cells_name are given,
1347 * fall back to cell_count in the absence
1348 * of the cells_name property.
1349 */
1350 if (it->cell_count >= 0) {
1351 count = it->cell_count;
1352 } else {
1353 pr_err("%pOF: could not get %s for %pOF\n",
1354 it->parent,
1355 it->cells_name,
1356 it->node);
1357 goto err;
1358 }
1359 }
1360 } else {
1361 count = it->cell_count;
1362 }
1363
1364 /*
1365 * Make sure that the arguments actually fit in the remaining
1366 * property data length.
1367 */
1368 if (it->cur + count > it->list_end) {
1369 pr_err("%pOF: %s = %d found %d\n",
1370 it->parent, it->cells_name,
1371 count, it->cell_count);
1372 goto err;
1373 }
1374 }
1375
1376 it->phandle_end = it->cur + count;
1377 it->cur_count = count;
1378
1379 return 0;
1380
1381 err:
1382 if (it->node) {
1383 of_node_put(it->node);
1384 it->node = NULL;
1385 }
1386
1387 return -EINVAL;
1388 }
1389 EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1390
1391 int of_phandle_iterator_args(struct of_phandle_iterator *it,
1392 uint32_t *args,
1393 int size)
1394 {
1395 int i, count;
1396
1397 count = it->cur_count;
1398
1399 if (WARN_ON(size < count))
1400 count = size;
1401
1402 for (i = 0; i < count; i++)
1403 args[i] = be32_to_cpup(it->cur++);
1404
1405 return count;
1406 }
1407
1408 static int __of_parse_phandle_with_args(const struct device_node *np,
1409 const char *list_name,
1410 const char *cells_name,
1411 int cell_count, int index,
1412 struct of_phandle_args *out_args)
1413 {
1414 struct of_phandle_iterator it;
1415 int rc, cur_index = 0;
1416
1417 /* Loop over the phandles until the requested entry is found */
1418 of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
1419 /*
1420 * All of the error cases bail out of the loop, so at
1421 * this point, the parsing is successful. If the requested
1422 * index matches, then fill the out_args structure and return,
1423 * or return -ENOENT for an empty entry.
1424 */
1425 rc = -ENOENT;
1426 if (cur_index == index) {
1427 if (!it.phandle)
1428 goto err;
1429
1430 if (out_args) {
1431 int c;
1432
1433 c = of_phandle_iterator_args(&it,
1434 out_args->args,
1435 MAX_PHANDLE_ARGS);
1436 out_args->np = it.node;
1437 out_args->args_count = c;
1438 } else {
1439 of_node_put(it.node);
1440 }
1441
1442 /* Found it! Return success */
1443 return 0;
1444 }
1445
1446 cur_index++;
1447 }
1448
1449 /*
1450 * Unlock node before returning result; will be one of:
1451 * -ENOENT : index is for empty phandle
1452 * -EINVAL : parsing error on data
1453 */
1454
1455 err:
1456 of_node_put(it.node);
1457 return rc;
1458 }
1459
1460 /**
1461 * of_parse_phandle - Resolve a phandle property to a device_node pointer
1462 * @np: Pointer to device node holding phandle property
1463 * @phandle_name: Name of property holding a phandle value
1464 * @index: For properties holding a table of phandles, this is the index into
1465 * the table
1466 *
1467 * Returns the device_node pointer with refcount incremented. Use
1468 * of_node_put() on it when done.
1469 */
1470 struct device_node *of_parse_phandle(const struct device_node *np,
1471 const char *phandle_name, int index)
1472 {
1473 struct of_phandle_args args;
1474
1475 if (index < 0)
1476 return NULL;
1477
1478 if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
1479 index, &args))
1480 return NULL;
1481
1482 return args.np;
1483 }
1484 EXPORT_SYMBOL(of_parse_phandle);
1485
1486 /**
1487 * of_parse_phandle_with_args() - Find a node pointed to by a phandle in a list
1488 * @np: pointer to a device tree node containing a list
1489 * @list_name: property name that contains a list
1490 * @cells_name: property name that specifies the phandles' argument count
1491 * @index: index of a phandle to parse out
1492 * @out_args: optional pointer to output arguments structure (will be filled)
1493 *
1494 * This function is useful to parse lists of phandles and their arguments.
1495 * Returns 0 on success and fills out_args, on error returns an appropriate
1496 * errno value.
1497 *
1498 * Caller is responsible for calling of_node_put() on the returned
1499 * out_args->np pointer.
1500 *
1501 * Example:
1502 *
1503 * phandle1: node1 {
1504 * #list-cells = <2>;
1505 * }
1506 *
1507 * phandle2: node2 {
1508 * #list-cells = <1>;
1509 * }
1510 *
1511 * node3 {
1512 * list = <&phandle1 1 2 &phandle2 3>;
1513 * }
1514 *
1515 * To get a device_node for the `node2' node you may call this:
1516 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
1517 */
1518 int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1519 const char *cells_name, int index,
1520 struct of_phandle_args *out_args)
1521 {
1522 int cell_count = -1;
1523
1524 if (index < 0)
1525 return -EINVAL;
1526
1527 /* If cells_name is NULL we assume a cell count of 0 */
1528 if (!cells_name)
1529 cell_count = 0;
1530
1531 return __of_parse_phandle_with_args(np, list_name, cells_name,
1532 cell_count, index, out_args);
1533 }
1534 EXPORT_SYMBOL(of_parse_phandle_with_args);
1535
1578 int of_parse_phandle_with_args_map(const struct device_node *np,
1579 const char *list_name,
1580 const char *stem_name,
1581 int index, struct of_phandle_args *out_args)
1582 {
1583 char *cells_name, *map_name = NULL, *mask_name = NULL;
1584 char *pass_name = NULL;
1585 struct device_node *cur, *new = NULL;
1586 const __be32 *map, *mask, *pass;
1587 static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
1588 static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
1589 __be32 initial_match_array[MAX_PHANDLE_ARGS];
1590 const __be32 *match_array = initial_match_array;
1591 int i, ret, map_len, match;
1592 u32 list_size, new_size;
1593
1594 if (index < 0)
1595 return -EINVAL;
1596
1597 cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
1598 if (!cells_name)
1599 return -ENOMEM;
1600
1601 ret = -ENOMEM;
1602 map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
1603 if (!map_name)
1604 goto free;
1605
1606 mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
1607 if (!mask_name)
1608 goto free;
1609
1610 pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
1611 if (!pass_name)
1612 goto free;
1613
1614 ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
1615 out_args);
1616 if (ret)
1617 goto free;
1618
1619 /* Get the #<list>-cells property */
1620 cur = out_args->np;
1621 ret = of_property_read_u32(cur, cells_name, &list_size);
1622 if (ret < 0)
1623 goto put;
1624
1625 /* Precalculate the match array - this simplifies the match loop */
1626 for (i = 0; i < list_size; i++)
1627 initial_match_array[i] = cpu_to_be32(out_args->args[i]);
1628
1629 ret = -EINVAL;
1630 while (cur) {
1631 /* Get the <list>-map property */
1632 map = of_get_property(cur, map_name, &map_len);
1633 if (!map) {
1634 ret = 0;
1635 goto free;
1636 }
1637 map_len /= sizeof(u32);
1638
1639 /* Get the <list>-map-mask property (optional) */
1640 mask = of_get_property(cur, mask_name, NULL);
1641 if (!mask)
1642 mask = dummy_mask;
1643
1644 match = 0;
1645 while (map_len > (list_size + 1) && !match) {
1646 /* Compare specifiers */
1647 match = 1;
1648 for (i = 0; i < list_size; i++, map_len--)
1649 match &= !((match_array[i] ^ *map++) & mask[i]);
1650
1651 of_node_put(new);
1652 new = of_find_node_by_phandle(be32_to_cpup(map));
1653 map++;
1654 map_len--;
1655
1656 /* Check if not found */
1657 if (!new)
1658 goto put;
1659
1660 if (!of_device_is_available(new))
1661 match = 0;
1662
1663 ret = of_property_read_u32(new, cells_name, &new_size);
1664 if (ret)
1665 goto put;
1666
1667 /* Check for malformed properties */
1668 if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
1669 goto put;
1670 if (map_len < new_size)
1671 goto put;
1672
1673 /* Move forward by new node's #<list>-cells amount */
1674 map += new_size;
1675 map_len -= new_size;
1676 }
1677 if (!match)
1678 goto put;
1679
1680 /* Get the <list>-map-pass-thru property (optional) */
1681 pass = of_get_property(cur, pass_name, NULL);
1682 if (!pass)
1683 pass = dummy_pass;
1684
1685 /*
1686 * Successfully parsed a <list>-map translation; copy the new
1687 * specifier into the out_args structure, keeping the
1688 * bits specified in <list>-map-pass-thru.
1689 */
1690 match_array = map - new_size;
1691 for (i = 0; i < new_size; i++) {
1692 __be32 val = *(map - new_size + i);
1693
1694 if (i < list_size) {
1695 val &= ~pass[i];
1696 val |= cpu_to_be32(out_args->args[i]) & pass[i];
1697 }
1698
1699 out_args->args[i] = be32_to_cpu(val);
1700 }
1701 out_args->args_count = list_size = new_size;
1702
1703 out_args->np = new;
1704 of_node_put(cur);
1705 cur = new;
1706 }
1707 put:
1708 of_node_put(cur);
1709 of_node_put(new);
1710 free:
1711 kfree(mask_name);
1712 kfree(map_name);
1713 kfree(cells_name);
1714 kfree(pass_name);
1715
1716 return ret;
1717 }
1718 EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1719
1750 int of_parse_phandle_with_fixed_args(const struct device_node *np,
1751 const char *list_name, int cell_count,
1752 int index, struct of_phandle_args *out_args)
1753 {
1754 if (index < 0)
1755 return -EINVAL;
1756 return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
1757 index, out_args);
1758 }
1759 EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
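/*
 * Illustrative sketch (editorial example, not part of this file): here the
 * argument count is fixed by the binding rather than read from a #*-cells
 * property in the target node:
 *
 *	node {
 *		list = <&phandle1 0 2 &phandle2 2 3>;
 *	};
 *
 *	struct of_phandle_args args;
 *
 *	// entry 1 -> phandle2, args.args = { 2, 3 }
 *	of_parse_phandle_with_fixed_args(np, "list", 2, 1, &args);
 *	of_node_put(args.np);
 */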
1760
1761 /**
1762 * of_count_phandle_with_args() - Find the number of phandle references in a property
1763 * @np: pointer to a device tree node containing a list
1764 * @list_name: property name that contains a list
1765 * @cells_name: property name that specifies the phandles' argument count
1766 *
1767 * Returns the number of phandle + argument tuples within a property. It
1768 * is a typical pattern to encode a list of phandle and variable
1769 * arguments into a single property. The number of arguments is encoded
1770 * by a property in the phandle-target node. For instance, a gpios
1771 * property would contain a list of GPIO specifiers consisting of a
1772 * phandle and 1 or more arguments. The number of arguments is
1773 * determined by the #gpio-cells property in the node pointed to by the
1774 * phandle.
1775 */
1776 int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1777 const char *cells_name)
1778 {
1779 struct of_phandle_iterator it;
1780 int rc, cur_index = 0;
1781
1782 /*
1783 * If cells_name is NULL we assume a cell count of 0. This makes
1784 * counting the phandles trivial as each 32bit word in the list is a
1785 * phandle and no arguments are to consider. So we don't iterate through
1786 * the list but just use the length to determine the phandle count.
1787 */
1788 if (!cells_name) {
1789 const __be32 *list;
1790 int size;
1791
1792 list = of_get_property(np, list_name, &size);
1793 if (!list)
1794 return -ENOENT;
1795
1796 return size / sizeof(*list);
1797 }
1798
1799 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
1800 if (rc)
1801 return rc;
1802
1803 while ((rc = of_phandle_iterator_next(&it)) == 0)
1804 cur_index += 1;
1805
1806 if (rc != -ENOENT)
1807 return rc;
1808
1809 return cur_index;
1810 }
1811 EXPORT_SYMBOL(of_count_phandle_with_args);
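/*
 * Illustrative sketch (editorial example, not part of this file): counting
 * specifier tuples, e.g. with gpios = <&gpioa 1 0>, <&gpiob 5 1>; and
 * #gpio-cells = <2> in both controller nodes:
 *
 *	int n = of_count_phandle_with_args(np, "gpios", "#gpio-cells");
 *	// n == 2
 */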
1812
1813 /**
1814 * __of_add_property - Add a property to a node without lock operations
1815 */
1816 int __of_add_property(struct device_node *np, struct property *prop)
1817 {
1818 struct property **next;
1819
1820 prop->next = NULL;
1821 next = &np->properties;
1822 while (*next) {
1823 if (strcmp(prop->name, (*next)->name) == 0)
1824 /* duplicate! don't insert it */
1825 return -EEXIST;
1826
1827 next = &(*next)->next;
1828 }
1829 *next = prop;
1830
1831 return 0;
1832 }
1833
1834 /**
1835 * of_add_property - Add a property to a node
1836 */
1837 int of_add_property(struct device_node *np, struct property *prop)
1838 {
1839 unsigned long flags;
1840 int rc;
1841
1842 mutex_lock(&of_mutex);
1843
1844 raw_spin_lock_irqsave(&devtree_lock, flags);
1845 rc = __of_add_property(np, prop);
1846 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1847
1848 if (!rc)
1849 __of_add_property_sysfs(np, prop);
1850
1851 mutex_unlock(&of_mutex);
1852
1853 if (!rc)
1854 of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
1855
1856 return rc;
1857 }
1858
1859 int __of_remove_property(struct device_node *np, struct property *prop)
1860 {
1861 struct property **next;
1862
1863 for (next = &np->properties; *next; next = &(*next)->next) {
1864 if (*next == prop)
1865 break;
1866 }
1867 if (*next == NULL)
1868 return -ENODEV;
1869
1870 /* found the node */
1871 *next = prop->next;
1872 prop->next = np->deadprops;
1873 np->deadprops = prop;
1874
1875 return 0;
1876 }
1877
1878 /**
1879 * of_remove_property - Remove a property from a node.
1880 *
1881 * Note that we don't actually remove it, since we have given out
1882 * who-knows-how-many pointers to the data using get-property.
1883 * Instead we just move the property to the "dead properties"
1884 * list, so it won't be found any more.
1885 */
1886 int of_remove_property(struct device_node *np, struct property *prop)
1887 {
1888 unsigned long flags;
1889 int rc;
1890
1891 if (!prop)
1892 return -ENODEV;
1893
1894 mutex_lock(&of_mutex);
1895
1896 raw_spin_lock_irqsave(&devtree_lock, flags);
1897 rc = __of_remove_property(np, prop);
1898 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1899
1900 if (!rc)
1901 __of_remove_property_sysfs(np, prop);
1902
1903 mutex_unlock(&of_mutex);
1904
1905 if (!rc)
1906 of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
1907
1908 return rc;
1909 }
1910
1911 int __of_update_property(struct device_node *np, struct property *newprop,
1912 struct property **oldpropp)
1913 {
1914 struct property **next, *oldprop;
1915
1916 for (next = &np->properties; *next; next = &(*next)->next) {
1917 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1918 break;
1919 }
1920 *oldpropp = oldprop = *next;
1921
1922 if (oldprop) {
1923 /* replace the node */
1924 newprop->next = oldprop->next;
1925 *next = newprop;
1926 oldprop->next = np->deadprops;
1927 np->deadprops = oldprop;
1928 } else {
1929 /* new node */
1930 newprop->next = NULL;
1931 *next = newprop;
1932 }
1933
1934 return 0;
1935 }
1936
1937 /*
1938 * of_update_property - Update a property in a node, if the property does
1939 * not exist, add it.
1940 *
1941 * Note that we don't actually remove it, since we have given out
1942 * who-knows-how-many pointers to the data using get-property.
1943 * Instead we just move the property to the "dead properties" list,
1944 * and add the new property to the property list.
1945 */
1946 int of_update_property(struct device_node *np, struct property *newprop)
1947 {
1948 struct property *oldprop;
1949 unsigned long flags;
1950 int rc;
1951
1952 if (!newprop->name)
1953 return -EINVAL;
1954
1955 mutex_lock(&of_mutex);
1956
1957 raw_spin_lock_irqsave(&devtree_lock, flags);
1958 rc = __of_update_property(np, newprop, &oldprop);
1959 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1960
1961 if (!rc)
1962 __of_update_property_sysfs(np, newprop, oldprop);
1963
1964 mutex_unlock(&of_mutex);
1965
1966 if (!rc)
1967 of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
1968
1969 return rc;
1970 }
1971
1972 static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1973 int id, const char *stem, int stem_len)
1974 {
1975 ap->np = np;
1976 ap->id = id;
1977 strncpy(ap->stem, stem, stem_len);
1978 ap->stem[stem_len] = 0;
1979 list_add_tail(&ap->link, &aliases_lookup);
1980 pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1981 ap->alias, ap->stem, ap->id, np);
1982 }
1983
1984 /**
1985 * of_alias_scan - Scan all properties of the 'aliases' node
1986 * @dt_alloc: An allocator that provides a virtual address to memory
1987 * for storing the resulting tree
1988 *
1989 * The function scans all the properties of the 'aliases' node and populates
1990 * the global lookup table with the properties. It also sets of_chosen and
1991 * of_aliases and, when a console is configured, of_stdout and
1992 * of_stdout_options.
1993 */
1994 void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1995 {
1996 struct property *pp;
1997
1998 of_aliases = of_find_node_by_path("/aliases");
1999 of_chosen = of_find_node_by_path("/chosen");
2000 if (of_chosen == NULL)
2001 of_chosen = of_find_node_by_path("/chosen@0");
2002
2003 if (of_chosen) {
2004 /* linux,stdout-path and /aliases/stdout are for legacy compatibility */
2005 const char *name = NULL;
2006
2007 if (of_property_read_string(of_chosen, "stdout-path", &name))
2008 of_property_read_string(of_chosen, "linux,stdout-path",
2009 &name);
2010 if (IS_ENABLED(CONFIG_PPC) && !name)
2011 of_property_read_string(of_aliases, "stdout", &name);
2012 if (name)
2013 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
2014 }
2015
2016 if (!of_aliases)
2017 return;
2018
2019 for_each_property_of_node(of_aliases, pp) {
2020 const char *start = pp->name;
2021 const char *end = start + strlen(start);
2022 struct device_node *np;
2023 struct alias_prop *ap;
2024 int id, len;
2025
2026 /* Skip those we do not want to proceed with */
2027 if (!strcmp(pp->name, "name") ||
2028 !strcmp(pp->name, "phandle") ||
2029 !strcmp(pp->name, "linux,phandle"))
2030 continue;
2031
2032 np = of_find_node_by_path(pp->value);
2033 if (!np)
2034 continue;
2035
2036 /* walk the alias backwards to extract the id and work out
2037 * the 'stem' string */
2038 while (isdigit(*(end-1)) && end > start)
2039 end--;
2040 len = end - start;
2041
2042 if (kstrtoint(end, 10, &id) < 0)
2043 continue;
2044
2045 /* Allocate an alias_prop with enough space for the stem */
2046 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2047 if (!ap)
2048 continue;
2049 memset(ap, 0, sizeof(*ap) + len + 1);
2050 ap->alias = start;
2051 of_alias_add(ap, np, id, start, len);
2052 }
2053 }
2054
2055 /**
2056 * of_alias_get_id - Get alias id for the given device_node
2057 * @np: Pointer to the given device_node
2058 * @stem: Alias stem of the given device_node
2059 *
2060 * The function travels the lookup table to get the alias id for the given
2061 * device_node and alias stem. It returns the alias id if found.
2062 */
2063 int of_alias_get_id(struct device_node *np, const char *stem)
2064 {
2065 struct alias_prop *app;
2066 int id = -ENODEV;
2067
2068 mutex_lock(&of_mutex);
2069 list_for_each_entry(app, &aliases_lookup, link) {
2070 if (strcmp(app->stem, stem) != 0)
2071 continue;
2072
2073 if (np == app->np) {
2074 id = app->id;
2075 break;
2076 }
2077 }
2078 mutex_unlock(&of_mutex);
2079
2080 return id;
2081 }
2082 EXPORT_SYMBOL_GPL(of_alias_get_id);
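/*
 * Illustrative sketch (editorial example, not part of this file): with an
 * aliases node like
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = &uart1;
 *	};
 *
 * a driver probing the node behind &uart1 recovers its stable index with:
 *
 *	int id = of_alias_get_id(np, "serial");	// id == 1
 */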
2083
2097 int of_alias_get_alias_list(const struct of_device_id *matches,
2098 const char *stem, unsigned long *bitmap,
2099 unsigned int nbits)
2100 {
2101 struct alias_prop *app;
2102 int ret = 0;
2103
2104 /* Zero the bitmap so only IDs found below are reported */
2105 bitmap_zero(bitmap, nbits);
2106
2107 mutex_lock(&of_mutex);
2108 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2109 list_for_each_entry(app, &aliases_lookup, link) {
2110 pr_debug("%s: stem: %s, id: %d\n",
2111 __func__, app->stem, app->id);
2112
2113 if (strcmp(app->stem, stem) != 0) {
2114 pr_debug("%s: stem comparison didn't pass %s\n",
2115 __func__, app->stem);
2116 continue;
2117 }
2118
2119 if (of_match_node(matches, app->np)) {
2120 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2121
2122 if (app->id >= nbits) {
2123 pr_warn("%s: ID %d >= than bitmap field %d\n",
2124 __func__, app->id, nbits);
2125 ret = -EOVERFLOW;
2126 } else {
2127 set_bit(app->id, bitmap);
2128 }
2129 }
2130 }
2131 mutex_unlock(&of_mutex);
2132
2133 return ret;
2134 }
2135 EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2136
2137 /**
2138 * of_alias_get_highest_id - Get highest alias id for the given stem
2139 * @stem: Alias stem to be examined
2140 *
2141 * The function travels the lookup table to get the highest alias id for the
2142 * given alias stem. It returns the alias id if found.
2143 */
2144 int of_alias_get_highest_id(const char *stem)
2145 {
2146 struct alias_prop *app;
2147 int id = -ENODEV;
2148
2149 mutex_lock(&of_mutex);
2150 list_for_each_entry(app, &aliases_lookup, link) {
2151 if (strcmp(app->stem, stem) != 0)
2152 continue;
2153
2154 if (app->id > id)
2155 id = app->id;
2156 }
2157 mutex_unlock(&of_mutex);
2158
2159 return id;
2160 }
2161 EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2162
2163 /**
2164 * of_console_check() - Test and setup console for DT setup
2165 * @dn: Pointer to device node
2166 * @name: Name to use for preferred console without index. ex. "ttyS"
2167 * @index: Index to use for preferred console.
2168 *
2169 * Check if the given device node matches the stdout-path property in the
2170 * /chosen node. If it does then register it as the preferred console and
2171 * return TRUE. Otherwise return FALSE.
2172 */
2173 bool of_console_check(struct device_node *dn, char *name, int index)
2174 {
2175 if (!dn || dn != of_stdout || console_set_on_cmdline)
2176 return false;
2177
2178 /*
2179 * XXX: cast `options' to char pointer to suppress compilation
2180 * warnings: printk, UART and console drivers expect char pointer.
2181 */
2182 return !add_preferred_console(name, index, (char *)of_stdout_options);
2183 }
2184 EXPORT_SYMBOL_GPL(of_console_check);
2185
2186 /**
2187 * of_find_next_cache_node - Find a node's subsidiary cache
2188 * @np: node of type "cpu" or "cache"
2189 *
2190 * Returns a node pointer with refcount incremented, use
2191 * of_node_put() on it when done. Caller should hold a reference
2192 * to np.
2193 */
2194 struct device_node *of_find_next_cache_node(const struct device_node *np)
2195 {
2196 struct device_node *child, *cache_node;
2197
2198 cache_node = of_parse_phandle(np, "l2-cache", 0);
2199 if (!cache_node)
2200 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2201
2202 if (cache_node)
2203 return cache_node;
2204
2205 /* OF on pmac has nodes instead of properties named "l2-cache"
2206 * beneath CPU nodes.
2207 */
2208 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2209 for_each_child_of_node(np, child)
2210 if (of_node_is_type(child, "cache"))
2211 return child;
2212
2213 return NULL;
2214 }
2215
2216
2217 /**
2218 * of_find_last_cache_level - Find the level at which the last cache is
2219 * present for the given logical cpu
2220 * @cpu: cpu number (logical index) for which the last cache level is needed
2221 *
2222 * Returns the level at which the last cache is present. It is exactly the
2223 * same as the total number of cache levels for the given logical cpu.
2224 */
2225 int of_find_last_cache_level(unsigned int cpu)
2226 {
2227 u32 cache_level = 0;
2228 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2229
2230 while (np) {
2231 prev = np;
2232 of_node_put(np);
2233 np = of_find_next_cache_node(np);
2234 }
2235
2236 of_property_read_u32(prev, "cache-level", &cache_level);
2237
2238 return cache_level;
2239 }
2240
2241 /**
2242 * of_map_rid - Translate a requester ID through a downstream mapping.
2243 * @np: root complex device node.
2244 * @rid: device requester ID to map.
2245 * @map_name: property name of the map to use.
2246 * @map_mask_name: optional property name of the mask to use.
2247 * @target: optional pointer to a target device node.
2248 * @id_out: optional pointer to receive the translated ID.
2249 *
2250 * Given a device requester ID, look up the appropriate implementation-defined
2251 * platform ID and/or the target device which receives transactions on that
2252 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
2253 * @id_out may be NULL, but not both. If @target is non-NULL, entries are
2254 * only matched when they map onto that target node; if it is NULL, the
2255 * first entry covering @rid is used.
2256 *
2257 * Return: 0 on success or a standard error code on failure.
2258 */
2259
2260 int of_map_rid(struct device_node *np, u32 rid,
2261 const char *map_name, const char *map_mask_name,
2262 struct device_node **target, u32 *id_out)
2263 {
2264 u32 map_mask, masked_rid;
2265 int map_len;
2266 const __be32 *map = NULL;
2267
2268 if (!np || !map_name || (!target && !id_out))
2269 return -EINVAL;
2270
2271 map = of_get_property(np, map_name, &map_len);
2272 if (!map) {
2273 if (target)
2274 return -ENODEV;
2275
2276 *id_out = rid;
2277 return 0;
2278 }
2279
2280 if (!map_len || map_len % (4 * sizeof(*map))) {
2281 pr_err("%pOF: Error: Bad %s length: %d\n", np,
2282 map_name, map_len);
2283 return -EINVAL;
2284 }
2285
2286 /* The default is to select all bits. */
2287 map_mask = 0xffffffff;
2288
2289 /*
2290 * Can be overridden by "{iommu,msi}-map-mask" property.
2291 * If of_property_read_u32() fails, the default is used.
2292 */
2293 if (map_mask_name)
2294 of_property_read_u32(np, map_mask_name, &map_mask);
2295
2296 masked_rid = map_mask & rid;
2297 for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
2298 struct device_node *phandle_node;
2299 u32 rid_base = be32_to_cpup(map + 0);
2300 u32 phandle = be32_to_cpup(map + 1);
2301 u32 out_base = be32_to_cpup(map + 2);
2302 u32 rid_len = be32_to_cpup(map + 3);
2303
2304 if (rid_base & ~map_mask) {
2305 pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
2306 np, map_name, map_name,
2307 map_mask, rid_base);
2308 return -EFAULT;
2309 }
2310
2311 if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
2312 continue;
2313
2314 phandle_node = of_find_node_by_phandle(phandle);
2315 if (!phandle_node)
2316 return -ENODEV;
2317
2318 if (target) {
2319 if (*target)
2320 of_node_put(phandle_node);
2321 else
2322 *target = phandle_node;
2323
2324 if (*target != phandle_node)
2325 continue;
2326 }
2327
2328 if (id_out)
2329 *id_out = masked_rid - rid_base + out_base;
2330
2331 pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
2332 np, map_name, map_mask, rid_base, out_base,
2333 rid_len, rid, masked_rid - rid_base + out_base);
2334 return 0;
2335 }
2336
2337 pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name,
2338 rid, target && *target ? *target : NULL);
2339
2340 /* Bypass translation */
2341 if (id_out)
2342 *id_out = rid;
2343 return 0;
2344 }
2345 EXPORT_SYMBOL_GPL(of_map_rid);
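/*
 * Illustrative sketch (editorial example, not part of this file): a PCI
 * requester ID walked through an msi-map whose entries are
 * <rid-base phandle out-base length>. With
 *
 *	msi-map = <0x0000 &its 0x10000 0x10000>;
 *	msi-map-mask = <0xffff>;
 *
 * calling
 *
 *	u32 out;
 *	of_map_rid(np, 0x0042, "msi-map", "msi-map-mask", NULL, &out);
 *
 * matches rid-base 0x0000 with length 0x10000 and yields out == 0x10042,
 * the target being the node behind &its.
 */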