This source file includes the following definitions:
- __hwspin_trylock
- __hwspin_lock_timeout
- __hwspin_unlock
- of_hwspin_lock_simple_xlate
- of_hwspin_lock_get_id
- of_hwspin_lock_get_id_byname
- hwspin_lock_register_single
- hwspin_lock_unregister_single
- hwspin_lock_register
- hwspin_lock_unregister
- devm_hwspin_lock_unreg
- devm_hwspin_lock_device_match
- devm_hwspin_lock_unregister
- devm_hwspin_lock_register
- __hwspin_lock_request
- hwspin_lock_get_id
- hwspin_lock_request
- hwspin_lock_request_specific
- hwspin_lock_free
- devm_hwspin_lock_match
- devm_hwspin_lock_release
- devm_hwspin_lock_free
- devm_hwspin_lock_request
- devm_hwspin_lock_request_specific
/*
 * Hardware spinlock framework
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay, in microseconds, used while spinning in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tag: marks an hwspinlock instance as unused */
#define HWSPINLOCK_UNUSED	(0)

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key ids,
 * which is a good fit for the dense, zero-based id space the framework
 * assigns to the locks.
 *
 * The radix tree API also supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). This way, looking up an unused hwspinlock
 * is reduced to a single tag lookup operation.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronisation.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);
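
/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at
 *         (if requested by @mode)
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken. It never sleeps.
 *
 * With HWLOCK_RAW and HWLOCK_IN_ATOMIC the local spinlock is skipped, so
 * callers must themselves serialize local access to the lock.
 *
 * Returns: 0 if we successfully locked the hwspinlock, or -EBUSY if the
 * hwspinlock was already taken.
 */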
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do: no local lock was taken */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
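
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at
 *         (if requested by @mode)
 *
 * This function locks the given @hwlock. If the @hwlock cannot be taken
 * immediately, it keeps trying until the timeout expires. In
 * HWLOCK_IN_ATOMIC mode the loop busy-waits in HWSPINLOCK_RETRY_DELAY_US
 * steps instead of consulting jiffies, which may not advance in atomic
 * context.
 *
 * Returns: 0 when successful and an appropriate error code otherwise (most
 * notably -ETIMEDOUT if the @hwlock is still busy after @to msecs).
 */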
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
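
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: the caller's saved interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must already be locked before calling this function: it is a bug
 * to call unlock on an @hwlock that is already unlocked.
 */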
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it takes
	 * the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do: no local lock was taken */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
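
/**
 * of_hwspin_lock_simple_xlate() - translate a hwlock specifier to a lock index
 * @hwlock_spec: hwlock specifier as parsed from the device tree
 *
 * A simple translation function, suitable for hwspinlock platform drivers
 * whose lock specifier is a single cell (the lock's relative index).
 *
 * Returns: the relative index of the lock within its bank, or -EINVAL on an
 * invalid specifier cell count.
 */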
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}
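
/**
 * of_hwspin_lock_get_id() - get the lock id for an OF phandle-based lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the "hwlocks" property
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock, so that it can then be
 * requested with the normal hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on an invalid specifier
 * value, or an appropriate error as returned from the OF parsing of the
 * DT client node.
 */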
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
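
/**
 * of_hwspin_lock_get_id_byname() - get the lock id for a named hwlock
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name, as listed in the "hwlock-names" property
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock by name, so that it can
 * then be requested with the normal hwspin_lock_request_specific() API.
 *
 * Returns: the global lock id number on success, or an appropriate error
 * as returned from the OF parsing of the DT client node.
 */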
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	/* propagate failures: returning 0 here would hide insertion errors */
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
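
/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or an appropriate error code on failure.
 */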
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
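
/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or -EBUSY if any of the locks are still taken.
 */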
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check which should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}
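
/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock that
 * was registered with devm_hwspin_lock_register().
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or an appropriate error code on failure.
 */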
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);
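
/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This is a device-managed version of hwspin_lock_register(): the bank is
 * automatically unregistered when @dev is unbound.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or an appropriate error code on failure.
 */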
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
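
/**
 * __hwspin_lock_request() - tag an hwspinlock as used, power it up
 *
 * Must be called with hwspinlock_tree_lock held.
 */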
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent the underlying implementation driver from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

	/* pm_runtime_get_sync() may return a positive value; report success */
	return 0;
}
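
/**
 * hwspin_lock_get_id() - retrieve the id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns: the id number of @hwlock, or -EINVAL if @hwlock is invalid.
 */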
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
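
/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: the address of the assigned hwspinlock, or NULL on error.
 */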
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check (this shouldn't happen): we asked for a single lock */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
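
/**
 * hwspin_lock_request_specific() - request a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function to reserve
 * specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: the address of the assigned hwspinlock, or NULL on error.
 */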
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen): the lookup is keyed by id */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
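
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from an earlier
 * call to hwspin_lock_request() or hwspin_lock_request_specific().
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or an appropriate error code on failure.
 */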
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}
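
/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device that requested the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again, and drops the matching
 * device-managed resource. Should only be called with an @hwlock that was
 * retrieved from an earlier call to devm_hwspin_lock_request() or
 * devm_hwspin_lock_request_specific().
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: 0 on success, or an appropriate error code on failure.
 */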
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
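
/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device on whose behalf the hwspinlock is requested
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * The lock is freed automatically when @dev is unbound.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: the address of the assigned hwspinlock, or NULL on error.
 */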
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
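
/**
 * devm_hwspin_lock_request_specific() - request a specific hwspinlock for
 *					 a managed device
 * @dev: the device on whose behalf the hwspinlock is requested
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * The lock is freed automatically when @dev is unbound.
 *
 * Should be called from a process context (might sleep).
 *
 * Returns: the address of the assigned hwspinlock, or NULL on error.
 */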
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");