This source file includes the following definitions:
- __mutex_init
- __mutex_owner
- __owner_task
- mutex_is_locked
- mutex_trylock_recursive
- __owner_flags
- __mutex_trylock_or_owner
- __mutex_trylock
- __mutex_trylock_fast
- __mutex_unlock_fast
- __mutex_set_flag
- __mutex_clear_flag
- __mutex_waiter_is_first
- __mutex_add_waiter
- __mutex_handoff
- mutex_lock
- ww_mutex_lock_acquired
- __ww_ctx_stamp_after
- __ww_mutex_die
- __ww_mutex_wound
- __ww_mutex_check_waiters
- ww_mutex_set_context_fastpath
- ww_mutex_spin_on_owner
- mutex_spin_on_owner
- mutex_can_spin_on_owner
- mutex_optimistic_spin
- mutex_optimistic_spin
- mutex_unlock
- ww_mutex_unlock
- __ww_mutex_kill
- __ww_mutex_check_kill
- __ww_mutex_add_waiter
- __mutex_lock_common
- __mutex_lock
- __ww_mutex_lock
- mutex_lock_nested
- _mutex_lock_nest_lock
- mutex_lock_killable_nested
- mutex_lock_interruptible_nested
- mutex_lock_io_nested
- ww_mutex_deadlock_injection
- ww_mutex_lock
- ww_mutex_lock_interruptible
- __mutex_unlock_slowpath
- mutex_lock_interruptible
- mutex_lock_killable
- mutex_lock_io
- __mutex_lock_slowpath
- __mutex_lock_killable_slowpath
- __mutex_lock_interruptible_slowpath
- __ww_mutex_lock_slowpath
- __ww_mutex_lock_interruptible_slowpath
- mutex_trylock
- ww_mutex_lock
- ww_mutex_lock_interruptible
- atomic_dec_and_mutex_lock
/*
 * Generic blocking mutual-exclusion locks (mutexes), including the
 * wound/wait ("ww") mutex variant used to acquire multiple locks
 * without deadlocking.
 *
 * The lock state lives in a single atomic owner word: the fast paths
 * are a lone cmpxchg, the midpath optimistically spins while the owner
 * is running on another CPU, and the slow path queues waiters on a
 * wait list.
 */
21 #include <linux/mutex.h>
22 #include <linux/ww_mutex.h>
23 #include <linux/sched/signal.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/wake_q.h>
26 #include <linux/sched/debug.h>
27 #include <linux/export.h>
28 #include <linux/spinlock.h>
29 #include <linux/interrupt.h>
30 #include <linux/debug_locks.h>
31 #include <linux/osq_lock.h>
32
33 #ifdef CONFIG_DEBUG_MUTEXES
34 # include "mutex-debug.h"
35 #else
36 # include "mutex.h"
37 #endif
38
39 void
40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
41 {
42 atomic_long_set(&lock->owner, 0);
43 spin_lock_init(&lock->wait_lock);
44 INIT_LIST_HEAD(&lock->wait_list);
45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
46 osq_lock_init(&lock->osq);
47 #endif
48
49 debug_mutex_init(lock, name, key);
50 }
51 EXPORT_SYMBOL(__mutex_init);
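/*
 * Illustrative sketch (not part of this file): the two common ways a
 * caller obtains an initialised mutex. The identifiers "example_lock",
 * "struct example_dev" and "example_dev_setup" are hypothetical.
 */
static DEFINE_MUTEX(example_lock);		/* static initialisation */

struct example_dev {
	struct mutex lock;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->lock);			/* runtime initialisation */
}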
52
/*
 * @owner encoding: the owning task_struct pointer, with the low three
 * bits used as state flags (the MUTEX_FLAG_* values below). A NULL
 * task pointer means the mutex is unlocked.
 */
62 #define MUTEX_FLAG_WAITERS 0x01
63 #define MUTEX_FLAG_HANDOFF 0x02
64 #define MUTEX_FLAG_PICKUP 0x04
65
66 #define MUTEX_FLAGS 0x07
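/*
 * Meaning of the flag bits stored in the low bits of the owner word,
 * as used by the code below:
 *
 *   MUTEX_FLAG_WAITERS - the wait list is non-empty; unlock must take
 *                        the slow path and wake a waiter.
 *   MUTEX_FLAG_HANDOFF - the first waiter has asked for the next unlock
 *                        to hand the lock to it directly instead of
 *                        letting it be stolen.
 *   MUTEX_FLAG_PICKUP  - the lock has been handed off to a specific
 *                        waiter; only that task may take it.
 */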
67
/*
 * Internal helper: strip the flag bits to recover the owning task.
 * Not for use outside of the mutex code.
 */
73 static inline struct task_struct *__mutex_owner(struct mutex *lock)
74 {
75 return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
76 }
77
78 static inline struct task_struct *__owner_task(unsigned long owner)
79 {
80 return (struct task_struct *)(owner & ~MUTEX_FLAGS);
81 }
82
83 bool mutex_is_locked(struct mutex *lock)
84 {
85 return __mutex_owner(lock) != NULL;
86 }
87 EXPORT_SYMBOL(mutex_is_locked);
88
89 __must_check enum mutex_trylock_recursive_enum
90 mutex_trylock_recursive(struct mutex *lock)
91 {
92 if (unlikely(__mutex_owner(lock) == current))
93 return MUTEX_TRYLOCK_RECURSIVE;
94
95 return mutex_trylock(lock);
96 }
97 EXPORT_SYMBOL(mutex_trylock_recursive);
98
99 static inline unsigned long __owner_flags(unsigned long owner)
100 {
101 return owner & MUTEX_FLAGS;
102 }
103
/*
 * Trylock variant that returns the owning task on failure.
 */
107 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
108 {
109 unsigned long owner, curr = (unsigned long)current;
110
111 owner = atomic_long_read(&lock->owner);
112 for (;;) {
113 unsigned long old, flags = __owner_flags(owner);
114 unsigned long task = owner & ~MUTEX_FLAGS;
115
116 if (task) {
117 if (likely(task != curr))
118 break;
119
120 if (likely(!(flags & MUTEX_FLAG_PICKUP)))
121 break;
122
123 flags &= ~MUTEX_FLAG_PICKUP;
124 } else {
125 #ifdef CONFIG_DEBUG_MUTEXES
126 DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
127 #endif
128 }
129
/*
 * Clear MUTEX_FLAG_HANDOFF as we take ownership, so the handoff bit
 * cannot outlive the acquisition and end up set on an unlocked mutex.
 */
135 flags &= ~MUTEX_FLAG_HANDOFF;
136
137 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
138 if (old == owner)
139 return NULL;
140
141 owner = old;
142 }
143
144 return __owner_task(owner);
145 }
146
/*
 * Actual trylock that will work on any unlocked state.
 */
150 static inline bool __mutex_trylock(struct mutex *lock)
151 {
152 return !__mutex_trylock_or_owner(lock);
153 }
154
155 #ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * When lockdep (CONFIG_DEBUG_LOCK_ALLOC) is disabled, the fast paths
 * below handle the uncontended case with a single cmpxchg on the
 * owner word.
 */
166 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
167 {
168 unsigned long curr = (unsigned long)current;
169 unsigned long zero = 0UL;
170
171 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
172 return true;
173
174 return false;
175 }
176
177 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
178 {
179 unsigned long curr = (unsigned long)current;
180
181 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
182 return true;
183
184 return false;
185 }
186 #endif
187
188 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
189 {
190 atomic_long_or(flag, &lock->owner);
191 }
192
193 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
194 {
195 atomic_long_andnot(flag, &lock->owner);
196 }
197
198 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
199 {
200 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
201 }
202
/*
 * Add @waiter at position @list in the wait list; the first waiter
 * sets MUTEX_FLAG_WAITERS so that unlock takes the slow path.
 */
207 static void __sched
208 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
209 struct list_head *list)
210 {
211 debug_mutex_add_waiter(lock, waiter, current);
212
213 list_add_tail(&waiter->list, list);
214 if (__mutex_waiter_is_first(lock, waiter))
215 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
216 }
217
/*
 * Give up ownership to a specific task; when @task = NULL this is
 * equivalent to a regular unlock. Sets MUTEX_FLAG_PICKUP so that only
 * @task may take the lock, clears HANDOFF and preserves WAITERS.
 * Provides RELEASE semantics, pairing with the ACQUIRE in
 * __mutex_trylock().
 */
224 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
225 {
226 unsigned long owner = atomic_long_read(&lock->owner);
227
228 for (;;) {
229 unsigned long old, new;
230
231 #ifdef CONFIG_DEBUG_MUTEXES
232 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
233 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
234 #endif
235
236 new = (owner & MUTEX_FLAG_WAITERS);
237 new |= (unsigned long)task;
238 if (task)
239 new |= MUTEX_FLAG_PICKUP;
240
241 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
242 if (old == owner)
243 break;
244
245 owner = old;
246 }
247 }
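/*
 * Handoff protocol in short: a waiter that keeps losing the lock sets
 * MUTEX_FLAG_HANDOFF; the unlocking owner then calls __mutex_handoff()
 * to pass ownership to the top waiter with MUTEX_FLAG_PICKUP set, and
 * __mutex_trylock_or_owner() lets only that task clear PICKUP and
 * complete the acquisition.
 */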
248
249 #ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * The lock/unlock logic is split into fast paths and out-of-line slow
 * paths to keep the fast paths as small as possible.
 */
256 static void __sched __mutex_lock_slowpath(struct mutex *lock);
257
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * This function is similar to (but not equivalent to) down().
 */
279 void __sched mutex_lock(struct mutex *lock)
280 {
281 might_sleep();
282
283 if (!__mutex_trylock_fast(lock))
284 __mutex_lock_slowpath(lock);
285 }
286 EXPORT_SYMBOL(mutex_lock);
287 #endif
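/*
 * Illustrative usage sketch (not part of this file): a mutex
 * serialising updates to a shared counter. "example_stat_lock",
 * "example_stat" and "example_stat_inc" are hypothetical names.
 */
static DEFINE_MUTEX(example_stat_lock);
static unsigned long example_stat;

static void example_stat_inc(void)
{
	mutex_lock(&example_stat_lock);		/* may sleep: process context only */
	example_stat++;
	mutex_unlock(&example_stat_lock);	/* must be released by the locking task */
}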
288
/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     they make a request for a lock being held by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     an older transaction makes a request for a lock being held by the
 *     newer transaction.
 */
305 static __always_inline void
306 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
307 {
308 #ifdef CONFIG_DEBUG_MUTEXES
309
310
311
312
313
314
315 DEBUG_LOCKS_WARN_ON(ww->ctx);
316
317
318
319
320 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
321
322 if (ww_ctx->contending_lock) {
323
324
325
326
327 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
328
329
330
331
332
333 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
334 ww_ctx->contending_lock = NULL;
335 }
336
337
338
339
340 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
341 #endif
342 ww_ctx->acquired++;
343 ww->ctx = ww_ctx;
344 }
345
/*
 * Determine if @a is 'after' @b, i.e. @a is a younger transaction than
 * @b (it has a higher stamp).
 */
351 static inline bool __sched
352 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
353 {
354
355 return (signed long)(a->stamp - b->stamp) > 0;
356 }
357
/*
 * Wait-Die: wake a waiter whose context is younger (a higher stamp)
 * than @ww_ctx so that it can back off and die, provided it already
 * holds other locks (waiter->ww_ctx->acquired > 0).
 */
366 static bool __sched
367 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
368 struct ww_acquire_ctx *ww_ctx)
369 {
370 if (!ww_ctx->is_wait_die)
371 return false;
372
373 if (waiter->ww_ctx->acquired > 0 &&
374 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
375 debug_mutex_wake_waiter(lock, waiter);
376 wake_up_process(waiter->task);
377 }
378
379 return true;
380 }
381
/*
 * Wound-Wait: wound the lock holder @hold_ctx if it is a younger
 * context than @ww_ctx, and wake it so that it notices it has been
 * wounded and backs off.
 */
389 static bool __ww_mutex_wound(struct mutex *lock,
390 struct ww_acquire_ctx *ww_ctx,
391 struct ww_acquire_ctx *hold_ctx)
392 {
393 struct task_struct *owner = __mutex_owner(lock);
394
395 lockdep_assert_held(&lock->wait_lock);
396
397
398
399
400
401
402 if (!hold_ctx)
403 return false;
404
405
406
407
408
409
410 if (!owner)
411 return false;
412
413 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
414 hold_ctx->wounded = 1;
415
416
417
418
419
420
421
422 if (owner != current)
423 wake_up_process(owner);
424
425 return true;
426 }
427
428 return false;
429 }
430
/*
 * We just acquired @lock under @ww_ctx: walk the wait list and check
 * whether any waiting context needs to die (wait-die) or wounds us,
 * the new holder (wound-wait). The list is ordered by stamp, oldest
 * first, and the current task must not be on it.
 */
443 static void __sched
444 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
445 {
446 struct mutex_waiter *cur;
447
448 lockdep_assert_held(&lock->wait_lock);
449
450 list_for_each_entry(cur, &lock->wait_list, list) {
451 if (!cur->ww_ctx)
452 continue;
453
454 if (__ww_mutex_die(lock, cur, ww_ctx) ||
455 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
456 break;
457 }
458 }
459
/*
 * After acquiring the lock with the fastpath, where we do not hold
 * wait_lock, set the context and wake up any waiters so they can
 * recheck.
 */
464 static __always_inline void
465 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
466 {
467 ww_mutex_lock_acquired(lock, ctx);
468
469
470
471
472
473
474
475
476 smp_mb();
477
478
479
480
481
482
483
484
485
486
487 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
488 return;
489
490
491
492
493
494 spin_lock(&lock->base.wait_lock);
495 __ww_mutex_check_waiters(&lock->base, ctx);
496 spin_unlock(&lock->base.wait_lock);
497 }
498
499 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
500
501 static inline
502 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
503 struct mutex_waiter *waiter)
504 {
505 struct ww_mutex *ww;
506
507 ww = container_of(lock, struct ww_mutex, base);
508
509
510
511
512
513
514
515
516
517
518
519
520 if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
521 return false;
522
523
524
525
526
527
528
529
530 if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
531 return false;
532
533
534
535
536
537 if (waiter && !__mutex_waiter_is_first(lock, waiter))
538 return false;
539
540 return true;
541 }
542
/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
549 static noinline
550 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
551 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
552 {
553 bool ret = true;
554
555 rcu_read_lock();
556 while (__mutex_owner(lock) == owner) {
557
558
559
560
561
562
563 barrier();
564
565
566
567
568 if (!owner->on_cpu || need_resched() ||
569 vcpu_is_preempted(task_cpu(owner))) {
570 ret = false;
571 break;
572 }
573
574 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
575 ret = false;
576 break;
577 }
578
579 cpu_relax();
580 }
581 rcu_read_unlock();
582
583 return ret;
584 }
585
/*
 * Initial check for entering the mutex spinning loop.
 */
589 static inline int mutex_can_spin_on_owner(struct mutex *lock)
590 {
591 struct task_struct *owner;
592 int retval = 1;
593
594 if (need_resched())
595 return 0;
596
597 rcu_read_lock();
598 owner = __mutex_owner(lock);
599
600
601
602
603
604 if (owner)
605 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
606 rcu_read_unlock();
607
608
609
610
611
612
613 return retval;
614 }
615
/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
637 static __always_inline bool
638 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
639 const bool use_ww_ctx, struct mutex_waiter *waiter)
640 {
641 if (!waiter) {
642
643
644
645
646
647
648
649 if (!mutex_can_spin_on_owner(lock))
650 goto fail;
651
652
653
654
655
656
657 if (!osq_lock(&lock->osq))
658 goto fail;
659 }
660
661 for (;;) {
662 struct task_struct *owner;
663
664
665 owner = __mutex_trylock_or_owner(lock);
666 if (!owner)
667 break;
668
669
670
671
672
673 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
674 goto fail_unlock;
675
676
677
678
679
680
681
682 cpu_relax();
683 }
684
685 if (!waiter)
686 osq_unlock(&lock->osq);
687
688 return true;
689
690
691 fail_unlock:
692 if (!waiter)
693 osq_unlock(&lock->osq);
694
695 fail:
696
697
698
699
700
701 if (need_resched()) {
702
703
704
705
706 __set_current_state(TASK_RUNNING);
707 schedule_preempt_disabled();
708 }
709
710 return false;
711 }
712 #else
713 static __always_inline bool
714 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
715 const bool use_ww_ctx, struct mutex_waiter *waiter)
716 {
717 return false;
718 }
719 #endif
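/*
 * Taken together, lock acquisition has up to three stages: the fast
 * path (a single cmpxchg of the owner word), the optimistic-spin
 * midpath above (spin while the owner is running on another CPU), and
 * the sleeping slow path in __mutex_lock_common() below.
 */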
720
721 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
722
723
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
734 void __sched mutex_unlock(struct mutex *lock)
735 {
736 #ifndef CONFIG_DEBUG_LOCK_ALLOC
737 if (__mutex_unlock_fast(lock))
738 return;
739 #endif
740 __mutex_unlock_slowpath(lock, _RET_IP_);
741 }
742 EXPORT_SYMBOL(mutex_unlock);
743
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of
 * the ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
755 void __sched ww_mutex_unlock(struct ww_mutex *lock)
756 {
757
758
759
760
761 if (lock->ctx) {
762 #ifdef CONFIG_DEBUG_MUTEXES
763 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
764 #endif
765 if (lock->ctx->acquired > 0)
766 lock->ctx->acquired--;
767 lock->ctx = NULL;
768 }
769
770 mutex_unlock(&lock->base);
771 }
772 EXPORT_SYMBOL(ww_mutex_unlock);
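/*
 * Illustrative usage sketch (not part of this file): acquiring two
 * ww_mutexes with the usual deadlock-backoff pattern. The identifiers
 * "example_ww_class" and "example_lock_pair" are hypothetical.
 */
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			      struct ww_acquire_ctx *ctx)
{
	int ret;

	ww_acquire_init(ctx, &example_ww_class);

	ret = ww_mutex_lock(a, ctx);	/* nothing held yet: no -EDEADLK possible */

	while ((ret = ww_mutex_lock(b, ctx)) == -EDEADLK) {
		/*
		 * We are the younger context: drop what we hold, sleep on
		 * the contended lock, then retry the remaining one.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, ctx);
		swap(a, b);
	}

	ww_acquire_done(ctx);
	/* ... later: ww_mutex_unlock() both locks, then ww_acquire_fini(ctx) */
}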
773
774
775 static __always_inline int __sched
776 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
777 {
778 if (ww_ctx->acquired > 0) {
779 #ifdef CONFIG_DEBUG_MUTEXES
780 struct ww_mutex *ww;
781
782 ww = container_of(lock, struct ww_mutex, base);
783 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
784 ww_ctx->contending_lock = ww;
785 #endif
786 return -EDEADLK;
787 }
788
789 return 0;
790 }
791
/*
 * Check the kill condition for the current lock acquire:
 *
 * Wound-Wait: if we have been wounded, kill ourselves.
 *
 * Wait-Die: if we are trying to acquire a lock already held by an
 * older context, kill ourselves.
 *
 * Since the wait list is ordered by stamp, we only need to look at
 * waiters in front of us.
 */
804 static inline int __sched
805 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
806 struct ww_acquire_ctx *ctx)
807 {
808 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
809 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
810 struct mutex_waiter *cur;
811
812 if (ctx->acquired == 0)
813 return 0;
814
815 if (!ctx->is_wait_die) {
816 if (ctx->wounded)
817 return __ww_mutex_kill(lock, ctx);
818
819 return 0;
820 }
821
822 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
823 return __ww_mutex_kill(lock, ctx);
824
825
826
827
828
829 cur = waiter;
830 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
831 if (!cur->ww_ctx)
832 continue;
833
834 return __ww_mutex_kill(lock, ctx);
835 }
836
837 return 0;
838 }
839
/*
 * Add @waiter to the wait list, keeping the list ordered by stamp,
 * smallest (oldest) first, so that older contexts are preferred when
 * acquiring the lock. Waiters without a context are interspersed in
 * FIFO order.
 *
 * For Wait-Die, kill ourselves immediately when an older context is
 * already waiting; for Wound-Wait, wound the owning context if it is
 * younger than us.
 */
851 static inline int __sched
852 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
853 struct mutex *lock,
854 struct ww_acquire_ctx *ww_ctx)
855 {
856 struct mutex_waiter *cur;
857 struct list_head *pos;
858 bool is_wait_die;
859
860 if (!ww_ctx) {
861 __mutex_add_waiter(lock, waiter, &lock->wait_list);
862 return 0;
863 }
864
865 is_wait_die = ww_ctx->is_wait_die;
866
867
868
869
870
871
872
873
874 pos = &lock->wait_list;
875 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
876 if (!cur->ww_ctx)
877 continue;
878
879 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
880
881
882
883
884
885 if (is_wait_die) {
886 int ret = __ww_mutex_kill(lock, ww_ctx);
887
888 if (ret)
889 return ret;
890 }
891
892 break;
893 }
894
895 pos = &cur->list;
896
897
898 __ww_mutex_die(lock, cur, ww_ctx);
899 }
900
901 __mutex_add_waiter(lock, waiter, pos);
902
903
904
905
906
907 if (!is_wait_die) {
908 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
909
910
911
912
913
914
915 smp_mb();
916 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
917 }
918
919 return 0;
920 }
921
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
925 static __always_inline int __sched
926 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
927 struct lockdep_map *nest_lock, unsigned long ip,
928 struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
929 {
930 struct mutex_waiter waiter;
931 bool first = false;
932 struct ww_mutex *ww;
933 int ret;
934
935 might_sleep();
936
937 #ifdef CONFIG_DEBUG_MUTEXES
938 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
939 #endif
940
941 ww = container_of(lock, struct ww_mutex, base);
942 if (use_ww_ctx && ww_ctx) {
943 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
944 return -EALREADY;
945
946
947
948
949
950
951 if (ww_ctx->acquired == 0)
952 ww_ctx->wounded = 0;
953 }
954
955 preempt_disable();
956 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
957
958 if (__mutex_trylock(lock) ||
959 mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
960
961 lock_acquired(&lock->dep_map, ip);
962 if (use_ww_ctx && ww_ctx)
963 ww_mutex_set_context_fastpath(ww, ww_ctx);
964 preempt_enable();
965 return 0;
966 }
967
968 spin_lock(&lock->wait_lock);
969
970
971
972 if (__mutex_trylock(lock)) {
973 if (use_ww_ctx && ww_ctx)
974 __ww_mutex_check_waiters(lock, ww_ctx);
975
976 goto skip_wait;
977 }
978
979 debug_mutex_lock_common(lock, &waiter);
980
981 lock_contended(&lock->dep_map, ip);
982
983 if (!use_ww_ctx) {
984
985 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
986
987
988 #ifdef CONFIG_DEBUG_MUTEXES
989 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
990 #endif
991 } else {
992
993
994
995
996 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
997 if (ret)
998 goto err_early_kill;
999
1000 waiter.ww_ctx = ww_ctx;
1001 }
1002
1003 waiter.task = current;
1004
1005 set_current_state(state);
1006 for (;;) {
/*
 * Once we hold wait_lock, we're serialized against mutex_unlock()
 * handing the lock off to us, so do a trylock before testing the
 * error conditions to make sure we pick up the handoff.
 */
1013 if (__mutex_trylock(lock))
1014 goto acquired;
1015
1016
1017
1018
1019
1020
1021 if (signal_pending_state(state, current)) {
1022 ret = -EINTR;
1023 goto err;
1024 }
1025
1026 if (use_ww_ctx && ww_ctx) {
1027 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1028 if (ret)
1029 goto err;
1030 }
1031
1032 spin_unlock(&lock->wait_lock);
1033 schedule_preempt_disabled();
1034
1035
1036
1037
1038
1039 if ((use_ww_ctx && ww_ctx) || !first) {
1040 first = __mutex_waiter_is_first(lock, &waiter);
1041 if (first)
1042 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1043 }
1044
1045 set_current_state(state);
1046
1047
1048
1049
1050
1051 if (__mutex_trylock(lock) ||
1052 (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
1053 break;
1054
1055 spin_lock(&lock->wait_lock);
1056 }
1057 spin_lock(&lock->wait_lock);
1058 acquired:
1059 __set_current_state(TASK_RUNNING);
1060
1061 if (use_ww_ctx && ww_ctx) {
1062
1063
1064
1065
1066 if (!ww_ctx->is_wait_die &&
1067 !__mutex_waiter_is_first(lock, &waiter))
1068 __ww_mutex_check_waiters(lock, ww_ctx);
1069 }
1070
1071 mutex_remove_waiter(lock, &waiter, current);
1072 if (likely(list_empty(&lock->wait_list)))
1073 __mutex_clear_flag(lock, MUTEX_FLAGS);
1074
1075 debug_mutex_free_waiter(&waiter);
1076
1077 skip_wait:
1078
1079 lock_acquired(&lock->dep_map, ip);
1080
1081 if (use_ww_ctx && ww_ctx)
1082 ww_mutex_lock_acquired(ww, ww_ctx);
1083
1084 spin_unlock(&lock->wait_lock);
1085 preempt_enable();
1086 return 0;
1087
1088 err:
1089 __set_current_state(TASK_RUNNING);
1090 mutex_remove_waiter(lock, &waiter, current);
1091 err_early_kill:
1092 spin_unlock(&lock->wait_lock);
1093 debug_mutex_free_waiter(&waiter);
1094 mutex_release(&lock->dep_map, 1, ip);
1095 preempt_enable();
1096 return ret;
1097 }
1098
1099 static int __sched
1100 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1101 struct lockdep_map *nest_lock, unsigned long ip)
1102 {
1103 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1104 }
1105
1106 static int __sched
1107 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1108 struct lockdep_map *nest_lock, unsigned long ip,
1109 struct ww_acquire_ctx *ww_ctx)
1110 {
1111 return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1112 }
1113
1114 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1115 void __sched
1116 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1117 {
1118 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1119 }
1120
1121 EXPORT_SYMBOL_GPL(mutex_lock_nested);
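/*
 * Illustrative sketch (not part of this file): mutex_lock_nested() only
 * annotates a genuine, well-ordered double lock within one lock class
 * for lockdep. The function and parameter names are hypothetical.
 */
static void example_lock_parent_child(struct mutex *parent, struct mutex *child)
{
	mutex_lock(parent);
	mutex_lock_nested(child, SINGLE_DEPTH_NESTING);
	/* ... */
	mutex_unlock(child);
	mutex_unlock(parent);
}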
1122
1123 void __sched
1124 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1125 {
1126 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1127 }
1128 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1129
1130 int __sched
1131 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1132 {
1133 return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1134 }
1135 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1136
1137 int __sched
1138 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1139 {
1140 return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1141 }
1142 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1143
1144 void __sched
1145 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1146 {
1147 int token;
1148
1149 might_sleep();
1150
1151 token = io_schedule_prepare();
1152 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1153 subclass, NULL, _RET_IP_, NULL, 0);
1154 io_schedule_finish(token);
1155 }
1156 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1157
1158 static inline int
1159 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1160 {
1161 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1162 unsigned tmp;
1163
1164 if (ctx->deadlock_inject_countdown-- == 0) {
1165 tmp = ctx->deadlock_inject_interval;
1166 if (tmp > UINT_MAX/4)
1167 tmp = UINT_MAX;
1168 else
1169 tmp = tmp*2 + tmp + tmp/2;
1170
1171 ctx->deadlock_inject_interval = tmp;
1172 ctx->deadlock_inject_countdown = tmp;
1173 ctx->contending_lock = lock;
1174
1175 ww_mutex_unlock(lock);
1176
1177 return -EDEADLK;
1178 }
1179 #endif
1180
1181 return 0;
1182 }
1183
1184 int __sched
1185 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1186 {
1187 int ret;
1188
1189 might_sleep();
1190 ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1191 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1192 ctx);
1193 if (!ret && ctx && ctx->acquired > 1)
1194 return ww_mutex_deadlock_injection(lock, ctx);
1195
1196 return ret;
1197 }
1198 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1199
1200 int __sched
1201 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1202 {
1203 int ret;
1204
1205 might_sleep();
1206 ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1207 0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1208 ctx);
1209
1210 if (!ret && ctx && ctx->acquired > 1)
1211 return ww_mutex_deadlock_injection(lock, ctx);
1212
1213 return ret;
1214 }
1215 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1216
1217 #endif
1218
/*
 * Release the lock, slowpath:
 */
1222 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1223 {
1224 struct task_struct *next = NULL;
1225 DEFINE_WAKE_Q(wake_q);
1226 unsigned long owner;
1227
1228 mutex_release(&lock->dep_map, 1, ip);
1229
/*
 * Release the lock before (potentially) taking wait_lock, so that
 * other contenders can get on with things ASAP.
 *
 * Except when HANDOFF is set: then the owner field must not be
 * cleared but handed to the top waiter instead.
 */
1237 owner = atomic_long_read(&lock->owner);
1238 for (;;) {
1239 unsigned long old;
1240
1241 #ifdef CONFIG_DEBUG_MUTEXES
1242 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1243 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1244 #endif
1245
1246 if (owner & MUTEX_FLAG_HANDOFF)
1247 break;
1248
1249 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1250 __owner_flags(owner));
1251 if (old == owner) {
1252 if (owner & MUTEX_FLAG_WAITERS)
1253 break;
1254
1255 return;
1256 }
1257
1258 owner = old;
1259 }
1260
1261 spin_lock(&lock->wait_lock);
1262 debug_mutex_unlock(lock);
1263 if (!list_empty(&lock->wait_list)) {
1264
1265 struct mutex_waiter *waiter =
1266 list_first_entry(&lock->wait_list,
1267 struct mutex_waiter, list);
1268
1269 next = waiter->task;
1270
1271 debug_mutex_wake_waiter(lock, waiter);
1272 wake_q_add(&wake_q, next);
1273 }
1274
1275 if (owner & MUTEX_FLAG_HANDOFF)
1276 __mutex_handoff(lock, next);
1277
1278 spin_unlock(&lock->wait_lock);
1279
1280 wake_up_q(&wake_q);
1281 }
1282
1283 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1284
/*
 * Here come the less common (and hence less performance-critical)
 * APIs: mutex_lock_interruptible() and mutex_trylock().
 */
1288 static noinline int __sched
1289 __mutex_lock_killable_slowpath(struct mutex *lock);
1290
1291 static noinline int __sched
1292 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1293
1294
/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
1306 int __sched mutex_lock_interruptible(struct mutex *lock)
1307 {
1308 might_sleep();
1309
1310 if (__mutex_trylock_fast(lock))
1311 return 0;
1312
1313 return __mutex_lock_interruptible_slowpath(lock);
1314 }
1315
1316 EXPORT_SYMBOL(mutex_lock_interruptible);
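/*
 * Illustrative sketch (not part of this file): the interruptible
 * variant must have its return value checked, e.g. on an ioctl path.
 * "example_ioctl_op" is a hypothetical name.
 */
static int example_ioctl_op(struct mutex *lock)
{
	if (mutex_lock_interruptible(lock))
		return -ERESTARTSYS;	/* a signal arrived before the lock was taken */

	/* ... do the work ... */

	mutex_unlock(lock);
	return 0;
}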
1317
1318
/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
1330 int __sched mutex_lock_killable(struct mutex *lock)
1331 {
1332 might_sleep();
1333
1334 if (__mutex_trylock_fast(lock))
1335 return 0;
1336
1337 return __mutex_lock_killable_slowpath(lock);
1338 }
1339 EXPORT_SYMBOL(mutex_lock_killable);
1340
/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
1351 void __sched mutex_lock_io(struct mutex *lock)
1352 {
1353 int token;
1354
1355 token = io_schedule_prepare();
1356 mutex_lock(lock);
1357 io_schedule_finish(token);
1358 }
1359 EXPORT_SYMBOL_GPL(mutex_lock_io);
1360
1361 static noinline void __sched
1362 __mutex_lock_slowpath(struct mutex *lock)
1363 {
1364 __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1365 }
1366
1367 static noinline int __sched
1368 __mutex_lock_killable_slowpath(struct mutex *lock)
1369 {
1370 return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1371 }
1372
1373 static noinline int __sched
1374 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1375 {
1376 return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1377 }
1378
1379 static noinline int __sched
1380 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1381 {
1382 return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1383 _RET_IP_, ctx);
1384 }
1385
1386 static noinline int __sched
1387 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1388 struct ww_acquire_ctx *ctx)
1389 {
1390 return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1391 _RET_IP_, ctx);
1392 }
1393
1394 #endif
1395
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
1410 int __sched mutex_trylock(struct mutex *lock)
1411 {
1412 bool locked;
1413
1414 #ifdef CONFIG_DEBUG_MUTEXES
1415 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1416 #endif
1417
1418 locked = __mutex_trylock(lock);
1419 if (locked)
1420 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1421
1422 return locked;
1423 }
1424 EXPORT_SYMBOL(mutex_trylock);
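/*
 * Illustrative sketch (not part of this file): mutex_trylock() returns
 * 1 on success and 0 on contention (the opposite of down_trylock()),
 * so a typical caller skips optional work rather than sleeping.
 * "example_try_flush" is a hypothetical name.
 */
static void example_try_flush(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return;		/* somebody else is already doing the work */

	/* ... do the optional work ... */

	mutex_unlock(lock);
}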
1425
1426 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1427 int __sched
1428 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1429 {
1430 might_sleep();
1431
1432 if (__mutex_trylock_fast(&lock->base)) {
1433 if (ctx)
1434 ww_mutex_set_context_fastpath(lock, ctx);
1435 return 0;
1436 }
1437
1438 return __ww_mutex_lock_slowpath(lock, ctx);
1439 }
1440 EXPORT_SYMBOL(ww_mutex_lock);
1441
1442 int __sched
1443 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1444 {
1445 might_sleep();
1446
1447 if (__mutex_trylock_fast(&lock->base)) {
1448 if (ctx)
1449 ww_mutex_set_context_fastpath(lock, ctx);
1450 return 0;
1451 }
1452
1453 return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1454 }
1455 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1456
1457 #endif
1458
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true (and hold the mutex) if we decremented the counter to zero,
 * false otherwise.
 */
1466 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1467 {
/* dec if we can't possibly hit 0 */
1469 if (atomic_add_unless(cnt, -1, 1))
1470 return 0;
/* we might hit 0, so take the lock */
1472 mutex_lock(lock);
1473 if (!atomic_dec_and_test(cnt)) {
/* when we actually did the dec, we didn't hit 0 */
1475 mutex_unlock(lock);
1476 return 0;
1477 }
/* we hit 0, and we hold the lock */
1479 return 1;
1480 }
1481 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
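/*
 * Illustrative sketch (not part of this file): the usual
 * atomic_dec_and_mutex_lock() pattern - drop a reference and tear the
 * object down under the lock only when the count hits zero.
 * "example_put" is a hypothetical name.
 */
static void example_put(atomic_t *refcount, struct mutex *list_lock,
			struct list_head *node)
{
	if (!atomic_dec_and_mutex_lock(refcount, list_lock))
		return;		/* somebody else still holds a reference */

	/* the count hit zero and we hold list_lock */
	list_del(node);
	mutex_unlock(list_lock);
	/* ... free the containing object ... */
}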