/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
	 * issuing the unordered store that sets _Q_LOCKED_VAL.
	 *
	 * See both smp_cond_acquire() sites for more detail.
	 *
	 * This however means that in code like:
	 *
	 *	spin_lock(A)		spin_lock(B)
	 *	spin_unlock_wait(B)	spin_is_locked(A)
	 *	do_something()		do_something()
	 *
	 * Both CPUs can end up running do_something() because the store
	 * setting _Q_LOCKED_VAL will pass through the loads in
	 * spin_unlock_wait() and/or spin_is_locked().
	 *
	 * Avoid this by issuing a full memory barrier between the spin_lock()
	 * and the loads in spin_unlock_wait() and spin_is_locked().
	 *
	 * Note that regular mutual exclusion doesn't care about this
	 * delayed store.
	 */
	smp_mb();
	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to keep the lockref code from stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
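
/*
 * Illustrative sketch (not part of this header; the snapshot variable is
 * hypothetical): queued_spin_value_unlocked() operates on a copied lock
 * value rather than the live lock, which is how lockref-style cmpxchg fast
 * paths use it; roughly:
 *
 *	struct qspinlock snapshot = READ_ONCE(*lock);
 *
 *	if (queued_spin_value_unlocked(snapshot)) {
 *		// Neither a holder nor waiters were seen in the snapshot,
 *		// so a cmpxchg-based fast path may be attempted.
 *	}
 */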

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
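
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * trylock callers must be prepared for failure, e.g.
 *
 *	if (queued_spin_trylock(&some_lock)) {
 *		// ... critical section ...
 *		queued_spin_unlock(&some_lock);
 *	} else {
 *		// Lock is busy; back off or take a slower path.
 *	}
 */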

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
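
/*
 * Illustrative sketch (hypothetical, not part of this header): these helpers
 * are normally reached through the generic spin_lock()/spin_unlock() wrappers
 * via the arch_spin_*() mappings below, but the basic pairing looks like:
 *
 *	static struct qspinlock demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	queued_spin_lock(&demo_lock);
 *	// ... critical section, serialized against all other CPUs ...
 *	queued_spin_unlock(&demo_lock);
 */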

/**
 * queued_spin_unlock_wait - wait until current lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
static inline void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* See queued_spin_is_locked() */
	smp_mb();
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
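
/*
 * Illustrative sketch (hypothetical, not mandated by this header): an
 * architecture can define virt_spin_lock() before this header is included to
 * bypass the queueing path when running under a hypervisor, where strict FIFO
 * queueing of virtual CPUs can hurt; for example a test-and-set fallback:
 *
 *	#define virt_spin_lock virt_spin_lock
 *	static inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!running_on_hypervisor())	// hypothetical helper
 *			return false;		// use the normal qspinlock path
 *
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */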

/*
 * Initializer
 */
#define __ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)

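/*
 * Illustrative sketch (hypothetical file layout): an architecture opts in to
 * queued spinlocks by making its arch_spinlock_t the qspinlock type and then
 * pulling in this header, roughly:
 *
 *	// arch/foo/include/asm/spinlock_types.h
 *	#include <asm-generic/qspinlock_types.h>
 *
 *	// arch/foo/include/asm/spinlock.h
 *	#include <asm-generic/qspinlock.h>
 */
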
#endif /* __ASM_GENERIC_QSPINLOCK_H */