This source file includes the following definitions; a brief usage sketch follows the list.
- queued_spin_is_locked
- queued_spin_value_unlocked
- queued_spin_is_contended
- queued_spin_trylock
- queued_spin_lock
- queued_spin_unlock
- virt_spin_lock
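
As a rough illustration of how the helpers listed above fit together, the sketch below protects a counter with a queued spinlock by calling the generic primitives directly. It is a minimal sketch, not code from this file: the demo_* names are made up, it assumes CONFIG_QUEUED_SPINLOCKS is enabled, and real kernel code would normally go through spinlock_t/raw_spinlock_t and the arch_spin_*() wrappers defined at the bottom of this header.

/* Illustrative sketch only; all demo_* names are invented for the example. */
#include <linux/atomic.h>
#include <asm-generic/qspinlock.h>

static struct qspinlock demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned long demo_count;

static void demo_inc(void)
{
        queued_spin_lock(&demo_lock);           /* fast path: cmpxchg 0 -> _Q_LOCKED_VAL */
        demo_count++;
        queued_spin_unlock(&demo_lock);         /* release store of 0 to ->locked */
}

static bool demo_try_inc(void)
{
        if (!queued_spin_trylock(&demo_lock))   /* 0 means the lock word was not free */
                return false;
        demo_count++;
        queued_spin_unlock(&demo_lock);
        return true;
}
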
/*
 * Queued spinlock: a generic spinlock implementation built on MCS locks.
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: non-zero if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * Any non-zero state (locked byte, pending bit or waiter tail)
         * means the lock is held or being taken, so the raw value is
         * the answer.
         */
        return atomic_read(&lock->val);
}

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure (passed by value)
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * Takes the lock by value so it can be applied to a snapshot of the
 * lock word (as the lockref code does) without touching the live lock.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: non-zero if the lock is contended, 0 otherwise
 *
 * Anything beyond the locked byte (pending bit or waiter tail) counts
 * as contention.
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if the lock was acquired, 0 if not
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
        u32 val = atomic_read(&lock->val);

        /* Give up immediately if the lock word is not completely free. */
        if (unlikely(val))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
        u32 val = 0;

        /* Fast path: cmpxchg 0 -> _Q_LOCKED_VAL if the lock is uncontended. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        /* The failed cmpxchg left the observed lock value in @val. */
        queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics; a release store of 0 to the
         * locked byte is enough, the pending/tail bits are left alone.
         */
        smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        /* Default: no hypervisor-specific handling, use the regular slowpath. */
        return false;
}
#endif

/*
 * Remap spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l) queued_spin_is_locked(l)
#define arch_spin_is_contended(l) queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */
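
For context, an architecture normally opts into this code by selecting ARCH_USE_QUEUED_SPINLOCKS and providing a thin asm/qspinlock.h wrapper; the #ifndef queued_spin_unlock guard above exists so such a wrapper can supply its own unlock before pulling in the generic definitions. The sketch below only shows the shape of such a wrapper; the path and the override body are illustrative assumptions, not taken from any particular architecture.

/* arch/<arch>/include/asm/qspinlock.h -- illustrative sketch only */
#ifndef _ASM_ARCH_QSPINLOCK_H
#define _ASM_ARCH_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/*
 * Defining the macro first makes the #ifndef queued_spin_unlock guard
 * in asm-generic/qspinlock.h skip the generic version in favour of
 * this one.
 */
#define queued_spin_unlock queued_spin_unlock
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /* Same contract as the generic code: release-store 0 to ->locked. */
        smp_store_release(&lock->locked, 0);
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_ARCH_QSPINLOCK_H */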