Searched refs:lock (Results 1 - 200 of 6223) sorted by relevance


/linux-4.1.27/include/linux/
spinlock_api_up.h
19 #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
24 * flags straight, to suppress compiler warnings of unused lock
27 #define ___LOCK(lock) \
28 do { __acquire(lock); (void)(lock); } while (0)
30 #define __LOCK(lock) \
31 do { preempt_disable(); ___LOCK(lock); } while (0)
33 #define __LOCK_BH(lock) \
34 do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
36 #define __LOCK_IRQ(lock) \
37 do { local_irq_disable(); __LOCK(lock); } while (0)
39 #define __LOCK_IRQSAVE(lock, flags) \
40 do { local_irq_save(flags); __LOCK(lock); } while (0)
42 #define ___UNLOCK(lock) \
43 do { __release(lock); (void)(lock); } while (0)
45 #define __UNLOCK(lock) \
46 do { preempt_enable(); ___UNLOCK(lock); } while (0)
48 #define __UNLOCK_BH(lock) \
50 ___UNLOCK(lock); } while (0)
52 #define __UNLOCK_IRQ(lock) \
53 do { local_irq_enable(); __UNLOCK(lock); } while (0)
55 #define __UNLOCK_IRQRESTORE(lock, flags) \
56 do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
58 #define _raw_spin_lock(lock) __LOCK(lock)
59 #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
60 #define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
61 #define _raw_read_lock(lock) __LOCK(lock)
62 #define _raw_write_lock(lock) __LOCK(lock)
63 #define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
64 #define _raw_read_lock_bh(lock) __LOCK_BH(lock)
65 #define _raw_write_lock_bh(lock) __LOCK_BH(lock)
66 #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
67 #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
68 #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
69 #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
70 #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
71 #define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
72 #define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
73 #define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
74 #define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
75 #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
76 #define _raw_spin_unlock(lock) __UNLOCK(lock)
77 #define _raw_read_unlock(lock) __UNLOCK(lock)
78 #define _raw_write_unlock(lock) __UNLOCK(lock)
79 #define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
80 #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
81 #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
82 #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
83 #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
84 #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
85 #define _raw_spin_unlock_irqrestore(lock, flags) \
86 __UNLOCK_IRQRESTORE(lock, flags)
87 #define _raw_read_unlock_irqrestore(lock, flags) \
88 __UNLOCK_IRQRESTORE(lock, flags)
89 #define _raw_write_unlock_irqrestore(lock, flags) \
90 __UNLOCK_IRQRESTORE(lock, flags)
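
For orientation, a minimal sketch of what the UP macros above amount to: with no other CPUs to spin against, _raw_spin_lock() only has to disable preemption and keep the lock variable "used". The helper name below is invented; the real expansion also carries the sparse __acquire() annotation.

        /* assumes <linux/spinlock.h>; sketch of __LOCK()/___LOCK() on UP */
        static inline void up_spin_lock_sketch(raw_spinlock_t *lock)
        {
                preempt_disable();      /* __LOCK(): no spinning on UP */
                (void)(lock);           /* ___LOCK(): suppress "unused" warnings */
        }
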
rwlock.h
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
20 # define rwlock_init(lock) \
24 __rwlock_init((lock), #lock, &__key); \
27 # define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
32 extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock); variable
33 #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock); variable
36 extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock); variable
37 #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
38 extern int do_raw_write_trylock(rwlock_t *lock);
39 extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock); variable
41 # define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
42 # define do_raw_read_lock_flags(lock, flags) \
43 do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
45 # define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
46 # define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
47 # define do_raw_write_lock_flags(lock, flags) \
48 do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
50 # define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
61 #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
62 #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
64 #define write_lock(lock) _raw_write_lock(lock)
65 #define read_lock(lock) _raw_read_lock(lock)
69 #define read_lock_irqsave(lock, flags) \
72 flags = _raw_read_lock_irqsave(lock); \
74 #define write_lock_irqsave(lock, flags) \
77 flags = _raw_write_lock_irqsave(lock); \
82 #define read_lock_irqsave(lock, flags) \
85 _raw_read_lock_irqsave(lock, flags); \
87 #define write_lock_irqsave(lock, flags) \
90 _raw_write_lock_irqsave(lock, flags); \
95 #define read_lock_irq(lock) _raw_read_lock_irq(lock)
96 #define read_lock_bh(lock) _raw_read_lock_bh(lock)
97 #define write_lock_irq(lock) _raw_write_lock_irq(lock)
98 #define write_lock_bh(lock) _raw_write_lock_bh(lock)
99 #define read_unlock(lock) _raw_read_unlock(lock)
100 #define write_unlock(lock) _raw_write_unlock(lock)
101 #define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
102 #define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
104 #define read_unlock_irqrestore(lock, flags) \
107 _raw_read_unlock_irqrestore(lock, flags); \
109 #define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
111 #define write_unlock_irqrestore(lock, flags) \
114 _raw_write_unlock_irqrestore(lock, flags); \
116 #define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
118 #define write_trylock_irqsave(lock, flags) \
121 write_trylock(lock) ? \
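
As a hedged illustration of the rwlock wrappers listed above, a reader/writer pair protecting one shared value; everything except the standard rwlock calls (DEFINE_RWLOCK, read_lock, write_lock_irqsave, ...) is hypothetical.

        static DEFINE_RWLOCK(example_lock);
        static int example_value;

        static int example_read(void)
        {
                int v;

                read_lock(&example_lock);
                v = example_value;
                read_unlock(&example_lock);
                return v;
        }

        static void example_write_from_any_context(int v)
        {
                unsigned long flags;

                write_lock_irqsave(&example_lock, flags);
                example_value = v;
                write_unlock_irqrestore(&example_lock, flags);
        }
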
rwlock_api_smp.h
18 void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); variable
19 void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); variable
20 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); variable
21 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); variable
22 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); variable
23 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); variable
24 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock); variable
26 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock); variable
28 int __lockfunc _raw_read_trylock(rwlock_t *lock);
29 int __lockfunc _raw_write_trylock(rwlock_t *lock);
30 void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); variable
31 void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); variable
32 void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); variable
33 void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); variable
34 void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); variable
35 void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); variable
37 _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
38 __releases(lock); variable
40 _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
41 __releases(lock); variable
44 #define _raw_read_lock(lock) __raw_read_lock(lock)
48 #define _raw_write_lock(lock) __raw_write_lock(lock)
52 #define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
56 #define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
60 #define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
64 #define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
68 #define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
72 #define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
76 #define _raw_read_trylock(lock) __raw_read_trylock(lock)
80 #define _raw_write_trylock(lock) __raw_write_trylock(lock)
84 #define _raw_read_unlock(lock) __raw_read_unlock(lock)
88 #define _raw_write_unlock(lock) __raw_write_unlock(lock)
92 #define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
96 #define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
100 #define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
104 #define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
108 #define _raw_read_unlock_irqrestore(lock, flags) \
109 __raw_read_unlock_irqrestore(lock, flags)
113 #define _raw_write_unlock_irqrestore(lock, flags) \
114 __raw_write_unlock_irqrestore(lock, flags)
117 static inline int __raw_read_trylock(rwlock_t *lock) __raw_read_trylock() argument
120 if (do_raw_read_trylock(lock)) { __raw_read_trylock()
121 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); __raw_read_trylock()
128 static inline int __raw_write_trylock(rwlock_t *lock) __raw_write_trylock() argument
131 if (do_raw_write_trylock(lock)) { __raw_write_trylock()
132 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); __raw_write_trylock()
142 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
146 static inline void __raw_read_lock(rwlock_t *lock) __raw_read_lock() argument
149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); __raw_read_lock()
150 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); __raw_read_lock()
153 static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) __raw_read_lock_irqsave() argument
159 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); __raw_read_lock_irqsave()
160 LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, __raw_read_lock_irqsave()
165 static inline void __raw_read_lock_irq(rwlock_t *lock) __raw_read_lock_irq() argument
169 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); __raw_read_lock_irq()
170 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); __raw_read_lock_irq()
173 static inline void __raw_read_lock_bh(rwlock_t *lock) __raw_read_lock_bh() argument
176 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); __raw_read_lock_bh()
177 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); __raw_read_lock_bh()
180 static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) __raw_write_lock_irqsave() argument
186 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_write_lock_irqsave()
187 LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, __raw_write_lock_irqsave()
192 static inline void __raw_write_lock_irq(rwlock_t *lock) __raw_write_lock_irq() argument
196 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_write_lock_irq()
197 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); __raw_write_lock_irq()
200 static inline void __raw_write_lock_bh(rwlock_t *lock) __raw_write_lock_bh() argument
203 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_write_lock_bh()
204 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); __raw_write_lock_bh()
207 static inline void __raw_write_lock(rwlock_t *lock) __raw_write_lock() argument
210 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_write_lock()
211 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); __raw_write_lock()
216 static inline void __raw_write_unlock(rwlock_t *lock) __raw_write_unlock() argument
218 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_write_unlock()
219 do_raw_write_unlock(lock); __raw_write_unlock()
223 static inline void __raw_read_unlock(rwlock_t *lock) __raw_read_unlock() argument
225 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_read_unlock()
226 do_raw_read_unlock(lock); __raw_read_unlock()
231 __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __raw_read_unlock_irqrestore() argument
233 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_read_unlock_irqrestore()
234 do_raw_read_unlock(lock); __raw_read_unlock_irqrestore()
239 static inline void __raw_read_unlock_irq(rwlock_t *lock) __raw_read_unlock_irq() argument
241 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_read_unlock_irq()
242 do_raw_read_unlock(lock); __raw_read_unlock_irq()
247 static inline void __raw_read_unlock_bh(rwlock_t *lock) __raw_read_unlock_bh() argument
249 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_read_unlock_bh()
250 do_raw_read_unlock(lock); __raw_read_unlock_bh()
254 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, __raw_write_unlock_irqrestore() argument
257 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_write_unlock_irqrestore()
258 do_raw_write_unlock(lock); __raw_write_unlock_irqrestore()
263 static inline void __raw_write_unlock_irq(rwlock_t *lock) __raw_write_unlock_irq() argument
265 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_write_unlock_irq()
266 do_raw_write_unlock(lock); __raw_write_unlock_irq()
271 static inline void __raw_write_unlock_bh(rwlock_t *lock) __raw_write_unlock_bh() argument
273 rwlock_release(&lock->dep_map, 1, _RET_IP_); __raw_write_unlock_bh()
274 do_raw_write_unlock(lock); __raw_write_unlock_bh()
spinlock_api_smp.h
22 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); variable
23 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); variable
25 void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
26 __acquires(lock); variable
28 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
29 __acquires(lock); variable
30 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); variable
31 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
32 __acquires(lock); variable
34 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
35 __acquires(lock); variable
37 _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
38 __acquires(lock); variable
39 int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
40 int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
41 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); variable
42 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); variable
43 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); variable
45 _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
46 __releases(lock); variable
49 #define _raw_spin_lock(lock) __raw_spin_lock(lock)
53 #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
57 #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
61 #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
65 #define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
69 #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
73 #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
77 #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
81 #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
85 #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
88 static inline int __raw_spin_trylock(raw_spinlock_t *lock) __raw_spin_trylock() argument
91 if (do_raw_spin_trylock(lock)) { __raw_spin_trylock()
92 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); __raw_spin_trylock()
102 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
106 static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) __raw_spin_lock_irqsave() argument
112 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_spin_lock_irqsave()
116 * that interrupts are not re-enabled during lock-acquire: __raw_spin_lock_irqsave()
119 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); __raw_spin_lock_irqsave()
121 do_raw_spin_lock_flags(lock, &flags); __raw_spin_lock_irqsave()
126 static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) __raw_spin_lock_irq() argument
130 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_spin_lock_irq()
131 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); __raw_spin_lock_irq()
134 static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) __raw_spin_lock_bh() argument
137 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_spin_lock_bh()
138 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); __raw_spin_lock_bh()
141 static inline void __raw_spin_lock(raw_spinlock_t *lock) __raw_spin_lock() argument
144 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); __raw_spin_lock()
145 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); __raw_spin_lock()
150 static inline void __raw_spin_unlock(raw_spinlock_t *lock) __raw_spin_unlock() argument
152 spin_release(&lock->dep_map, 1, _RET_IP_); __raw_spin_unlock()
153 do_raw_spin_unlock(lock); __raw_spin_unlock()
157 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, __raw_spin_unlock_irqrestore() argument
160 spin_release(&lock->dep_map, 1, _RET_IP_); __raw_spin_unlock_irqrestore()
161 do_raw_spin_unlock(lock); __raw_spin_unlock_irqrestore()
166 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) __raw_spin_unlock_irq() argument
168 spin_release(&lock->dep_map, 1, _RET_IP_); __raw_spin_unlock_irq()
169 do_raw_spin_unlock(lock); __raw_spin_unlock_irq()
174 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) __raw_spin_unlock_bh() argument
176 spin_release(&lock->dep_map, 1, _RET_IP_); __raw_spin_unlock_bh()
177 do_raw_spin_unlock(lock); __raw_spin_unlock_bh()
181 static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) __raw_spin_trylock_bh() argument
184 if (do_raw_spin_trylock(lock)) { __raw_spin_trylock_bh()
185 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); __raw_spin_trylock_bh()
spinlock_up.h
28 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
30 lock->slock = 0; arch_spin_lock()
35 arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) arch_spin_lock_flags() argument
38 lock->slock = 0; arch_spin_lock_flags()
42 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
44 char oldval = lock->slock; arch_spin_trylock()
46 lock->slock = 0; arch_spin_trylock()
52 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
55 lock->slock = 1; arch_spin_unlock()
61 #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
62 #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
63 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
64 #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
65 #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
66 #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
69 #define arch_spin_is_locked(lock) ((void)(lock), 0)
71 # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
72 # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
73 # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
74 # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
77 #define arch_spin_is_contended(lock) (((void)(lock), 0))
79 #define arch_read_can_lock(lock) (((void)(lock), 1))
80 #define arch_write_can_lock(lock) (((void)(lock), 1))
82 #define arch_spin_unlock_wait(lock) \
83 do { cpu_relax(); } while (arch_spin_is_locked(lock))
spinlock.h
64 #define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
93 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
95 # define raw_spin_lock_init(lock) \
99 __raw_spin_lock_init((lock), #lock, &__key); \
103 # define raw_spin_lock_init(lock) \
104 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
107 #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
110 #define raw_spin_is_contended(lock) ((lock)->break_lock)
114 #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
116 #define raw_spin_is_contended(lock) (((void)(lock), 0))
134 * Place this after a lock-acquisition primitive to guarantee that
137 * UNLOCK and LOCK operate on the same lock variable.
145 * @lock: the spinlock in question.
147 #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
150 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); variable
151 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
152 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
153 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); variable
155 static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) __acquires()
157 __acquire(lock); __acquires()
158 arch_spin_lock(&lock->raw_lock); __acquires()
162 do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) __acquires()
164 __acquire(lock); __acquires()
165 arch_spin_lock_flags(&lock->raw_lock, *flags); __acquires()
168 static inline int do_raw_spin_trylock(raw_spinlock_t *lock) do_raw_spin_trylock() argument
170 return arch_spin_trylock(&(lock)->raw_lock); do_raw_spin_trylock()
173 static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) __releases()
175 arch_spin_unlock(&lock->raw_lock); __releases()
176 __release(lock); __releases()
186 #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
188 #define raw_spin_lock(lock) _raw_spin_lock(lock)
191 # define raw_spin_lock_nested(lock, subclass) \
192 _raw_spin_lock_nested(lock, subclass)
193 # define raw_spin_lock_bh_nested(lock, subclass) \
194 _raw_spin_lock_bh_nested(lock, subclass)
196 # define raw_spin_lock_nest_lock(lock, nest_lock) \
199 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
207 # define raw_spin_lock_nested(lock, subclass) \
208 _raw_spin_lock(((void)(subclass), (lock)))
209 # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
210 # define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
215 #define raw_spin_lock_irqsave(lock, flags) \
218 flags = _raw_spin_lock_irqsave(lock); \
222 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
225 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
228 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
231 flags = _raw_spin_lock_irqsave(lock); \
237 #define raw_spin_lock_irqsave(lock, flags) \
240 _raw_spin_lock_irqsave(lock, flags); \
243 #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
244 raw_spin_lock_irqsave(lock, flags)
248 #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
249 #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
250 #define raw_spin_unlock(lock) _raw_spin_unlock(lock)
251 #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
253 #define raw_spin_unlock_irqrestore(lock, flags) \
256 _raw_spin_unlock_irqrestore(lock, flags); \
258 #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
260 #define raw_spin_trylock_bh(lock) \
261 __cond_lock(lock, _raw_spin_trylock_bh(lock))
263 #define raw_spin_trylock_irq(lock) \
266 raw_spin_trylock(lock) ? \
270 #define raw_spin_trylock_irqsave(lock, flags) \
273 raw_spin_trylock(lock) ? \
279 * @lock: the spinlock in question.
281 #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
299 static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) spinlock_check() argument
301 return &lock->rlock; spinlock_check()
310 static inline void spin_lock(spinlock_t *lock) spin_lock() argument
312 raw_spin_lock(&lock->rlock); spin_lock()
315 static inline void spin_lock_bh(spinlock_t *lock) spin_lock_bh() argument
317 raw_spin_lock_bh(&lock->rlock); spin_lock_bh()
320 static inline int spin_trylock(spinlock_t *lock) spin_trylock() argument
322 return raw_spin_trylock(&lock->rlock); spin_trylock()
325 #define spin_lock_nested(lock, subclass) \
327 raw_spin_lock_nested(spinlock_check(lock), subclass); \
330 #define spin_lock_bh_nested(lock, subclass) \
332 raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
335 #define spin_lock_nest_lock(lock, nest_lock) \
337 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
340 static inline void spin_lock_irq(spinlock_t *lock) spin_lock_irq() argument
342 raw_spin_lock_irq(&lock->rlock); spin_lock_irq()
345 #define spin_lock_irqsave(lock, flags) \
347 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
350 #define spin_lock_irqsave_nested(lock, flags, subclass) \
352 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
355 static inline void spin_unlock(spinlock_t *lock) spin_unlock() argument
357 raw_spin_unlock(&lock->rlock); spin_unlock()
360 static inline void spin_unlock_bh(spinlock_t *lock) spin_unlock_bh() argument
362 raw_spin_unlock_bh(&lock->rlock); spin_unlock_bh()
365 static inline void spin_unlock_irq(spinlock_t *lock) spin_unlock_irq() argument
367 raw_spin_unlock_irq(&lock->rlock); spin_unlock_irq()
370 static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) spin_unlock_irqrestore() argument
372 raw_spin_unlock_irqrestore(&lock->rlock, flags); spin_unlock_irqrestore()
375 static inline int spin_trylock_bh(spinlock_t *lock) spin_trylock_bh() argument
377 return raw_spin_trylock_bh(&lock->rlock); spin_trylock_bh()
380 static inline int spin_trylock_irq(spinlock_t *lock) spin_trylock_irq() argument
382 return raw_spin_trylock_irq(&lock->rlock); spin_trylock_irq()
385 #define spin_trylock_irqsave(lock, flags) \
387 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
390 static inline void spin_unlock_wait(spinlock_t *lock) spin_unlock_wait() argument
392 raw_spin_unlock_wait(&lock->rlock); spin_unlock_wait()
395 static inline int spin_is_locked(spinlock_t *lock) spin_is_locked() argument
397 return raw_spin_is_locked(&lock->rlock); spin_is_locked()
400 static inline int spin_is_contended(spinlock_t *lock) spin_is_contended() argument
402 return raw_spin_is_contended(&lock->rlock); spin_is_contended()
405 static inline int spin_can_lock(spinlock_t *lock) spin_can_lock() argument
407 return raw_spin_can_lock(&lock->rlock); spin_can_lock()
410 #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
418 * atomic_dec_and_lock - lock on reaching reference count zero
420 * @lock: the spinlock in question
423 * @lock. Returns false for all other cases.
425 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
426 #define atomic_dec_and_lock(atomic, lock) \
427 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
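
A short usage sketch of the spinlock_t wrappers and the atomic_dec_and_lock() idiom shown above; struct example_obj and the helper names are invented for illustration, and the code assumes <linux/spinlock.h> and <linux/slab.h>.

        struct example_obj {
                atomic_t refcount;
                spinlock_t lock;
                int data;
        };

        static void example_update(struct example_obj *obj, int v)
        {
                unsigned long flags;

                spin_lock_irqsave(&obj->lock, flags);   /* safe against local IRQs */
                obj->data = v;
                spin_unlock_irqrestore(&obj->lock, flags);
        }

        static void example_put(struct example_obj *obj, spinlock_t *list_lock)
        {
                /* list_lock is taken only when the count really hits zero */
                if (atomic_dec_and_lock(&obj->refcount, list_lock)) {
                        /* ... unlink obj from the list it lives on ... */
                        spin_unlock(list_lock);
                        kfree(obj);
                }
        }
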
osq_lock.h
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
10 int locked; /* 1 if lock acquired */
27 static inline void osq_lock_init(struct optimistic_spin_queue *lock) osq_lock_init() argument
29 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); osq_lock_init()
32 extern bool osq_lock(struct optimistic_spin_queue *lock);
33 extern void osq_unlock(struct optimistic_spin_queue *lock);
mutex.h
40 * that make lock debugging easier and faster:
59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
99 static inline void mutex_destroy(struct mutex *lock) {} mutex_destroy() argument
119 extern void __mutex_init(struct mutex *lock, const char *name,
124 * @lock: the mutex to be queried
128 static inline int mutex_is_locked(struct mutex *lock) mutex_is_locked() argument
130 return atomic_read(&lock->count) != 1; mutex_is_locked()
138 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
139 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
141 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
143 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
146 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
147 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
148 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
150 #define mutex_lock_nest_lock(lock, nest_lock) \
153 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
157 extern void mutex_lock(struct mutex *lock);
158 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
159 extern int __must_check mutex_lock_killable(struct mutex *lock);
161 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
162 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
163 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
164 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
173 extern int mutex_trylock(struct mutex *lock);
174 extern void mutex_unlock(struct mutex *lock);
176 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
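
A minimal sketch of the mutex entry points declared above (mutex_lock_interruptible()/mutex_unlock()); the configuration helper and the protected variable are hypothetical.

        static DEFINE_MUTEX(example_mutex);
        static int example_setting;

        static int example_configure(int value)
        {
                int ret;

                ret = mutex_lock_interruptible(&example_mutex); /* may sleep */
                if (ret)
                        return ret;     /* -EINTR if a signal arrived */
                example_setting = value;
                mutex_unlock(&example_mutex);
                return 0;
        }
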
lockdep.h
40 * on rq->lock. double_rq_lock() acquires this highly competitive with
48 * static locks we use the lock address itself as the key.)
63 * The lock-class itself:
72 * global list of all lock-classes:
87 * These fields represent a directed graph of lock dependencies,
147 * Map the lock object (the lock instance) to the lock-class object.
148 * This is embedded into specific lock instances:
179 * Every lock has a list of other locks that were taken after it.
190 * bit 0 is reused to indicate if the lock has been accessed in BFS.
196 * We record lock dependency chains, so that we can cache them:
239 * The lock-stack is unified in that the lock chains of interrupt
242 * context, and we also keep do not add cross-context lock
243 * dependencies - the lock usage graph walking covers that area
266 extern void lockdep_reset_lock(struct lockdep_map *lock);
279 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
290 * Reinitialize a lock key - for cases where there is special locking or
295 #define lockdep_set_class(lock, key) \
296 lockdep_init_map(&(lock)->dep_map, #key, key, 0)
297 #define lockdep_set_class_and_name(lock, key, name) \
298 lockdep_init_map(&(lock)->dep_map, name, key, 0)
299 #define lockdep_set_class_and_subclass(lock, key, sub) \
300 lockdep_init_map(&(lock)->dep_map, #key, key, sub)
301 #define lockdep_set_subclass(lock, sub) \
302 lockdep_init_map(&(lock)->dep_map, #lock, \
303 (lock)->dep_map.key, sub)
305 #define lockdep_set_novalidate_class(lock) \
306 lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
310 #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
312 static inline int lockdep_match_key(struct lockdep_map *lock, lockdep_match_key() argument
315 return lock->key == key; lockdep_match_key()
319 * Acquire a lock.
332 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
336 extern void lock_release(struct lockdep_map *lock, int nested,
339 #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
341 extern int lock_is_held(struct lockdep_map *lock);
343 extern void lock_set_class(struct lockdep_map *lock, const char *name,
347 static inline void lock_set_subclass(struct lockdep_map *lock, lock_set_subclass() argument
350 lock_set_class(lock, lock->name, lock->key, subclass, ip); lock_set_subclass()
390 # define lockdep_init_map(lock, name, key, sub) \
392 # define lockdep_set_class(lock, key) do { (void)(key); } while (0)
393 # define lockdep_set_class_and_name(lock, key, name) \
395 #define lockdep_set_class_and_subclass(lock, key, sub) \
397 #define lockdep_set_subclass(lock, sub) do { } while (0)
399 #define lockdep_set_novalidate_class(lock) do { } while (0)
427 extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
428 extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
430 #define LOCK_CONTENDED(_lock, try, lock) \
434 lock(_lock); \
444 #define LOCK_CONTENDED(_lock, try, lock) \
445 lock(_lock)
454 * that interrupts are not re-enabled during lock-acquire:
456 #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
457 LOCK_CONTENDED((_lock), (try), (lock))
461 #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
475 * For trivial one-depth nesting of a lock-class, the following
477 * of nesting should define their own lock-nesting subclasses.)
483 * on the per lock-class debug mode:
517 # define might_lock(lock) \
519 typecheck(struct lockdep_map *, &(lock)->dep_map); \
520 lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
521 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
523 # define might_lock_read(lock) \
525 typecheck(struct lockdep_map *, &(lock)->dep_map); \
526 lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
527 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
530 # define might_lock(lock) do { } while (0)
531 # define might_lock_read(lock) do { } while (0)
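
As a sketch of the lockdep helpers above: lockdep_set_class() is typically called right after initializing a dynamically allocated lock so that every lock of one kind shares a class key; the init helper below is hypothetical.

        static struct lock_class_key example_class_key;

        static void example_lock_init(spinlock_t *lock)
        {
                spin_lock_init(lock);
                /* group every lock initialized here under one lockdep class */
                lockdep_set_class(lock, &example_class_key);
        }
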
nfs_fs_i.h
7 * NFS lock info
semaphore.h
17 raw_spinlock_t lock; member in struct:semaphore
24 .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
36 lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); sema_init()
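
For context, the counting-semaphore API whose internals appear above is normally driven with sema_init()/down()/up(); the function below is only an illustration.

        static struct semaphore example_sem;

        static void example(void)
        {
                sema_init(&example_sem, 1);     /* count of 1: binary semaphore */
                down(&example_sem);             /* may sleep until available */
                /* ... critical section ... */
                up(&example_sem);
        }
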
fs_struct.h
10 spinlock_t lock; member in struct:fs_struct
28 spin_lock(&fs->lock); get_fs_root()
31 spin_unlock(&fs->lock); get_fs_root()
36 spin_lock(&fs->lock); get_fs_pwd()
39 spin_unlock(&fs->lock); get_fs_pwd()
ww_mutex.h
77 * @lock: the mutex to be initialized
85 static inline void ww_mutex_init(struct ww_mutex *lock, ww_mutex_init() argument
88 __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); ww_mutex_init()
89 lock->ctx = NULL; ww_mutex_init()
91 lock->ww_class = ww_class; ww_mutex_init()
103 * a given lock class. Deadlocks will be detected and handled with the
113 * to the usual locking rules between different lock classes.
146 * Marks the end of the acquire phase, any further w/w mutex lock calls using
189 extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
191 extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
196 * @lock: the mutex to be acquired
197 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
202 * wait/wound algorithm. If the lock isn't immediately available this function
205 * same lock with the same context twice is also detected and signalled by
209 * the given context and then wait for this contending lock to be available by
211 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
219 * of the same w/w lock class as was used to initialize the acquire context.
223 static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock() argument
226 return __ww_mutex_lock(lock, ctx); ww_mutex_lock()
228 mutex_lock(&lock->base); ww_mutex_lock()
234 * @lock: the mutex to be acquired
240 * wait/wound algorithm. If the lock isn't immediately available this function
243 * same lock with the same context twice is also detected and signalled by
245 * signal arrives while waiting for the lock then this function returns -EINTR.
248 * the given context and then wait for this contending lock to be available by
250 * not acquire this lock and proceed with trying to acquire further w/w mutexes
258 * of the same w/w lock class as was used to initialize the acquire context.
262 static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, ww_mutex_lock_interruptible() argument
266 return __ww_mutex_lock_interruptible(lock, ctx); ww_mutex_lock_interruptible()
268 return mutex_lock_interruptible(&lock->base); ww_mutex_lock_interruptible()
273 * @lock: the mutex to be acquired
277 * will sleep until the lock becomes available.
280 * context and then call this function on the contended lock.
290 * Note that the slowpath lock acquiring can also be done by calling
295 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock_slow() argument
301 ret = ww_mutex_lock(lock, ctx); ww_mutex_lock_slow()
307 * @lock: the mutex to be acquired
311 * will sleep until the lock becomes available and returns 0 when the lock has
312 * been acquired. If a signal arrives while waiting for the lock then this
316 * context and then call this function on the contended lock.
326 * Note that the slowpath lock acquiring can also be done by calling
331 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, ww_mutex_lock_slow_interruptible() argument
337 return ww_mutex_lock_interruptible(lock, ctx); ww_mutex_lock_slow_interruptible()
340 extern void ww_mutex_unlock(struct ww_mutex *lock);
344 * @lock: mutex to lock
349 static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) ww_mutex_trylock() argument
351 return mutex_trylock(&lock->base); ww_mutex_trylock()
356 * @lock: the mutex to be destroyed
362 static inline void ww_mutex_destroy(struct ww_mutex *lock) ww_mutex_destroy() argument
364 mutex_destroy(&lock->base); ww_mutex_destroy()
369 * @lock: the mutex to be queried
373 static inline bool ww_mutex_is_locked(struct ww_mutex *lock) ww_mutex_is_locked() argument
375 return mutex_is_locked(&lock->base); ww_mutex_is_locked()
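
A hedged sketch of the wait/wound protocol described in the comments above: two locks are taken under one acquire context, and on -EDEADLK the caller backs off and sleeps on the contended lock with ww_mutex_lock_slow(). The helper name and the retry shape are illustrative, not the canonical loop.

        static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                                     struct ww_acquire_ctx *ctx)
        {
                int ret;

                ret = ww_mutex_lock(a, ctx);
                if (ret)
                        return ret;

                ret = ww_mutex_lock(b, ctx);
                if (ret == -EDEADLK) {
                        ww_mutex_unlock(a);             /* drop everything held */
                        ww_mutex_lock_slow(b, ctx);     /* wait for the contended lock */
                        ret = ww_mutex_lock(a, ctx);    /* then reacquire the rest */
                        if (ret) {
                                ww_mutex_unlock(b);
                                return ret;
                        }
                        return 0;
                }
                if (ret) {
                        ww_mutex_unlock(a);
                        return ret;
                }
                return 0;
        }
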
rtmutex.h
80 * @lock: the mutex to be queried
84 static inline int rt_mutex_is_locked(struct rt_mutex *lock) rt_mutex_is_locked() argument
86 return lock->owner != NULL; rt_mutex_is_locked()
89 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
90 extern void rt_mutex_destroy(struct rt_mutex *lock);
92 extern void rt_mutex_lock(struct rt_mutex *lock);
93 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
94 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
97 extern int rt_mutex_trylock(struct rt_mutex *lock);
99 extern void rt_mutex_unlock(struct rt_mutex *lock);
ratelimit.h
11 raw_spinlock_t lock; /* protect the state */ member in struct:ratelimit_state
21 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
37 raw_spin_lock_init(&rs->lock); ratelimit_state_init()
seqlock.h
5 * lock for data where the reader wants a consistent set of information
13 * from going forward. Unlike the regular rwlock, the read lock here is
57 * Make sure we are not reinitializing a held lock: __seqcount_init()
283 spinlock_t lock; member in struct:__anon12423
293 .lock = __SPIN_LOCK_UNLOCKED(lockname) \
299 spin_lock_init(&(x)->lock); \
325 spin_lock(&sl->lock); write_seqlock()
332 spin_unlock(&sl->lock); write_sequnlock()
337 spin_lock_bh(&sl->lock); write_seqlock_bh()
344 spin_unlock_bh(&sl->lock); write_sequnlock_bh()
349 spin_lock_irq(&sl->lock); write_seqlock_irq()
356 spin_unlock_irq(&sl->lock); write_sequnlock_irq()
363 spin_lock_irqsave(&sl->lock, flags); __write_seqlock_irqsave()
368 #define write_seqlock_irqsave(lock, flags) \
369 do { flags = __write_seqlock_irqsave(lock); } while (0)
375 spin_unlock_irqrestore(&sl->lock, flags); write_sequnlock_irqrestore()
385 spin_lock(&sl->lock); read_seqlock_excl()
390 spin_unlock(&sl->lock); read_sequnlock_excl()
395 * @lock: sequence lock
398 * First try it once optimistically without taking the lock. If that fails,
399 * take the lock. The sequence number is also used as a marker for deciding
403 static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) read_seqbegin_or_lock() argument
406 *seq = read_seqbegin(lock); read_seqbegin_or_lock()
408 read_seqlock_excl(lock); read_seqbegin_or_lock()
411 static inline int need_seqretry(seqlock_t *lock, int seq) need_seqretry() argument
413 return !(seq & 1) && read_seqretry(lock, seq); need_seqretry()
416 static inline void done_seqretry(seqlock_t *lock, int seq) done_seqretry() argument
419 read_sequnlock_excl(lock); done_seqretry()
424 spin_lock_bh(&sl->lock); read_seqlock_excl_bh()
429 spin_unlock_bh(&sl->lock); read_sequnlock_excl_bh()
434 spin_lock_irq(&sl->lock); read_seqlock_excl_irq()
439 spin_unlock_irq(&sl->lock); read_sequnlock_excl_irq()
446 spin_lock_irqsave(&sl->lock, flags); __read_seqlock_excl_irqsave()
450 #define read_seqlock_excl_irqsave(lock, flags) \
451 do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
456 spin_unlock_irqrestore(&sl->lock, flags); read_sequnlock_excl_irqrestore()
460 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) read_seqbegin_or_lock_irqsave() argument
465 *seq = read_seqbegin(lock); read_seqbegin_or_lock_irqsave()
467 read_seqlock_excl_irqsave(lock, flags); read_seqbegin_or_lock_irqsave()
473 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) done_seqretry_irqrestore() argument
476 read_sequnlock_excl_irqrestore(lock, flags); done_seqretry_irqrestore()
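
A short sketch of the seqlock_t pattern from the listing above: one writer, lock-free retrying readers. The two protected fields are hypothetical; read_seqbegin()/read_seqretry() are the reader side of the same header.

        static DEFINE_SEQLOCK(example_seq);
        static u64 example_a, example_b;

        static void example_writer(u64 a, u64 b)
        {
                write_seqlock(&example_seq);
                example_a = a;
                example_b = b;
                write_sequnlock(&example_seq);
        }

        static void example_reader(u64 *a, u64 *b)
        {
                unsigned int seq;

                do {
                        seq = read_seqbegin(&example_seq);
                        *a = example_a;
                        *b = example_b;
                } while (read_seqretry(&example_seq, seq));
        }
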
kbd_kern.h
29 #define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */
30 #define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */
31 #define VC_CTRLLOCK KG_CTRL /* control lock mode */
32 #define VC_ALTLOCK KG_ALT /* alt lock mode */
33 #define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */
34 #define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */
35 #define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */
36 #define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */
45 #define VC_SCROLLOCK 0 /* scroll-lock mode */
46 #define VC_NUMLOCK 1 /* numeric lock mode */
rwsem.h
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
98 * lock.
106 * lock for reading
116 * lock for writing
126 * release a read lock
131 * release a write lock
136 * downgrade write lock to read lock
144 * lock instance multiple times), but multiple locks of the
145 * same lock class might be taken, if the order of the locks
150 * the explicit definition of lock class keys and the use of
151 * lockdep_set_class() at lock initialization time.
165 * Take/release a lock when not the owner will release it.
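
The rwsem comments above ("lock for reading", "lock for writing", "downgrade write lock to read lock") correspond to the standard down_read()/down_write()/downgrade_write() entry points; a hypothetical use of the downgrade path:

        static DECLARE_RWSEM(example_rwsem);
        static int example_generation;

        static void example_rebuild_then_publish(void)
        {
                down_write(&example_rwsem);             /* lock for writing */
                example_generation++;
                downgrade_write(&example_rwsem);        /* let readers in, keep it held */
                /* ... read-side work on the new state ... */
                up_read(&example_rwsem);                /* release the downgraded lock */
        }
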
iocontext.h
46 * queue lock but the returned icq is valid only until the queue lock is
53 * - ioc lock nests inside q lock.
55 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
56 * q->icq_list and icq->q_node by q lock.
58 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
59 * itself is protected by q lock. However, both the indexes and icq
61 * the q lock.
68 * locks. Due to the lock ordering, q exit is simple but ioc exit
69 * requires reverse-order double lock dance.
102 /* all the fields below are protected by this lock */
103 spinlock_t lock; member in struct:io_context
/linux-4.1.27/arch/metag/include/asm/
spinlock.h
10 #define arch_spin_unlock_wait(lock) \
11 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
13 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
15 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
16 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
18 #define arch_spin_relax(lock) cpu_relax()
19 #define arch_read_relax(lock) cpu_relax()
20 #define arch_write_relax(lock) cpu_relax()
spinlock_lock1.h
7 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
12 ret = lock->lock; arch_spin_is_locked()
17 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
24 if (lock->lock == 0) { arch_spin_lock()
26 lock->lock = 1; arch_spin_lock()
32 WARN_ON(lock->lock != 1); arch_spin_lock()
35 /* Returns 0 if failed to acquire lock */ arch_spin_trylock()
36 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
42 ret = lock->lock; arch_spin_trylock()
45 lock->lock = 1; arch_spin_trylock()
51 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
54 WARN_ON(!lock->lock); arch_spin_unlock()
55 lock->lock = 0; arch_spin_unlock()
63 * just write zero since the lock is exclusively held.
73 if (rw->lock == 0) { arch_write_lock()
75 rw->lock = 0x80000000; arch_write_lock()
81 WARN_ON(rw->lock != 0x80000000); arch_write_lock()
90 ret = rw->lock; arch_write_trylock()
93 rw->lock = 0x80000000; arch_write_trylock()
103 WARN_ON(rw->lock != 0x80000000); arch_write_unlock()
104 rw->lock = 0; arch_write_unlock()
113 ret = rw->lock; arch_write_can_lock()
119 * - Exclusively load the lock value.
121 * - Store new lock value if positive, and we still own this location.
136 ret = rw->lock; arch_read_lock()
139 rw->lock = ret + 1; arch_read_lock()
154 ret = rw->lock--; arch_read_unlock()
165 ret = rw->lock; arch_read_trylock()
168 rw->lock = ret + 1; arch_read_trylock()
180 ret = rw->lock; arch_read_can_lock()
spinlock_types.h
9 volatile unsigned int lock; member in struct:__anon1847
15 volatile unsigned int lock; member in struct:__anon1848
spinlock_lnkget.h
10 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
19 : "da" (&lock->lock) arch_spin_is_locked()
24 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
38 : "da" (&lock->lock) arch_spin_lock()
44 /* Returns 0 if failed to acquire lock */ arch_spin_trylock()
45 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
60 : "da" (&lock->lock) arch_spin_trylock()
68 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
74 : "da" (&lock->lock), "da" (0) arch_spin_unlock()
83 * just write zero since the lock is exclusively held.
100 : "da" (&rw->lock), "bd" (0x80000000) arch_write_lock()
121 : "da" (&rw->lock), "bd" (0x80000000) arch_write_trylock()
135 : "da" (&rw->lock), "da" (0) arch_write_unlock()
149 : "da" (&rw->lock) arch_write_can_lock()
156 * - Exclusively load the lock value.
158 * - Store new lock value if positive, and we still own this location.
179 : "da" (&rw->lock) arch_read_lock()
199 : "da" (&rw->lock) arch_read_unlock()
219 : "da" (&rw->lock) arch_read_trylock()
237 : "da" (&rw->lock), "bd" (0x80000000) arch_read_can_lock()
242 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
243 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
245 #define arch_spin_relax(lock) cpu_relax()
246 #define arch_read_relax(lock) cpu_relax()
247 #define arch_write_relax(lock) cpu_relax()
global_lock.h
7 * __global_lock1() - Acquire global voluntary lock (LOCK1).
10 * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable
12 * so that the compiler cannot reorder memory accesses across the lock.
15 * locks until the voluntary lock is released with @__global_unlock1, but they
31 * __global_unlock1() - Release global voluntary lock (LOCK1).
34 * Releases the Meta global voluntary lock (LOCK1) acquired with
52 * __global_lock2() - Acquire global exclusive lock (LOCK2).
55 * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2),
57 * the atomic lock (system event) and to enforce a compiler barrier so that the
58 * compiler cannot reorder memory accesses across the lock.
77 * __global_unlock2() - Release global exclusive lock (LOCK2).
80 * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock
81 * acquired with @__global_lock2, also taking care to release the atomic lock
/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_lock.c
45 void ttm_lock_init(struct ttm_lock *lock) ttm_lock_init() argument
47 spin_lock_init(&lock->lock); ttm_lock_init()
48 init_waitqueue_head(&lock->queue); ttm_lock_init()
49 lock->rw = 0; ttm_lock_init()
50 lock->flags = 0; ttm_lock_init()
51 lock->kill_takers = false; ttm_lock_init()
52 lock->signal = SIGKILL; ttm_lock_init()
56 void ttm_read_unlock(struct ttm_lock *lock) ttm_read_unlock() argument
58 spin_lock(&lock->lock); ttm_read_unlock()
59 if (--lock->rw == 0) ttm_read_unlock()
60 wake_up_all(&lock->queue); ttm_read_unlock()
61 spin_unlock(&lock->lock); ttm_read_unlock()
65 static bool __ttm_read_lock(struct ttm_lock *lock) __ttm_read_lock() argument
69 spin_lock(&lock->lock); __ttm_read_lock()
70 if (unlikely(lock->kill_takers)) { __ttm_read_lock()
71 send_sig(lock->signal, current, 0); __ttm_read_lock()
72 spin_unlock(&lock->lock); __ttm_read_lock()
75 if (lock->rw >= 0 && lock->flags == 0) { __ttm_read_lock()
76 ++lock->rw; __ttm_read_lock()
79 spin_unlock(&lock->lock); __ttm_read_lock()
83 int ttm_read_lock(struct ttm_lock *lock, bool interruptible) ttm_read_lock() argument
88 ret = wait_event_interruptible(lock->queue, ttm_read_lock()
89 __ttm_read_lock(lock)); ttm_read_lock()
91 wait_event(lock->queue, __ttm_read_lock(lock)); ttm_read_lock()
96 static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) __ttm_read_trylock() argument
102 spin_lock(&lock->lock); __ttm_read_trylock()
103 if (unlikely(lock->kill_takers)) { __ttm_read_trylock()
104 send_sig(lock->signal, current, 0); __ttm_read_trylock()
105 spin_unlock(&lock->lock); __ttm_read_trylock()
108 if (lock->rw >= 0 && lock->flags == 0) { __ttm_read_trylock()
109 ++lock->rw; __ttm_read_trylock()
112 } else if (lock->flags == 0) { __ttm_read_trylock()
115 spin_unlock(&lock->lock); __ttm_read_trylock()
120 int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) ttm_read_trylock() argument
127 (lock->queue, __ttm_read_trylock(lock, &locked)); ttm_read_trylock()
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); ttm_read_trylock()
139 void ttm_write_unlock(struct ttm_lock *lock) ttm_write_unlock() argument
141 spin_lock(&lock->lock); ttm_write_unlock()
142 lock->rw = 0; ttm_write_unlock()
143 wake_up_all(&lock->queue); ttm_write_unlock()
144 spin_unlock(&lock->lock); ttm_write_unlock()
148 static bool __ttm_write_lock(struct ttm_lock *lock) __ttm_write_lock() argument
152 spin_lock(&lock->lock); __ttm_write_lock()
153 if (unlikely(lock->kill_takers)) { __ttm_write_lock()
154 send_sig(lock->signal, current, 0); __ttm_write_lock()
155 spin_unlock(&lock->lock); __ttm_write_lock()
158 if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { __ttm_write_lock()
159 lock->rw = -1; __ttm_write_lock()
160 lock->flags &= ~TTM_WRITE_LOCK_PENDING; __ttm_write_lock()
163 lock->flags |= TTM_WRITE_LOCK_PENDING; __ttm_write_lock()
165 spin_unlock(&lock->lock); __ttm_write_lock()
169 int ttm_write_lock(struct ttm_lock *lock, bool interruptible) ttm_write_lock() argument
174 ret = wait_event_interruptible(lock->queue, ttm_write_lock()
175 __ttm_write_lock(lock)); ttm_write_lock()
177 spin_lock(&lock->lock); ttm_write_lock()
178 lock->flags &= ~TTM_WRITE_LOCK_PENDING; ttm_write_lock()
179 wake_up_all(&lock->queue); ttm_write_lock()
180 spin_unlock(&lock->lock); ttm_write_lock()
183 wait_event(lock->queue, __ttm_read_lock(lock)); ttm_write_lock()
189 static int __ttm_vt_unlock(struct ttm_lock *lock) __ttm_vt_unlock() argument
193 spin_lock(&lock->lock); __ttm_vt_unlock()
194 if (unlikely(!(lock->flags & TTM_VT_LOCK))) __ttm_vt_unlock()
196 lock->flags &= ~TTM_VT_LOCK; __ttm_vt_unlock()
197 wake_up_all(&lock->queue); __ttm_vt_unlock()
198 spin_unlock(&lock->lock); __ttm_vt_unlock()
206 struct ttm_lock *lock = container_of(base, struct ttm_lock, base); ttm_vt_lock_remove() local
210 ret = __ttm_vt_unlock(lock); ttm_vt_lock_remove()
214 static bool __ttm_vt_lock(struct ttm_lock *lock) __ttm_vt_lock() argument
218 spin_lock(&lock->lock); __ttm_vt_lock()
219 if (lock->rw == 0) { __ttm_vt_lock()
220 lock->flags &= ~TTM_VT_LOCK_PENDING; __ttm_vt_lock()
221 lock->flags |= TTM_VT_LOCK; __ttm_vt_lock()
224 lock->flags |= TTM_VT_LOCK_PENDING; __ttm_vt_lock()
226 spin_unlock(&lock->lock); __ttm_vt_lock()
230 int ttm_vt_lock(struct ttm_lock *lock, ttm_vt_lock() argument
237 ret = wait_event_interruptible(lock->queue, ttm_vt_lock()
238 __ttm_vt_lock(lock)); ttm_vt_lock()
240 spin_lock(&lock->lock); ttm_vt_lock()
241 lock->flags &= ~TTM_VT_LOCK_PENDING; ttm_vt_lock()
242 wake_up_all(&lock->queue); ttm_vt_lock()
243 spin_unlock(&lock->lock); ttm_vt_lock()
247 wait_event(lock->queue, __ttm_vt_lock(lock)); ttm_vt_lock()
251 * make sure the lock is released if the client dies ttm_vt_lock()
255 ret = ttm_base_object_init(tfile, &lock->base, false, ttm_vt_lock()
258 (void)__ttm_vt_unlock(lock); ttm_vt_lock()
260 lock->vt_holder = tfile; ttm_vt_lock()
266 int ttm_vt_unlock(struct ttm_lock *lock) ttm_vt_unlock() argument
268 return ttm_ref_object_base_unref(lock->vt_holder, ttm_vt_unlock()
269 lock->base.hash.key, TTM_REF_USAGE); ttm_vt_unlock()
273 void ttm_suspend_unlock(struct ttm_lock *lock) ttm_suspend_unlock() argument
275 spin_lock(&lock->lock); ttm_suspend_unlock()
276 lock->flags &= ~TTM_SUSPEND_LOCK; ttm_suspend_unlock()
277 wake_up_all(&lock->queue); ttm_suspend_unlock()
278 spin_unlock(&lock->lock); ttm_suspend_unlock()
282 static bool __ttm_suspend_lock(struct ttm_lock *lock) __ttm_suspend_lock() argument
286 spin_lock(&lock->lock); __ttm_suspend_lock()
287 if (lock->rw == 0) { __ttm_suspend_lock()
288 lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; __ttm_suspend_lock()
289 lock->flags |= TTM_SUSPEND_LOCK; __ttm_suspend_lock()
292 lock->flags |= TTM_SUSPEND_LOCK_PENDING; __ttm_suspend_lock()
294 spin_unlock(&lock->lock); __ttm_suspend_lock()
298 void ttm_suspend_lock(struct ttm_lock *lock) ttm_suspend_lock() argument
300 wait_event(lock->queue, __ttm_suspend_lock(lock)); ttm_suspend_lock()
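
Based on the ttm_lock functions above, a sketch of taking the read side interruptibly around a hypothetical submission path (the surrounding driver code is invented):

        static int example_submit(struct ttm_lock *lock)
        {
                int ret;

                ret = ttm_read_lock(lock, true);        /* interruptible */
                if (ret)
                        return ret;
                /* ... validate buffers and submit work ... */
                ttm_read_unlock(lock);
                return 0;
        }
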
/linux-4.1.27/arch/alpha/include/asm/
spinlock.h
8 * Simple spin lock operations. There are two variants, one clears IRQ's
14 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
15 #define arch_spin_is_locked(x) ((x)->lock != 0)
17 do { cpu_relax(); } while ((x)->lock)
19 static inline void arch_spin_unlock(arch_spinlock_t * lock) arch_spin_unlock() argument
22 lock->lock = 0; arch_spin_unlock()
25 static inline void arch_spin_lock(arch_spinlock_t * lock) arch_spin_lock() argument
41 : "=&r" (tmp), "=m" (lock->lock) arch_spin_lock()
42 : "m"(lock->lock) : "memory"); arch_spin_lock()
45 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
47 return !test_and_set_bit(0, &lock->lock); arch_spin_trylock()
52 static inline int arch_read_can_lock(arch_rwlock_t *lock) arch_read_can_lock() argument
54 return (lock->lock & 1) == 0; arch_read_can_lock()
57 static inline int arch_write_can_lock(arch_rwlock_t *lock) arch_write_can_lock() argument
59 return lock->lock == 0; arch_write_can_lock()
62 static inline void arch_read_lock(arch_rwlock_t *lock) arch_read_lock() argument
78 : "=m" (*lock), "=&r" (regx) arch_read_lock()
79 : "m" (*lock) : "memory"); arch_read_lock()
82 static inline void arch_write_lock(arch_rwlock_t *lock) arch_write_lock() argument
98 : "=m" (*lock), "=&r" (regx) arch_write_lock()
99 : "m" (*lock) : "memory"); arch_write_lock()
102 static inline int arch_read_trylock(arch_rwlock_t * lock) arch_read_trylock() argument
118 : "=m" (*lock), "=&r" (regx), "=&r" (success) arch_read_trylock()
119 : "m" (*lock) : "memory"); arch_read_trylock()
124 static inline int arch_write_trylock(arch_rwlock_t * lock) arch_write_trylock() argument
140 : "=m" (*lock), "=&r" (regx), "=&r" (success) arch_write_trylock()
141 : "m" (*lock) : "memory"); arch_write_trylock()
146 static inline void arch_read_unlock(arch_rwlock_t * lock) arch_read_unlock() argument
158 : "=m" (*lock), "=&r" (regx) arch_read_unlock()
159 : "m" (*lock) : "memory"); arch_read_unlock()
162 static inline void arch_write_unlock(arch_rwlock_t * lock) arch_write_unlock() argument
165 lock->lock = 0; arch_write_unlock()
168 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
169 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
spinlock_types.h
9 volatile unsigned int lock; member in struct:__anon78
15 volatile unsigned int lock; member in struct:__anon79
/linux-4.1.27/arch/blackfin/include/asm/
spinlock.h
27 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
29 return __raw_spin_is_locked_asm(&lock->lock); arch_spin_is_locked()
32 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
34 __raw_spin_lock_asm(&lock->lock); arch_spin_lock()
37 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
39 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
41 return __raw_spin_trylock_asm(&lock->lock); arch_spin_trylock()
44 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
46 __raw_spin_unlock_asm(&lock->lock); arch_spin_unlock()
49 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
51 while (arch_spin_is_locked(lock)) arch_spin_unlock_wait()
57 return __raw_uncached_fetch_asm(&rw->lock) > 0; arch_read_can_lock()
62 return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; arch_write_can_lock()
67 __raw_read_lock_asm(&rw->lock); arch_read_lock()
70 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
74 return __raw_read_trylock_asm(&rw->lock); arch_read_trylock()
79 __raw_read_unlock_asm(&rw->lock); arch_read_unlock()
84 __raw_write_lock_asm(&rw->lock); arch_write_lock()
87 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
91 return __raw_write_trylock_asm(&rw->lock); arch_write_trylock()
96 __raw_write_unlock_asm(&rw->lock); arch_write_unlock()
99 #define arch_spin_relax(lock) cpu_relax()
100 #define arch_read_relax(lock) cpu_relax()
101 #define arch_write_relax(lock) cpu_relax()
spinlock_types.h
17 volatile unsigned int lock; member in struct:__anon332
23 volatile unsigned int lock; member in struct:__anon333
/linux-4.1.27/kernel/locking/
spinlock_debug.c
16 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, __raw_spin_lock_init() argument
21 * Make sure we are not reinitializing a held lock: __raw_spin_lock_init()
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); __raw_spin_lock_init()
24 lockdep_init_map(&lock->dep_map, name, key, 0); __raw_spin_lock_init()
26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; __raw_spin_lock_init()
27 lock->magic = SPINLOCK_MAGIC; __raw_spin_lock_init()
28 lock->owner = SPINLOCK_OWNER_INIT; __raw_spin_lock_init()
29 lock->owner_cpu = -1; __raw_spin_lock_init()
34 void __rwlock_init(rwlock_t *lock, const char *name, __rwlock_init() argument
39 * Make sure we are not reinitializing a held lock: __rwlock_init()
41 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); __rwlock_init()
42 lockdep_init_map(&lock->dep_map, name, key, 0); __rwlock_init()
44 lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; __rwlock_init()
45 lock->magic = RWLOCK_MAGIC; __rwlock_init()
46 lock->owner = SPINLOCK_OWNER_INIT; __rwlock_init()
47 lock->owner_cpu = -1; __rwlock_init()
52 static void spin_dump(raw_spinlock_t *lock, const char *msg) spin_dump() argument
56 if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT) spin_dump()
57 owner = lock->owner; spin_dump()
61 printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, " spin_dump()
63 lock, lock->magic, spin_dump()
66 lock->owner_cpu); spin_dump()
70 static void spin_bug(raw_spinlock_t *lock, const char *msg) spin_bug() argument
75 spin_dump(lock, msg); spin_bug()
78 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
81 debug_spin_lock_before(raw_spinlock_t *lock) debug_spin_lock_before() argument
83 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); debug_spin_lock_before()
84 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); debug_spin_lock_before()
85 SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), debug_spin_lock_before()
86 lock, "cpu recursion"); debug_spin_lock_before()
89 static inline void debug_spin_lock_after(raw_spinlock_t *lock) debug_spin_lock_after() argument
91 lock->owner_cpu = raw_smp_processor_id(); debug_spin_lock_after()
92 lock->owner = current; debug_spin_lock_after()
95 static inline void debug_spin_unlock(raw_spinlock_t *lock) debug_spin_unlock() argument
97 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); debug_spin_unlock()
98 SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); debug_spin_unlock()
99 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); debug_spin_unlock()
100 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), debug_spin_unlock()
101 lock, "wrong CPU"); debug_spin_unlock()
102 lock->owner = SPINLOCK_OWNER_INIT; debug_spin_unlock()
103 lock->owner_cpu = -1; debug_spin_unlock()
106 static void __spin_lock_debug(raw_spinlock_t *lock) __spin_lock_debug() argument
112 if (arch_spin_trylock(&lock->raw_lock)) __spin_lock_debug()
117 spin_dump(lock, "lockup suspected"); __spin_lock_debug()
124 * specific lock code a chance to acquire the lock. We have already __spin_lock_debug()
126 * specific code might actually succeed in acquiring the lock. If it is __spin_lock_debug()
130 arch_spin_lock(&lock->raw_lock); __spin_lock_debug()
133 void do_raw_spin_lock(raw_spinlock_t *lock) do_raw_spin_lock() argument
135 debug_spin_lock_before(lock); do_raw_spin_lock()
136 if (unlikely(!arch_spin_trylock(&lock->raw_lock))) do_raw_spin_lock()
137 __spin_lock_debug(lock); do_raw_spin_lock()
138 debug_spin_lock_after(lock); do_raw_spin_lock()
141 int do_raw_spin_trylock(raw_spinlock_t *lock) do_raw_spin_trylock() argument
143 int ret = arch_spin_trylock(&lock->raw_lock); do_raw_spin_trylock()
146 debug_spin_lock_after(lock); do_raw_spin_trylock()
151 SPIN_BUG_ON(!ret, lock, "trylock failure on UP"); do_raw_spin_trylock()
156 void do_raw_spin_unlock(raw_spinlock_t *lock) do_raw_spin_unlock() argument
158 debug_spin_unlock(lock); do_raw_spin_unlock()
159 arch_spin_unlock(&lock->raw_lock); do_raw_spin_unlock()
162 static void rwlock_bug(rwlock_t *lock, const char *msg) rwlock_bug() argument
169 task_pid_nr(current), lock); rwlock_bug()
173 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
175 #if 0 /* __write_lock_debug() can lock up - maybe this can too? */
176 static void __read_lock_debug(rwlock_t *lock)
184 if (arch_read_trylock(&lock->raw_lock))
191 printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
194 current->pid, lock);
201 void do_raw_read_lock(rwlock_t *lock) do_raw_read_lock() argument
203 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); do_raw_read_lock()
204 arch_read_lock(&lock->raw_lock); do_raw_read_lock()
207 int do_raw_read_trylock(rwlock_t *lock) do_raw_read_trylock() argument
209 int ret = arch_read_trylock(&lock->raw_lock); do_raw_read_trylock()
215 RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); do_raw_read_trylock()
220 void do_raw_read_unlock(rwlock_t *lock) do_raw_read_unlock() argument
222 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); do_raw_read_unlock()
223 arch_read_unlock(&lock->raw_lock); do_raw_read_unlock()
226 static inline void debug_write_lock_before(rwlock_t *lock) debug_write_lock_before() argument
228 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); debug_write_lock_before()
229 RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); debug_write_lock_before()
230 RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(), debug_write_lock_before()
231 lock, "cpu recursion"); debug_write_lock_before()
234 static inline void debug_write_lock_after(rwlock_t *lock) debug_write_lock_after() argument
236 lock->owner_cpu = raw_smp_processor_id(); debug_write_lock_after()
237 lock->owner = current; debug_write_lock_after()
240 static inline void debug_write_unlock(rwlock_t *lock) debug_write_unlock() argument
242 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); debug_write_unlock()
243 RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); debug_write_unlock()
244 RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), debug_write_unlock()
245 lock, "wrong CPU"); debug_write_unlock()
246 lock->owner = SPINLOCK_OWNER_INIT; debug_write_unlock()
247 lock->owner_cpu = -1; debug_write_unlock()
251 static void __write_lock_debug(rwlock_t *lock)
259 if (arch_write_trylock(&lock->raw_lock))
266 printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
269 current->pid, lock);
276 void do_raw_write_lock(rwlock_t *lock) do_raw_write_lock() argument
278 debug_write_lock_before(lock); do_raw_write_lock()
279 arch_write_lock(&lock->raw_lock); do_raw_write_lock()
280 debug_write_lock_after(lock); do_raw_write_lock()
283 int do_raw_write_trylock(rwlock_t *lock) do_raw_write_trylock() argument
285 int ret = arch_write_trylock(&lock->raw_lock); do_raw_write_trylock()
288 debug_write_lock_after(lock); do_raw_write_trylock()
293 RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP"); do_raw_write_trylock()
298 void do_raw_write_unlock(rwlock_t *lock) do_raw_write_unlock() argument
300 debug_write_unlock(lock); do_raw_write_unlock()
301 arch_write_unlock(&lock->raw_lock); do_raw_write_unlock()
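The spinlock_debug.c helpers above wrap the raw lock with bookkeeping: a magic value proves the lock was initialised, and the recorded owner catches recursion and unlocks by the wrong context. A user-space sketch of the same checks, with assert() standing in for SPIN_BUG_ON(), a pthread mutex standing in for the arch lock, the owner_cpu check left out, and all toy_ names illustrative:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define TOY_SPINLOCK_MAGIC 0xdead4ead   /* plays the role of SPINLOCK_MAGIC */

struct toy_dbg_lock {
        pthread_mutex_t raw;            /* stands in for arch_spinlock_t */
        unsigned int magic;
        pthread_t owner;
        int has_owner;
};

static void toy_dbg_lock_init(struct toy_dbg_lock *l)
{
        pthread_mutex_init(&l->raw, NULL);
        l->magic = TOY_SPINLOCK_MAGIC;
        l->has_owner = 0;
}

static void toy_dbg_lock_acquire(struct toy_dbg_lock *l)
{
        /* "bad magic": the lock was never initialised (or was corrupted). */
        assert(l->magic == TOY_SPINLOCK_MAGIC);
        /* "recursion": the calling thread already owns this lock. */
        assert(!(l->has_owner && pthread_equal(l->owner, pthread_self())));

        pthread_mutex_lock(&l->raw);
        l->owner = pthread_self();      /* record the owner only after acquiring */
        l->has_owner = 1;
}

static void toy_dbg_lock_release(struct toy_dbg_lock *l)
{
        assert(l->magic == TOY_SPINLOCK_MAGIC);
        /* "wrong owner": only the thread that locked it may unlock it. */
        assert(l->has_owner && pthread_equal(l->owner, pthread_self()));

        l->has_owner = 0;
        pthread_mutex_unlock(&l->raw);
}

int main(void)
{
        struct toy_dbg_lock l;

        toy_dbg_lock_init(&l);
        toy_dbg_lock_acquire(&l);
        toy_dbg_lock_release(&l);
        puts("debug checks passed");
        return 0;
}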
H A Dmutex.h12 #define spin_lock_mutex(lock, flags) \
13 do { spin_lock(lock); (void)(flags); } while (0)
14 #define spin_unlock_mutex(lock, flags) \
15 do { spin_unlock(lock); (void)(flags); } while (0)
16 #define mutex_remove_waiter(lock, waiter, ti) \
20 static inline void mutex_set_owner(struct mutex *lock) mutex_set_owner() argument
22 lock->owner = current; mutex_set_owner()
25 static inline void mutex_clear_owner(struct mutex *lock) mutex_clear_owner() argument
27 lock->owner = NULL; mutex_clear_owner()
30 static inline void mutex_set_owner(struct mutex *lock) mutex_set_owner() argument
34 static inline void mutex_clear_owner(struct mutex *lock) mutex_clear_owner() argument
39 #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
41 #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
42 #define debug_mutex_unlock(lock) do { } while (0)
43 #define debug_mutex_init(lock, name, key) do { } while (0)
46 debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_lock_common() argument
H A Dlglock.c21 arch_spinlock_t *lock; lg_local_lock() local
H A Dlglock.c21 arch_spinlock_t *lock; lg_local_lock() local
25 lock = this_cpu_ptr(lg->lock); lg_local_lock()
26 arch_spin_lock(lock); lg_local_lock()
32 arch_spinlock_t *lock; lg_local_unlock() local
35 lock = this_cpu_ptr(lg->lock); lg_local_unlock()
36 arch_spin_unlock(lock); lg_local_unlock()
43 arch_spinlock_t *lock; lg_local_lock_cpu() local
47 lock = per_cpu_ptr(lg->lock, cpu); lg_local_lock_cpu()
48 arch_spin_lock(lock); lg_local_lock_cpu()
54 arch_spinlock_t *lock; lg_local_unlock_cpu() local
57 lock = per_cpu_ptr(lg->lock, cpu); lg_local_unlock_cpu()
58 arch_spin_unlock(lock); lg_local_unlock_cpu()
70 arch_spinlock_t *lock; for_each_possible_cpu() local
71 lock = per_cpu_ptr(lg->lock, i); for_each_possible_cpu()
72 arch_spin_lock(lock); for_each_possible_cpu()
83 arch_spinlock_t *lock; for_each_possible_cpu() local
84 lock = per_cpu_ptr(lg->lock, i); for_each_possible_cpu()
85 arch_spin_unlock(lock); for_each_possible_cpu()
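lg_local_lock()/lg_local_unlock() above touch only the calling CPU's element of a per-CPU lock array, while the global variants walk every possible CPU in a fixed order. A user-space sketch of that split, with an ordinary mutex array standing in for the per-CPU arch_spinlock_t and a caller-supplied index standing in for the CPU id (toy_ names and TOY_NR_CPUS are illustrative):

#include <pthread.h>
#include <stdio.h>

#define TOY_NR_CPUS 4

struct toy_lglock {
        pthread_mutex_t cpu_lock[TOY_NR_CPUS];  /* stand-in for per-CPU arch_spinlock_t */
};

static void toy_lg_init(struct toy_lglock *lg)
{
        for (int i = 0; i < TOY_NR_CPUS; i++)
                pthread_mutex_init(&lg->cpu_lock[i], NULL);
}

/* Local lock: take only this "CPU"'s lock - cheap, no cross-CPU traffic. */
static void toy_lg_local_lock(struct toy_lglock *lg, int cpu)
{
        pthread_mutex_lock(&lg->cpu_lock[cpu]);
}

static void toy_lg_local_unlock(struct toy_lglock *lg, int cpu)
{
        pthread_mutex_unlock(&lg->cpu_lock[cpu]);
}

/* Global lock: take every per-CPU lock in a fixed order, like the
 * for_each_possible_cpu() loops above. */
static void toy_lg_global_lock(struct toy_lglock *lg)
{
        for (int i = 0; i < TOY_NR_CPUS; i++)
                pthread_mutex_lock(&lg->cpu_lock[i]);
}

static void toy_lg_global_unlock(struct toy_lglock *lg)
{
        for (int i = 0; i < TOY_NR_CPUS; i++)
                pthread_mutex_unlock(&lg->cpu_lock[i]);
}

int main(void)
{
        struct toy_lglock lg;

        toy_lg_init(&lg);
        toy_lg_local_lock(&lg, 1);
        toy_lg_local_unlock(&lg, 1);
        toy_lg_global_lock(&lg);
        toy_lg_global_unlock(&lg);
        puts("lglock sketch ok");
        return 0;
}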
H A Dspinlock.c27 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
39 * Some architectures can relax in favour of the CPU owning the lock.
56 * This could be a long-held lock. We both prepare to spin for a long
58 * towards that other CPU that it should break the lock ASAP.
61 void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
65 if (likely(do_raw_##op##_trylock(lock))) \
69 if (!(lock)->break_lock) \
70 (lock)->break_lock = 1; \
71 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
72 arch_##op##_relax(&lock->raw_lock); \
74 (lock)->break_lock = 0; \
77 unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
84 if (likely(do_raw_##op##_trylock(lock))) \
89 if (!(lock)->break_lock) \
90 (lock)->break_lock = 1; \
91 while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
92 arch_##op##_relax(&lock->raw_lock); \
94 (lock)->break_lock = 0; \
98 void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
100 _raw_##op##_lock_irqsave(lock); \
103 void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
112 flags = _raw_##op##_lock_irqsave(lock); \
119 * lock-spinning functions:
133 int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) _raw_spin_trylock() argument
135 return __raw_spin_trylock(lock); _raw_spin_trylock()
141 int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) _raw_spin_trylock_bh() argument
143 return __raw_spin_trylock_bh(lock); _raw_spin_trylock_bh()
149 void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) _raw_spin_lock() argument
151 __raw_spin_lock(lock); _raw_spin_lock()
157 unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) _raw_spin_lock_irqsave() argument
159 return __raw_spin_lock_irqsave(lock); _raw_spin_lock_irqsave()
165 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) _raw_spin_lock_irq() argument
167 __raw_spin_lock_irq(lock); _raw_spin_lock_irq()
173 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) _raw_spin_lock_bh() argument
175 __raw_spin_lock_bh(lock); _raw_spin_lock_bh()
181 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) _raw_spin_unlock() argument
183 __raw_spin_unlock(lock); _raw_spin_unlock()
189 void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) _raw_spin_unlock_irqrestore() argument
191 __raw_spin_unlock_irqrestore(lock, flags); _raw_spin_unlock_irqrestore()
197 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) _raw_spin_unlock_irq() argument
199 __raw_spin_unlock_irq(lock); _raw_spin_unlock_irq()
205 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) _raw_spin_unlock_bh() argument
207 __raw_spin_unlock_bh(lock); _raw_spin_unlock_bh()
213 int __lockfunc _raw_read_trylock(rwlock_t *lock) _raw_read_trylock() argument
215 return __raw_read_trylock(lock); _raw_read_trylock()
221 void __lockfunc _raw_read_lock(rwlock_t *lock) _raw_read_lock() argument
223 __raw_read_lock(lock); _raw_read_lock()
229 unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) _raw_read_lock_irqsave() argument
231 return __raw_read_lock_irqsave(lock); _raw_read_lock_irqsave()
237 void __lockfunc _raw_read_lock_irq(rwlock_t *lock) _raw_read_lock_irq() argument
239 __raw_read_lock_irq(lock); _raw_read_lock_irq()
245 void __lockfunc _raw_read_lock_bh(rwlock_t *lock) _raw_read_lock_bh() argument
247 __raw_read_lock_bh(lock); _raw_read_lock_bh()
253 void __lockfunc _raw_read_unlock(rwlock_t *lock) _raw_read_unlock() argument
255 __raw_read_unlock(lock); _raw_read_unlock()
261 void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) _raw_read_unlock_irqrestore() argument
263 __raw_read_unlock_irqrestore(lock, flags); _raw_read_unlock_irqrestore()
269 void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) _raw_read_unlock_irq() argument
271 __raw_read_unlock_irq(lock); _raw_read_unlock_irq()
277 void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) _raw_read_unlock_bh() argument
279 __raw_read_unlock_bh(lock); _raw_read_unlock_bh()
285 int __lockfunc _raw_write_trylock(rwlock_t *lock) _raw_write_trylock() argument
287 return __raw_write_trylock(lock); _raw_write_trylock()
293 void __lockfunc _raw_write_lock(rwlock_t *lock) _raw_write_lock() argument
295 __raw_write_lock(lock); _raw_write_lock()
301 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) _raw_write_lock_irqsave() argument
303 return __raw_write_lock_irqsave(lock); _raw_write_lock_irqsave()
309 void __lockfunc _raw_write_lock_irq(rwlock_t *lock) _raw_write_lock_irq() argument
311 __raw_write_lock_irq(lock); _raw_write_lock_irq()
317 void __lockfunc _raw_write_lock_bh(rwlock_t *lock) _raw_write_lock_bh() argument
319 __raw_write_lock_bh(lock); _raw_write_lock_bh()
325 void __lockfunc _raw_write_unlock(rwlock_t *lock) _raw_write_unlock() argument
327 __raw_write_unlock(lock); _raw_write_unlock()
333 void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) _raw_write_unlock_irqrestore() argument
335 __raw_write_unlock_irqrestore(lock, flags); _raw_write_unlock_irqrestore()
341 void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) _raw_write_unlock_irq() argument
343 __raw_write_unlock_irq(lock); _raw_write_unlock_irq()
349 void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) _raw_write_unlock_bh() argument
351 __raw_write_unlock_bh(lock); _raw_write_unlock_bh()
358 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) _raw_spin_lock_nested() argument
361 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); _raw_spin_lock_nested()
362 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); _raw_spin_lock_nested()
366 void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass) _raw_spin_lock_bh_nested() argument
369 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); _raw_spin_lock_bh_nested()
370 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); _raw_spin_lock_bh_nested()
374 unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, _raw_spin_lock_irqsave_nested() argument
381 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); _raw_spin_lock_irqsave_nested()
382 LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, _raw_spin_lock_irqsave_nested()
388 void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, _raw_spin_lock_nest_lock() argument
392 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); _raw_spin_lock_nest_lock()
393 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); _raw_spin_lock_nest_lock()
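The BUILD_LOCK_OPS loop above is a trylock-then-poll pattern: take the expensive atomic only when a cheap read (raw_*_can_lock(), plus the break_lock hint) suggests it can succeed, and relax in between so the owner's cache line is not hammered. A user-space sketch of that shape using C11 atomics; break_lock, preemption control and the irqsave variants are left out, and the toy_ names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct toy_spinlock { atomic_int locked; };   /* 0 = free, 1 = held */

static int toy_trylock(struct toy_spinlock *l)
{
        int expected = 0;
        return atomic_compare_exchange_strong(&l->locked, &expected, 1);
}

static int toy_can_lock(struct toy_spinlock *l)
{
        return atomic_load_explicit(&l->locked, memory_order_relaxed) == 0;
}

static void toy_cpu_relax(void) { /* a pause/yield hint would go here */ }

/* Same shape as the generated __raw_*_lock(): try the atomic once, then spin
 * on plain reads until the lock looks free, then try the atomic again. */
static void toy_lock(struct toy_spinlock *l)
{
        for (;;) {
                if (toy_trylock(l))
                        return;
                while (!toy_can_lock(l))
                        toy_cpu_relax();
        }
}

static void toy_unlock(struct toy_spinlock *l)
{
        atomic_store(&l->locked, 0);
}

int main(void)
{
        struct toy_spinlock l = { 0 };

        toy_lock(&l);
        toy_unlock(&l);
        puts("lock/unlock ok");
        return 0;
}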
H A Dmutex-debug.c10 * lock debugging, locking tree, deadlock detection started by:
28 * Must be called with lock->wait_lock held.
30 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_lock_common() argument
37 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_wake_waiter() argument
39 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); debug_mutex_wake_waiter()
40 DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list)); debug_mutex_wake_waiter()
51 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, debug_mutex_add_waiter() argument
54 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); debug_mutex_add_waiter()
56 /* Mark the current thread as blocked on the lock: */ debug_mutex_add_waiter()
60 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, mutex_remove_waiter() argument
72 void debug_mutex_unlock(struct mutex *lock) debug_mutex_unlock() argument
75 DEBUG_LOCKS_WARN_ON(lock->magic != lock); debug_mutex_unlock()
77 if (!lock->owner) debug_mutex_unlock()
78 DEBUG_LOCKS_WARN_ON(!lock->owner); debug_mutex_unlock()
80 DEBUG_LOCKS_WARN_ON(lock->owner != current); debug_mutex_unlock()
82 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); debug_mutex_unlock()
89 mutex_clear_owner(lock); debug_mutex_unlock()
90 atomic_set(&lock->count, 1); debug_mutex_unlock()
93 void debug_mutex_init(struct mutex *lock, const char *name, debug_mutex_init() argument
98 * Make sure we are not reinitializing a held lock: debug_mutex_init()
100 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); debug_mutex_init()
101 lockdep_init_map(&lock->dep_map, name, key, 0); debug_mutex_init()
103 lock->magic = lock; debug_mutex_init()
108 * @lock: the mutex to be destroyed
114 void mutex_destroy(struct mutex *lock) mutex_destroy() argument
116 DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock)); mutex_destroy()
117 lock->magic = NULL; mutex_destroy()
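debug_mutex_init() above stores the mutex's own address in lock->magic, debug_mutex_unlock() warns when that self-pointer no longer matches, and mutex_destroy() clears it so any later use is caught. A single-threaded sketch of just that magic bookkeeping; the actual locking is elided, assert() stands in for DEBUG_LOCKS_WARN_ON(), and the toy_ names are illustrative:

#include <assert.h>
#include <stdio.h>

struct toy_mutex {
        struct toy_mutex *magic;   /* points back at the mutex itself when valid */
        int locked;
};

static void toy_mutex_init(struct toy_mutex *m)
{
        m->magic = m;              /* lock->magic = lock, as in debug_mutex_init() */
        m->locked = 0;
}

static void toy_mutex_lock(struct toy_mutex *m)
{
        assert(m->magic == m);     /* catches uninitialised or destroyed mutexes */
        m->locked = 1;
}

static void toy_mutex_unlock(struct toy_mutex *m)
{
        assert(m->magic == m);
        m->locked = 0;
}

static void toy_mutex_destroy(struct toy_mutex *m)
{
        assert(!m->locked);        /* mutex_destroy() warns if still locked */
        m->magic = NULL;           /* any later lock/unlock now trips the check */
}

int main(void)
{
        struct toy_mutex m;

        toy_mutex_init(&m);
        toy_mutex_lock(&m);
        toy_mutex_unlock(&m);
        toy_mutex_destroy(&m);
        puts("magic checks passed");
        return 0;
}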
H A Drtmutex.c23 * lock->owner state tracking:
25 * lock->owner holds the task_struct pointer of the owner. Bit 0
26 * is used to keep track of the "lock has waiters" state.
29 * NULL 0 lock is free (fast acquire possible)
30 * NULL 1 lock is free and has waiters and the top waiter
31 * is going to take the lock*
32 * taskpointer 0 lock is held (fast release possible)
33 * taskpointer 1 lock is held and has waiters**
36 * possible when bit 0 of lock->owner is 0.
38 * (*) It also can be a transitional state when grabbing the lock
39 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
40 * we need to set the bit0 before looking at the lock, and the owner may be
44 * waiters. This can happen when grabbing the lock in the slow path.
45 * To prevent a cmpxchg of the owner releasing the lock, we need to
46 * set this bit before looking at the lock.
50 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) rt_mutex_set_owner() argument
54 if (rt_mutex_has_waiters(lock)) rt_mutex_set_owner()
57 lock->owner = (struct task_struct *)val; rt_mutex_set_owner()
60 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) clear_rt_mutex_waiters() argument
62 lock->owner = (struct task_struct *) clear_rt_mutex_waiters()
63 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); clear_rt_mutex_waiters()
66 static void fixup_rt_mutex_waiters(struct rt_mutex *lock) fixup_rt_mutex_waiters() argument
68 if (!rt_mutex_has_waiters(lock)) fixup_rt_mutex_waiters()
69 clear_rt_mutex_waiters(lock); fixup_rt_mutex_waiters()
78 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) mark_rt_mutex_waiters() argument
80 unsigned long owner, *p = (unsigned long *) &lock->owner; mark_rt_mutex_waiters()
90 * 2) Drop lock->wait_lock
91 * 3) Try to unlock the lock with cmpxchg
93 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
94 __releases(lock->wait_lock)
96 struct task_struct *owner = rt_mutex_owner(lock);
98 clear_rt_mutex_waiters(lock); variable
99 raw_spin_unlock(&lock->wait_lock);
105 * lock(wait_lock);
107 * mark_rt_mutex_waiters(lock);
108 * acquire(lock);
112 * lock(wait_lock);
113 * mark_rt_mutex_waiters(lock);
118 * lock(wait_lock);
121 * lock(wait_lock);
122 * acquire(lock);
124 return rt_mutex_cmpxchg(lock, owner, NULL);
129 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) mark_rt_mutex_waiters() argument
131 lock->owner = (struct task_struct *) mark_rt_mutex_waiters()
132 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); mark_rt_mutex_waiters()
136 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
138 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
139 __releases(lock->wait_lock)
141 lock->owner = NULL;
142 raw_spin_unlock(&lock->wait_lock);
167 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_enqueue() argument
169 struct rb_node **link = &lock->waiters.rb_node; rt_mutex_enqueue()
186 lock->waiters_leftmost = &waiter->tree_entry; rt_mutex_enqueue()
189 rb_insert_color(&waiter->tree_entry, &lock->waiters); rt_mutex_enqueue()
193 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_dequeue() argument
198 if (lock->waiters_leftmost == &waiter->tree_entry) rt_mutex_dequeue()
199 lock->waiters_leftmost = rb_next(&waiter->tree_entry); rt_mutex_dequeue()
201 rb_erase(&waiter->tree_entry, &lock->waiters); rt_mutex_dequeue()
298 * (Note: We do this outside of the protection of lock->wait_lock to
299 * allow the lock to be taken while or before we readjust the priority
345 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; task_blocked_on_lock()
360 * comparison to detect lock chain changes.
389 * [1] lock(task->pi_lock); [R] acquire [P]
392 * [4] lock = waiter->lock; [P]
393 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
398 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
402 * [10] task = owner(lock); [L]
404 * lock(task->pi_lock); [L] acquire [P]
405 * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
408 * unlock(lock->wait_lock); release [L]
421 struct rt_mutex *lock; rt_mutex_adjust_prio_chain() local
436 * We limit the lock chain length for each invocation. rt_mutex_adjust_prio_chain()
447 printk(KERN_WARNING "Maximum lock depth %d reached " rt_mutex_adjust_prio_chain()
487 * the previous owner of the lock might have released the lock. rt_mutex_adjust_prio_chain()
494 * the task might have moved on in the lock chain or even left rt_mutex_adjust_prio_chain()
495 * the chain completely and blocks now on an unrelated lock or rt_mutex_adjust_prio_chain()
498 * We stored the lock on which @task was blocked in @next_lock, rt_mutex_adjust_prio_chain()
501 if (next_lock != waiter->lock) rt_mutex_adjust_prio_chain()
541 * [4] Get the next lock rt_mutex_adjust_prio_chain()
543 lock = waiter->lock; rt_mutex_adjust_prio_chain()
546 * which is the reverse lock order versus the other rtmutex rt_mutex_adjust_prio_chain()
549 if (!raw_spin_trylock(&lock->wait_lock)) { rt_mutex_adjust_prio_chain()
557 * lock->wait_lock. rt_mutex_adjust_prio_chain()
559 * Deadlock detection. If the lock is the same as the original rt_mutex_adjust_prio_chain()
560 * lock which caused us to walk the lock chain or if the rt_mutex_adjust_prio_chain()
561 * current lock is owned by the task which initiated the chain rt_mutex_adjust_prio_chain()
564 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { rt_mutex_adjust_prio_chain()
565 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); rt_mutex_adjust_prio_chain()
566 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
572 * If we just follow the lock chain for deadlock detection, no rt_mutex_adjust_prio_chain()
585 * [9] check_exit_conditions_3 protected by lock->wait_lock. rt_mutex_adjust_prio_chain()
586 * If there is no owner of the lock, end of chain. rt_mutex_adjust_prio_chain()
588 if (!rt_mutex_owner(lock)) { rt_mutex_adjust_prio_chain()
589 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
593 /* [10] Grab the next task, i.e. owner of @lock */ rt_mutex_adjust_prio_chain()
594 task = rt_mutex_owner(lock); rt_mutex_adjust_prio_chain()
608 top_waiter = rt_mutex_top_waiter(lock); rt_mutex_adjust_prio_chain()
612 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
622 * operation on @lock. We need it for the boost/deboost rt_mutex_adjust_prio_chain()
625 prerequeue_top_waiter = rt_mutex_top_waiter(lock); rt_mutex_adjust_prio_chain()
627 /* [7] Requeue the waiter in the lock waiter list. */ rt_mutex_adjust_prio_chain()
628 rt_mutex_dequeue(lock, waiter); rt_mutex_adjust_prio_chain()
630 rt_mutex_enqueue(lock, waiter); rt_mutex_adjust_prio_chain()
637 * [9] check_exit_conditions_3 protected by lock->wait_lock. rt_mutex_adjust_prio_chain()
639 * We must abort the chain walk if there is no lock owner even rt_mutex_adjust_prio_chain()
640 * in the deadlock detection case, as we have nothing to rt_mutex_adjust_prio_chain()
643 if (!rt_mutex_owner(lock)) { rt_mutex_adjust_prio_chain()
647 * to get the lock. rt_mutex_adjust_prio_chain()
649 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) rt_mutex_adjust_prio_chain()
650 wake_up_process(rt_mutex_top_waiter(lock)->task); rt_mutex_adjust_prio_chain()
651 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
655 /* [10] Grab the next task, i.e. the owner of @lock */ rt_mutex_adjust_prio_chain()
656 task = rt_mutex_owner(lock); rt_mutex_adjust_prio_chain()
661 if (waiter == rt_mutex_top_waiter(lock)) { rt_mutex_adjust_prio_chain()
664 * waiter on the lock. Replace the previous top waiter rt_mutex_adjust_prio_chain()
674 * The waiter was the top waiter on the lock, but is rt_mutex_adjust_prio_chain()
684 waiter = rt_mutex_top_waiter(lock); rt_mutex_adjust_prio_chain()
696 * and lock->wait_lock. The actual decisions are made after we rt_mutex_adjust_prio_chain()
699 * Check whether the task which owns the current lock is pi rt_mutex_adjust_prio_chain()
700 * blocked itself. If yes we store a pointer to the lock for rt_mutex_adjust_prio_chain()
701 * the lock chain change detection above. After we dropped rt_mutex_adjust_prio_chain()
706 * Store the top waiter of @lock for the end of chain walk rt_mutex_adjust_prio_chain()
709 top_waiter = rt_mutex_top_waiter(lock); rt_mutex_adjust_prio_chain()
713 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
719 * We reached the end of the lock chain. Stop right here. No rt_mutex_adjust_prio_chain()
726 * If the current waiter is not the top waiter on the lock, rt_mutex_adjust_prio_chain()
746 * Must be called with lock->wait_lock held.
748 * @lock: The lock to be acquired.
749 * @task: The task which wants to acquire the lock
750 * @waiter: The waiter that is queued to the lock's wait list if the
753 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, try_to_take_rt_mutex() argument
759 * Before testing whether we can acquire @lock, we set the try_to_take_rt_mutex()
760 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all try_to_take_rt_mutex()
761 * other tasks which try to modify @lock into the slow path try_to_take_rt_mutex()
762 * and they serialize on @lock->wait_lock. try_to_take_rt_mutex()
767 * - There is a lock owner. The caller must fixup the try_to_take_rt_mutex()
768 * transient state if it does a trylock or leaves the lock try_to_take_rt_mutex()
771 * - @task acquires the lock and there are no other try_to_take_rt_mutex()
775 mark_rt_mutex_waiters(lock); try_to_take_rt_mutex()
778 * If @lock has an owner, give up. try_to_take_rt_mutex()
780 if (rt_mutex_owner(lock)) try_to_take_rt_mutex()
785 * into @lock waiter list. If @waiter == NULL then this is a try_to_take_rt_mutex()
791 * @lock, give up. try_to_take_rt_mutex()
793 if (waiter != rt_mutex_top_waiter(lock)) try_to_take_rt_mutex()
797 * We can acquire the lock. Remove the waiter from the try_to_take_rt_mutex()
798 * lock waiters list. try_to_take_rt_mutex()
800 rt_mutex_dequeue(lock, waiter); try_to_take_rt_mutex()
804 * If the lock has waiters already we check whether @task is try_to_take_rt_mutex()
805 * eligible to take over the lock. try_to_take_rt_mutex()
808 * the lock. @task->pi_blocked_on is NULL, so it does try_to_take_rt_mutex()
811 if (rt_mutex_has_waiters(lock)) { try_to_take_rt_mutex()
817 if (task->prio >= rt_mutex_top_waiter(lock)->prio) try_to_take_rt_mutex()
822 * don't have to change anything in the lock try_to_take_rt_mutex()
827 * No waiters. Take the lock without the try_to_take_rt_mutex()
845 * Finish the lock acquisition. @task is the new owner. If try_to_take_rt_mutex()
849 if (rt_mutex_has_waiters(lock)) try_to_take_rt_mutex()
850 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); try_to_take_rt_mutex()
854 /* We got the lock. */ try_to_take_rt_mutex()
855 debug_rt_mutex_lock(lock); try_to_take_rt_mutex()
861 rt_mutex_set_owner(lock, task); try_to_take_rt_mutex()
863 rt_mutex_deadlock_account_lock(lock, task); try_to_take_rt_mutex()
869 * Task blocks on lock.
873 * This must be called with lock->wait_lock held.
875 static int task_blocks_on_rt_mutex(struct rt_mutex *lock, task_blocks_on_rt_mutex() argument
880 struct task_struct *owner = rt_mutex_owner(lock); task_blocks_on_rt_mutex()
901 waiter->lock = lock; task_blocks_on_rt_mutex()
904 /* Get the top priority waiter on the lock */ task_blocks_on_rt_mutex()
905 if (rt_mutex_has_waiters(lock)) task_blocks_on_rt_mutex()
906 top_waiter = rt_mutex_top_waiter(lock); task_blocks_on_rt_mutex()
907 rt_mutex_enqueue(lock, waiter); task_blocks_on_rt_mutex()
917 if (waiter == rt_mutex_top_waiter(lock)) { task_blocks_on_rt_mutex()
928 /* Store the lock on which owner is blocked or NULL */ task_blocks_on_rt_mutex()
941 * The owner can't disappear while holding a lock, task_blocks_on_rt_mutex()
947 raw_spin_unlock(&lock->wait_lock); task_blocks_on_rt_mutex()
949 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, task_blocks_on_rt_mutex()
952 raw_spin_lock(&lock->wait_lock); task_blocks_on_rt_mutex()
958 * Wake up the next waiter on the lock.
963 * Called with lock->wait_lock held.
965 static void wakeup_next_waiter(struct rt_mutex *lock) wakeup_next_waiter() argument
972 waiter = rt_mutex_top_waiter(lock); wakeup_next_waiter()
978 * lock->wait_lock. wakeup_next_waiter()
984 * queued on the lock until it gets the lock, this lock wakeup_next_waiter()
988 * the top waiter can steal this lock. wakeup_next_waiter()
990 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; wakeup_next_waiter()
996 * long as we hold lock->wait_lock. The waiter task needs to wakeup_next_waiter()
1003 * Remove a waiter from a lock and give up
1005 * Must be called with lock->wait_lock held and
1008 static void remove_waiter(struct rt_mutex *lock, remove_waiter() argument
1011 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); remove_waiter()
1012 struct task_struct *owner = rt_mutex_owner(lock); remove_waiter()
1017 rt_mutex_dequeue(lock, waiter); remove_waiter()
1023 * waiter of the lock and there is an owner to update. remove_waiter()
1032 if (rt_mutex_has_waiters(lock)) remove_waiter()
1033 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); remove_waiter()
1037 /* Store the lock on which owner is blocked or NULL */ remove_waiter()
1052 raw_spin_unlock(&lock->wait_lock); remove_waiter()
1054 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, remove_waiter()
1057 raw_spin_lock(&lock->wait_lock); remove_waiter()
1079 next_lock = waiter->lock; rt_mutex_adjust_pi()
1091 * @lock: the rt_mutex to take
1097 * lock->wait_lock must be held by the caller.
1100 __rt_mutex_slowlock(struct rt_mutex *lock, int state, __rt_mutex_slowlock() argument
1107 /* Try to acquire the lock: */ __rt_mutex_slowlock()
1108 if (try_to_take_rt_mutex(lock, current, waiter)) __rt_mutex_slowlock()
1125 raw_spin_unlock(&lock->wait_lock); __rt_mutex_slowlock()
1129 schedule_rt_mutex(lock); __rt_mutex_slowlock()
1131 raw_spin_lock(&lock->wait_lock); __rt_mutex_slowlock()
1160 * Slow path lock function:
1163 rt_mutex_slowlock(struct rt_mutex *lock, int state, rt_mutex_slowlock() argument
1174 raw_spin_lock(&lock->wait_lock); rt_mutex_slowlock()
1176 /* Try to acquire the lock again: */ rt_mutex_slowlock()
1177 if (try_to_take_rt_mutex(lock, current, NULL)) { rt_mutex_slowlock()
1178 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowlock()
1191 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); rt_mutex_slowlock()
1195 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); rt_mutex_slowlock()
1199 if (rt_mutex_has_waiters(lock)) rt_mutex_slowlock()
1200 remove_waiter(lock, &waiter); rt_mutex_slowlock()
1208 fixup_rt_mutex_waiters(lock); rt_mutex_slowlock()
1210 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowlock()
1222 * Slow path try-lock function:
1224 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) rt_mutex_slowtrylock() argument
1229 * If the lock already has an owner we fail to get the lock. rt_mutex_slowtrylock()
1230 * This can be done without taking the @lock->wait_lock as rt_mutex_slowtrylock()
1233 if (rt_mutex_owner(lock)) rt_mutex_slowtrylock()
1237 * The mutex has currently no owner. Lock the wait lock and rt_mutex_slowtrylock()
1238 * try to acquire the lock. rt_mutex_slowtrylock()
1240 raw_spin_lock(&lock->wait_lock); rt_mutex_slowtrylock()
1242 ret = try_to_take_rt_mutex(lock, current, NULL); rt_mutex_slowtrylock()
1245 * try_to_take_rt_mutex() sets the lock waiters bit rt_mutex_slowtrylock()
1248 fixup_rt_mutex_waiters(lock); rt_mutex_slowtrylock()
1250 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowtrylock()
1259 rt_mutex_slowunlock(struct rt_mutex *lock) rt_mutex_slowunlock() argument
1261 raw_spin_lock(&lock->wait_lock); rt_mutex_slowunlock()
1263 debug_rt_mutex_unlock(lock); rt_mutex_slowunlock()
1272 * foo->lock->owner = NULL; rt_mutex_slowunlock()
1273 * rtmutex_lock(foo->lock); <- fast path rt_mutex_slowunlock()
1275 * rtmutex_unlock(foo->lock); <- fast path rt_mutex_slowunlock()
1278 * raw_spin_unlock(foo->lock->wait_lock); rt_mutex_slowunlock()
1283 * lock->wait_lock. So we do the following sequence: rt_mutex_slowunlock()
1285 * owner = rt_mutex_owner(lock); rt_mutex_slowunlock()
1286 * clear_rt_mutex_waiters(lock); rt_mutex_slowunlock()
1287 * raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1288 * if (cmpxchg(&lock->owner, owner, 0) == owner) rt_mutex_slowunlock()
1293 * lock->owner is serialized by lock->wait_lock: rt_mutex_slowunlock()
1295 * lock->owner = NULL; rt_mutex_slowunlock()
1296 * raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1298 while (!rt_mutex_has_waiters(lock)) { rt_mutex_slowunlock()
1299 /* Drops lock->wait_lock ! */ rt_mutex_slowunlock()
1300 if (unlock_rt_mutex_safe(lock) == true) rt_mutex_slowunlock()
1303 raw_spin_lock(&lock->wait_lock); rt_mutex_slowunlock()
1310 wakeup_next_waiter(lock); rt_mutex_slowunlock()
1312 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1319 * debug aware fast / slowpath lock,trylock,unlock
1325 rt_mutex_fastlock(struct rt_mutex *lock, int state, rt_mutex_fastlock() argument
1326 int (*slowfn)(struct rt_mutex *lock, int state, rt_mutex_fastlock()
1330 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_fastlock()
1331 rt_mutex_deadlock_account_lock(lock, current); rt_mutex_fastlock()
1334 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); rt_mutex_fastlock()
1338 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, rt_mutex_timed_fastlock() argument
1341 int (*slowfn)(struct rt_mutex *lock, int state, rt_mutex_timed_fastlock()
1346 likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_timed_fastlock()
1347 rt_mutex_deadlock_account_lock(lock, current); rt_mutex_timed_fastlock()
1350 return slowfn(lock, state, timeout, chwalk); rt_mutex_timed_fastlock()
1354 rt_mutex_fasttrylock(struct rt_mutex *lock, rt_mutex_fasttrylock() argument
1355 int (*slowfn)(struct rt_mutex *lock)) rt_mutex_fasttrylock()
1357 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { rt_mutex_fasttrylock()
1358 rt_mutex_deadlock_account_lock(lock, current); rt_mutex_fasttrylock()
1361 return slowfn(lock); rt_mutex_fasttrylock()
1365 rt_mutex_fastunlock(struct rt_mutex *lock, rt_mutex_fastunlock() argument
1366 void (*slowfn)(struct rt_mutex *lock)) rt_mutex_fastunlock()
1368 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) rt_mutex_fastunlock()
1371 slowfn(lock); rt_mutex_fastunlock()
1375 * rt_mutex_lock - lock a rt_mutex
1377 * @lock: the rt_mutex to be locked
1379 void __sched rt_mutex_lock(struct rt_mutex *lock) rt_mutex_lock() argument
1383 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); rt_mutex_lock()
1388 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
1390 * @lock: the rt_mutex to be locked
1396 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) rt_mutex_lock_interruptible() argument
1400 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); rt_mutex_lock_interruptible()
1407 int rt_mutex_timed_futex_lock(struct rt_mutex *lock, rt_mutex_timed_futex_lock() argument
1412 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, rt_mutex_timed_futex_lock()
1418 * rt_mutex_timed_lock - lock a rt_mutex interruptible
1422 * @lock: the rt_mutex to be locked
1431 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) rt_mutex_timed_lock() argument
1435 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, rt_mutex_timed_lock()
1442 * rt_mutex_trylock - try to lock a rt_mutex
1444 * @lock: the rt_mutex to be locked
1448 int __sched rt_mutex_trylock(struct rt_mutex *lock) rt_mutex_trylock() argument
1450 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); rt_mutex_trylock()
1457 * @lock: the rt_mutex to be unlocked
1459 void __sched rt_mutex_unlock(struct rt_mutex *lock) rt_mutex_unlock() argument
1461 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); rt_mutex_unlock()
1467 * @lock: the mutex to be destroyed
1473 void rt_mutex_destroy(struct rt_mutex *lock) rt_mutex_destroy() argument
1475 WARN_ON(rt_mutex_is_locked(lock)); rt_mutex_destroy()
1477 lock->magic = NULL; rt_mutex_destroy()
1484 * __rt_mutex_init - initialize the rt lock
1486 * @lock: the rt lock to be initialized
1488 * Initialize the rt lock to unlocked state.
1490 * Initializing of a locked rt lock is not allowed
1492 void __rt_mutex_init(struct rt_mutex *lock, const char *name) __rt_mutex_init() argument
1494 lock->owner = NULL; __rt_mutex_init()
1495 raw_spin_lock_init(&lock->wait_lock); __rt_mutex_init()
1496 lock->waiters = RB_ROOT; __rt_mutex_init()
1497 lock->waiters_leftmost = NULL; __rt_mutex_init()
1499 debug_rt_mutex_init(lock, name); __rt_mutex_init()
1504 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
1507 * @lock: the rt_mutex to be locked
1513 void rt_mutex_init_proxy_locked(struct rt_mutex *lock, rt_mutex_init_proxy_locked() argument
1516 __rt_mutex_init(lock, NULL); rt_mutex_init_proxy_locked()
1517 debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_init_proxy_locked()
1518 rt_mutex_set_owner(lock, proxy_owner); rt_mutex_init_proxy_locked()
1519 rt_mutex_deadlock_account_lock(lock, proxy_owner); rt_mutex_init_proxy_locked()
1523 * rt_mutex_proxy_unlock - release a lock on behalf of owner
1525 * @lock: the rt_mutex to be locked
1530 void rt_mutex_proxy_unlock(struct rt_mutex *lock, rt_mutex_proxy_unlock() argument
1533 debug_rt_mutex_proxy_unlock(lock); rt_mutex_proxy_unlock()
1534 rt_mutex_set_owner(lock, NULL); rt_mutex_proxy_unlock()
1539 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
1540 * @lock: the rt_mutex to take
1545 * 0 - task blocked on lock
1546 * 1 - acquired the lock for task, caller should wake it up
1551 int rt_mutex_start_proxy_lock(struct rt_mutex *lock, rt_mutex_start_proxy_lock() argument
1557 raw_spin_lock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1559 if (try_to_take_rt_mutex(lock, task, NULL)) { rt_mutex_start_proxy_lock()
1560 raw_spin_unlock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1565 ret = task_blocks_on_rt_mutex(lock, waiter, task, rt_mutex_start_proxy_lock()
1568 if (ret && !rt_mutex_owner(lock)) { rt_mutex_start_proxy_lock()
1572 * released the lock while we were walking the rt_mutex_start_proxy_lock()
1579 remove_waiter(lock, waiter); rt_mutex_start_proxy_lock()
1581 raw_spin_unlock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1589 * rt_mutex_next_owner - return the next owner of the lock
1591 * @lock: the rt lock query
1593 * Returns the next owner of the lock or NULL
1595 * Caller has to serialize against other accessors to the lock
1600 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) rt_mutex_next_owner() argument
1602 if (!rt_mutex_has_waiters(lock)) rt_mutex_next_owner()
1605 return rt_mutex_top_waiter(lock)->task; rt_mutex_next_owner()
1609 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
1610 * @lock: the rt_mutex we were woken on
1615 * Complete the lock acquisition started on our behalf by another thread. rt_mutex_finish_proxy_lock()
1623 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, rt_mutex_finish_proxy_lock() argument
1629 raw_spin_lock(&lock->wait_lock); rt_mutex_finish_proxy_lock()
1634 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); rt_mutex_finish_proxy_lock()
1637 remove_waiter(lock, waiter); rt_mutex_finish_proxy_lock()
1643 fixup_rt_mutex_waiters(lock); rt_mutex_finish_proxy_lock()
1645 raw_spin_unlock(&lock->wait_lock); rt_mutex_finish_proxy_lock()
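The rtmutex.c comments above describe the owner word: bit 0 is the "has waiters" flag and the remaining bits are the owner task pointer, which is why the lockless unlock in unlock_rt_mutex_safe() is a cmpxchg from the plain owner pointer to NULL and can only succeed while bit 0 is clear. A user-space sketch of that encoding with C11 atomics; the wait_lock, the waiter tree and the priority-inheritance chain walk are left out entirely, and all toy_ names are assumptions:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_HAS_WAITERS 1UL        /* bit 0 of the owner word, as in rtmutex.c */

struct toy_task { const char *name; };

struct toy_rt_mutex {
        atomic_uintptr_t owner;    /* task pointer, possibly ORed with TOY_HAS_WAITERS */
};

static struct toy_task *toy_owner(struct toy_rt_mutex *l)
{
        return (struct toy_task *)(atomic_load(&l->owner) & ~TOY_HAS_WAITERS);
}

static void toy_set_owner(struct toy_rt_mutex *l, struct toy_task *t, int has_waiters)
{
        uintptr_t val = (uintptr_t)t;

        if (has_waiters)
                val |= TOY_HAS_WAITERS;        /* keep the "has waiters" hint in bit 0 */
        atomic_store(&l->owner, val);
}

/* Fast-path unlock: succeeds only while the waiters bit is clear, mirroring
 * the cmpxchg(lock, owner, NULL) in unlock_rt_mutex_safe(). */
static int toy_unlock_fast(struct toy_rt_mutex *l, struct toy_task *me)
{
        uintptr_t expected = (uintptr_t)me;    /* owner pointer with bit 0 clear */

        return atomic_compare_exchange_strong(&l->owner, &expected, 0);
}

int main(void)
{
        struct toy_task a = { "A" };
        struct toy_rt_mutex l = { 0 };
        int ok;

        toy_set_owner(&l, &a, 0);              /* held, no waiters */
        ok = toy_unlock_fast(&l, &a);
        printf("fast unlock with no waiters: %d\n", ok);

        toy_set_owner(&l, &a, 1);              /* held, waiters queued: bit 0 set */
        ok = toy_unlock_fast(&l, &a);
        printf("fast unlock with waiters:    %d (slow path must hand over)\n", ok);
        printf("owner is still %s\n", toy_owner(&l)->name);
        return 0;
}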
H A Dmutex-debug.h14 * This must be called with lock->wait_lock held.
16 extern void debug_mutex_lock_common(struct mutex *lock,
18 extern void debug_mutex_wake_waiter(struct mutex *lock,
21 extern void debug_mutex_add_waiter(struct mutex *lock,
24 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
26 extern void debug_mutex_unlock(struct mutex *lock);
27 extern void debug_mutex_init(struct mutex *lock, const char *name,
30 static inline void mutex_set_owner(struct mutex *lock) mutex_set_owner() argument
32 lock->owner = current; mutex_set_owner()
35 static inline void mutex_clear_owner(struct mutex *lock) mutex_clear_owner() argument
37 lock->owner = NULL; mutex_clear_owner()
40 #define spin_lock_mutex(lock, flags) \
42 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
46 arch_spin_lock(&(lock)->rlock.raw_lock);\
50 #define spin_unlock_mutex(lock, flags) \
52 arch_spin_unlock(&(lock)->rlock.raw_lock); \
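The spin_lock_mutex() macro above recovers the enclosing struct mutex from a pointer to its embedded wait_lock via container_of(), i.e. by subtracting the member's offset. A small sketch of that pointer arithmetic with illustrative toy_ names:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing structure
 * from a pointer to one of its members. */
#define toy_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_mutex {
        int count;
        int wait_lock;             /* stand-in for the embedded spinlock */
};

int main(void)
{
        struct toy_mutex m = { 1, 0 };
        int *wl = &m.wait_lock;    /* all that a callee might be handed */
        struct toy_mutex *back = toy_container_of(wl, struct toy_mutex, wait_lock);

        printf("recovered the mutex: %s (count=%d)\n",
               back == &m ? "yes" : "no", back->count);
        return 0;
}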
H A Drtmutex-debug.c41 static void printk_lock(struct rt_mutex *lock, int print_owner) printk_lock() argument
43 if (lock->name) printk_lock()
45 lock, lock->name); printk_lock()
48 lock, lock->file, lock->line); printk_lock()
50 if (print_owner && rt_mutex_owner(lock)) { printk_lock()
51 printk(".. ->owner: %p\n", lock->owner); printk_lock()
53 printk_task(rt_mutex_owner(lock)); printk_lock()
71 struct rt_mutex *lock) debug_rt_mutex_deadlock()
78 task = rt_mutex_owner(act_waiter->lock); debug_rt_mutex_deadlock()
81 act_waiter->deadlock_lock = lock; debug_rt_mutex_deadlock()
112 printk("\n1) %s/%d is trying to acquire this lock:\n", debug_rt_mutex_print_deadlock()
114 printk_lock(waiter->lock, 1); debug_rt_mutex_print_deadlock()
116 printk("\n2) %s/%d is blocked on this lock:\n", debug_rt_mutex_print_deadlock()
136 void debug_rt_mutex_lock(struct rt_mutex *lock) debug_rt_mutex_lock() argument
140 void debug_rt_mutex_unlock(struct rt_mutex *lock) debug_rt_mutex_unlock() argument
142 DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); debug_rt_mutex_unlock()
146 debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) debug_rt_mutex_proxy_lock() argument
150 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) debug_rt_mutex_proxy_unlock() argument
152 DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); debug_rt_mutex_proxy_unlock()
167 void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) debug_rt_mutex_init() argument
170 * Make sure we are not reinitializing a held lock: debug_rt_mutex_init()
172 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); debug_rt_mutex_init()
173 lock->name = name; debug_rt_mutex_init()
177 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) rt_mutex_deadlock_account_lock() argument
69 debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, struct rt_mutex_waiter *act_waiter, struct rt_mutex *lock) debug_rt_mutex_deadlock() argument
H A Dqrwlock.c2 * Queue read/write lock
27 * @lock : Pointer to queue rwlock structure
31 * increment the reader count & wait until the writer releases the lock.
34 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts) rspin_until_writer_unlock() argument
38 cnts = smp_load_acquire((u32 *)&lock->cnts); rspin_until_writer_unlock()
43 * queue_read_lock_slowpath - acquire read lock of a queue rwlock
44 * @lock: Pointer to queue rwlock structure
46 void queue_read_lock_slowpath(struct qrwlock *lock) queue_read_lock_slowpath() argument
51 * Readers come here when they cannot get the lock without waiting queue_read_lock_slowpath()
55 * Readers in interrupt context will spin until the lock is queue_read_lock_slowpath()
58 cnts = smp_load_acquire((u32 *)&lock->cnts); queue_read_lock_slowpath()
59 rspin_until_writer_unlock(lock, cnts); queue_read_lock_slowpath()
62 atomic_sub(_QR_BIAS, &lock->cnts); queue_read_lock_slowpath()
67 arch_spin_lock(&lock->lock); queue_read_lock_slowpath()
72 * the lock. It is possible that an incoming writer may steal the queue_read_lock_slowpath()
73 * lock in the interim, so it is necessary to check the writer byte queue_read_lock_slowpath()
74 * to make sure that the write lock isn't taken. queue_read_lock_slowpath()
76 while (atomic_read(&lock->cnts) & _QW_WMASK) queue_read_lock_slowpath()
79 cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS; queue_read_lock_slowpath()
80 rspin_until_writer_unlock(lock, cnts); queue_read_lock_slowpath()
85 arch_spin_unlock(&lock->lock); queue_read_lock_slowpath()
90 * queue_write_lock_slowpath - acquire write lock of a queue rwlock
91 * @lock : Pointer to queue rwlock structure
93 void queue_write_lock_slowpath(struct qrwlock *lock) queue_write_lock_slowpath() argument
98 arch_spin_lock(&lock->lock); queue_write_lock_slowpath()
100 /* Try to acquire the lock directly if no reader is present */ queue_write_lock_slowpath()
101 if (!atomic_read(&lock->cnts) && queue_write_lock_slowpath()
102 (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)) queue_write_lock_slowpath()
110 cnts = atomic_read(&lock->cnts); queue_write_lock_slowpath()
112 (atomic_cmpxchg(&lock->cnts, cnts, queue_write_lock_slowpath()
121 cnts = atomic_read(&lock->cnts); queue_write_lock_slowpath()
123 (atomic_cmpxchg(&lock->cnts, _QW_WAITING, queue_write_lock_slowpath()
130 arch_spin_unlock(&lock->lock); queue_write_lock_slowpath()
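The qrwlock slowpaths above split the counter into a writer byte (_QW_WMASK) and a reader count above it (_QR_BIAS per reader): a reader announces itself with an atomic add and then, as in rspin_until_writer_unlock(), waits until the writer byte no longer shows an active writer. A user-space sketch of that reader side only, assuming the usual low-byte-for-writer layout; the queueing on lock->lock, the writer slowpath and the exact constant values are assumptions, not taken from the file:

#include <stdatomic.h>
#include <stdio.h>

/* Assumed layout: low byte = writer state, readers counted above it. */
#define TOY_QW_LOCKED  0xffU           /* a writer holds the lock */
#define TOY_QW_WMASK   0xffU
#define TOY_QR_BIAS    (1U << 8)       /* one reader */

struct toy_qrwlock { atomic_uint cnts; };

/* A reader that has already bumped the count just waits until the writer
 * byte stops indicating an active writer. */
static void toy_wait_for_writer(struct toy_qrwlock *l)
{
        while ((atomic_load(&l->cnts) & TOY_QW_WMASK) == TOY_QW_LOCKED)
                ;                      /* cpu_relax() would go here */
}

static void toy_read_lock(struct toy_qrwlock *l)
{
        atomic_fetch_add(&l->cnts, TOY_QR_BIAS);   /* announce the reader */
        toy_wait_for_writer(l);                    /* then wait the writer out */
}

static void toy_read_unlock(struct toy_qrwlock *l)
{
        atomic_fetch_sub(&l->cnts, TOY_QR_BIAS);
}

int main(void)
{
        struct toy_qrwlock l = { 0 };

        toy_read_lock(&l);
        printf("cnts after read_lock:   0x%x\n", atomic_load(&l.cnts));
        toy_read_unlock(&l);
        printf("cnts after read_unlock: 0x%x\n", atomic_load(&l.cnts));
        return 0;
}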
H A Dmcs_spinlock.h2 * MCS lock defines
4 * This file contains the main data structure and API definitions of MCS lock.
6 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
8 * to acquire the lock spinning on a local variable.
9 * It avoids expensive cache bouncings that common test-and-set spin-lock
19 int locked; /* 1 if lock acquired */
25 * subsequent operations happen after the lock is acquired.
54 * In order to acquire the lock, the caller should declare a local node and
55 * pass a reference of the node to this function in addition to the lock.
56 * If the lock has already been acquired, then this will proceed to spin
57 * on this node->locked until the previous lock holder sets the node->locked
61 void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) mcs_spin_lock() argument
69 prev = xchg(lock, node); mcs_spin_lock()
73 * only spin on its own node->locked value for lock acquisition. mcs_spin_lock()
74 * However, since this thread can immediately acquire the lock mcs_spin_lock()
77 * audit lock status, then set node->locked value here. mcs_spin_lock()
83 /* Wait until the lock holder passes the lock down. */ mcs_spin_lock()
88 * Releases the lock. The caller should pass in the corresponding node that
89 * was used to acquire the lock.
92 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) mcs_spin_unlock() argument
98 * Release the lock by setting it to NULL mcs_spin_unlock()
100 if (likely(cmpxchg(lock, node, NULL) == node)) mcs_spin_unlock()
107 /* Pass lock to next waiter. */ mcs_spin_unlock()
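The mcs_spinlock.h comments above capture the point of an MCS lock: waiters form a queue and each spins only on a flag in its own node, so a lock hand-off touches one waiter's cache line instead of bouncing a shared word. A compact user-space sketch with C11 atomics, using sequentially consistent operations throughout for simplicity (the kernel version is more careful about ordering); the toy_ names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_mcs_node {
        struct toy_mcs_node *_Atomic next;
        atomic_bool locked;            /* becomes true when the lock is passed to us */
};

/* The lock itself is just a pointer to the tail of the queue (NULL = free). */
typedef struct toy_mcs_node *_Atomic toy_mcs_lock;

static void toy_mcs_lock_acquire(toy_mcs_lock *lock, struct toy_mcs_node *node)
{
        struct toy_mcs_node *prev;

        atomic_store(&node->next, NULL);
        atomic_store(&node->locked, false);

        /* Swap ourselves in as the new tail; the old tail is our predecessor. */
        prev = atomic_exchange(lock, node);
        if (!prev)
                return;                        /* queue was empty: lock acquired */

        /* Link behind the predecessor and spin on our own flag only. */
        atomic_store(&prev->next, node);
        while (!atomic_load(&node->locked))
                ;                              /* cpu_relax() would go here */
}

static void toy_mcs_lock_release(toy_mcs_lock *lock, struct toy_mcs_node *node)
{
        struct toy_mcs_node *next = atomic_load(&node->next);

        if (!next) {
                /* No known successor: try to reset the tail to NULL. */
                struct toy_mcs_node *expected = node;

                if (atomic_compare_exchange_strong(lock, &expected, NULL))
                        return;
                /* A successor is enqueueing; wait for it to link itself. */
                while (!(next = atomic_load(&node->next)))
                        ;
        }
        atomic_store(&next->locked, true);     /* pass the lock down the queue */
}

int main(void)
{
        toy_mcs_lock lock = NULL;
        struct toy_mcs_node me;

        toy_mcs_lock_acquire(&lock, &me);
        toy_mcs_lock_release(&lock, &me);
        puts("mcs lock/unlock ok");
        return 0;
}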
H A Dmutex.c50 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) __mutex_init() argument
52 atomic_set(&lock->count, 1); __mutex_init()
53 spin_lock_init(&lock->wait_lock); __mutex_init()
54 INIT_LIST_HEAD(&lock->wait_list); __mutex_init()
55 mutex_clear_owner(lock); __mutex_init()
57 osq_lock_init(&lock->osq); __mutex_init()
60 debug_mutex_init(lock, name, key); __mutex_init()
67 * We split the mutex lock/unlock logic into separate fastpath and
76 * @lock: the mutex to be acquired
95 void __sched mutex_lock(struct mutex *lock) mutex_lock() argument
102 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); mutex_lock()
103 mutex_set_owner(lock); mutex_lock()
150 * After acquiring lock with fastpath or when we lost out in contested
157 ww_mutex_set_context_fastpath(struct ww_mutex *lock, ww_mutex_set_context_fastpath() argument
163 ww_mutex_lock_acquired(lock, ctx); ww_mutex_set_context_fastpath()
165 lock->ctx = ctx; ww_mutex_set_context_fastpath()
168 * The lock->ctx update should be visible on all cores before ww_mutex_set_context_fastpath()
177 * Check if lock is contended, if not there is nobody to wake up ww_mutex_set_context_fastpath()
179 if (likely(atomic_read(&lock->base.count) == 0)) ww_mutex_set_context_fastpath()
184 * so they can see the new lock->ctx. ww_mutex_set_context_fastpath()
186 spin_lock_mutex(&lock->base.wait_lock, flags); ww_mutex_set_context_fastpath()
187 list_for_each_entry(cur, &lock->base.wait_list, list) { ww_mutex_set_context_fastpath()
188 debug_mutex_wake_waiter(&lock->base, cur); ww_mutex_set_context_fastpath()
191 spin_unlock_mutex(&lock->base.wait_lock, flags); ww_mutex_set_context_fastpath()
195 * After acquiring lock in the slowpath set ctx and wake up any
201 ww_mutex_set_context_slowpath(struct ww_mutex *lock, ww_mutex_set_context_slowpath() argument
206 ww_mutex_lock_acquired(lock, ctx); ww_mutex_set_context_slowpath()
207 lock->ctx = ctx; ww_mutex_set_context_slowpath()
213 list_for_each_entry(cur, &lock->base.wait_list, list) { ww_mutex_set_context_slowpath()
214 debug_mutex_wake_waiter(&lock->base, cur); ww_mutex_set_context_slowpath()
225 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) mutex_spin_on_owner() argument
230 while (lock->owner == owner) { mutex_spin_on_owner()
233 * checking lock->owner still matches owner. If that fails, mutex_spin_on_owner()
254 static inline int mutex_can_spin_on_owner(struct mutex *lock) mutex_can_spin_on_owner() argument
263 owner = READ_ONCE(lock->owner); mutex_can_spin_on_owner()
268 * if lock->owner is not set, the mutex owner may have just acquired mutex_can_spin_on_owner()
275 * Atomically try to take the lock when it is available
277 static inline bool mutex_try_to_acquire(struct mutex *lock) mutex_try_to_acquire() argument
279 return !mutex_is_locked(lock) && mutex_try_to_acquire()
280 (atomic_cmpxchg(&lock->count, 1, 0) == 1); mutex_try_to_acquire()
286 * We try to spin for acquisition when we find that the lock owner
288 * need to reschedule. The rationale is that if the lock owner is
289 * running, it is likely to release the lock soon.
291 * Since this needs the lock owner, and this mutex implementation
292 * doesn't track the owner atomically in the lock field, we need to
298 * The mutex spinners are queued up using MCS lock so that only one
300 * going to happen, there is no point in going through the lock/unlock
303 * Returns true when the lock was taken, otherwise false, indicating
306 static bool mutex_optimistic_spin(struct mutex *lock, mutex_optimistic_spin() argument
311 if (!mutex_can_spin_on_owner(lock)) mutex_optimistic_spin()
317 * MCS (queued) lock first before spinning on the owner field. mutex_optimistic_spin()
319 if (!osq_lock(&lock->osq)) mutex_optimistic_spin()
328 ww = container_of(lock, struct ww_mutex, base); mutex_optimistic_spin()
343 * release the lock or go to sleep. mutex_optimistic_spin()
345 owner = READ_ONCE(lock->owner); mutex_optimistic_spin()
346 if (owner && !mutex_spin_on_owner(lock, owner)) mutex_optimistic_spin()
350 if (mutex_try_to_acquire(lock)) { mutex_optimistic_spin()
351 lock_acquired(&lock->dep_map, ip); mutex_optimistic_spin()
355 ww = container_of(lock, struct ww_mutex, base); mutex_optimistic_spin()
360 mutex_set_owner(lock); mutex_optimistic_spin()
361 osq_unlock(&lock->osq); mutex_optimistic_spin()
367 * owner acquiring the lock and setting the owner field. If mutex_optimistic_spin()
368 * we're an RT task that will live-lock because we won't let mutex_optimistic_spin()
383 osq_unlock(&lock->osq); mutex_optimistic_spin()
387 * reschedule now, before we try-lock the mutex. This avoids getting mutex_optimistic_spin()
402 static bool mutex_optimistic_spin(struct mutex *lock, mutex_optimistic_spin() argument
414 * @lock: the mutex to be released
423 void __sched mutex_unlock(struct mutex *lock) mutex_unlock() argument
435 mutex_clear_owner(lock); mutex_unlock()
437 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); mutex_unlock()
444 * @lock: the mutex to be released
453 void __sched ww_mutex_unlock(struct ww_mutex *lock) ww_mutex_unlock() argument
459 if (lock->ctx) { ww_mutex_unlock()
461 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); ww_mutex_unlock()
463 if (lock->ctx->acquired > 0) ww_mutex_unlock()
464 lock->ctx->acquired--; ww_mutex_unlock()
465 lock->ctx = NULL; ww_mutex_unlock()
474 mutex_clear_owner(&lock->base); ww_mutex_unlock()
476 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath); ww_mutex_unlock()
481 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock_check_stamp() argument
483 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); __ww_mutex_lock_check_stamp()
505 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, __mutex_lock_common() argument
515 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); __mutex_lock_common()
521 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); __mutex_lock_common()
523 if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) { __mutex_lock_common()
524 /* got the lock, yay! */ __mutex_lock_common()
529 spin_lock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
532 * Once more, try to acquire the lock. Only try-lock the mutex if __mutex_lock_common()
535 if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1)) __mutex_lock_common()
538 debug_mutex_lock_common(lock, &waiter); __mutex_lock_common()
539 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); __mutex_lock_common()
542 list_add_tail(&waiter.list, &lock->wait_list); __mutex_lock_common()
545 lock_contended(&lock->dep_map, ip); __mutex_lock_common()
549 * Lets try to take the lock again - this is needed even if __mutex_lock_common()
551 * acquire the lock), to make sure that we get a wakeup once __mutex_lock_common()
553 * operation that gives us the lock. We xchg it to -1, so __mutex_lock_common()
554 * that when we release the lock, we properly wake up the __mutex_lock_common()
558 if (atomic_read(&lock->count) >= 0 && __mutex_lock_common()
559 (atomic_xchg(&lock->count, -1) == 1)) __mutex_lock_common()
572 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx); __mutex_lock_common()
579 /* didn't get the lock, go to sleep: */ __mutex_lock_common()
580 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
582 spin_lock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
586 mutex_remove_waiter(lock, &waiter, current_thread_info()); __mutex_lock_common()
588 if (likely(list_empty(&lock->wait_list))) __mutex_lock_common()
589 atomic_set(&lock->count, 0); __mutex_lock_common()
593 /* got the lock - cleanup and rejoice! */ __mutex_lock_common()
594 lock_acquired(&lock->dep_map, ip); __mutex_lock_common()
595 mutex_set_owner(lock); __mutex_lock_common()
598 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); __mutex_lock_common()
602 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
607 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); __mutex_lock_common()
608 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
610 mutex_release(&lock->dep_map, 1, ip); __mutex_lock_common()
617 mutex_lock_nested(struct mutex *lock, unsigned int subclass) mutex_lock_nested() argument
620 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, mutex_lock_nested()
627 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) _mutex_lock_nest_lock() argument
630 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, _mutex_lock_nest_lock()
637 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) mutex_lock_killable_nested() argument
640 return __mutex_lock_common(lock, TASK_KILLABLE, mutex_lock_killable_nested()
646 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) mutex_lock_interruptible_nested() argument
649 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, mutex_lock_interruptible_nested()
656 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_deadlock_injection() argument
670 ctx->contending_lock = lock; ww_mutex_deadlock_injection()
672 ww_mutex_unlock(lock); ww_mutex_deadlock_injection()
682 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock() argument
687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, __ww_mutex_lock()
690 return ww_mutex_deadlock_injection(lock, ctx); __ww_mutex_lock()
697 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock_interruptible() argument
702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, __ww_mutex_lock_interruptible()
706 return ww_mutex_deadlock_injection(lock, ctx); __ww_mutex_lock_interruptible()
715 * Release the lock, slowpath:
718 __mutex_unlock_common_slowpath(struct mutex *lock, int nested) __mutex_unlock_common_slowpath() argument
723 * As a performance measure, release the lock before doing other __mutex_unlock_common_slowpath()
725 * the lock sooner, while still handling cleanups in past unlock calls. __mutex_unlock_common_slowpath()
730 * Some architectures leave the lock unlocked in the fastpath failure __mutex_unlock_common_slowpath()
732 * unlock it here - as the lock counter is currently 0 or negative. __mutex_unlock_common_slowpath()
735 atomic_set(&lock->count, 1); __mutex_unlock_common_slowpath()
737 spin_lock_mutex(&lock->wait_lock, flags); __mutex_unlock_common_slowpath()
738 mutex_release(&lock->dep_map, nested, _RET_IP_); __mutex_unlock_common_slowpath()
739 debug_mutex_unlock(lock); __mutex_unlock_common_slowpath()
741 if (!list_empty(&lock->wait_list)) { __mutex_unlock_common_slowpath()
744 list_entry(lock->wait_list.next, __mutex_unlock_common_slowpath()
747 debug_mutex_wake_waiter(lock, waiter); __mutex_unlock_common_slowpath()
752 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_unlock_common_slowpath()
756 * Release the lock, slowpath:
761 struct mutex *lock = container_of(lock_count, struct mutex, count); __mutex_unlock_slowpath() local
763 __mutex_unlock_common_slowpath(lock, 1); __mutex_unlock_slowpath()
772 __mutex_lock_killable_slowpath(struct mutex *lock);
775 __mutex_lock_interruptible_slowpath(struct mutex *lock);
779 * @lock: the mutex to be acquired
783 * signal arrives while waiting for the lock then this function
788 int __sched mutex_lock_interruptible(struct mutex *lock) mutex_lock_interruptible() argument
793 ret = __mutex_fastpath_lock_retval(&lock->count); mutex_lock_interruptible()
795 mutex_set_owner(lock); mutex_lock_interruptible()
798 return __mutex_lock_interruptible_slowpath(lock); mutex_lock_interruptible()
803 int __sched mutex_lock_killable(struct mutex *lock) mutex_lock_killable() argument
808 ret = __mutex_fastpath_lock_retval(&lock->count); mutex_lock_killable()
810 mutex_set_owner(lock); mutex_lock_killable()
813 return __mutex_lock_killable_slowpath(lock); mutex_lock_killable()
820 struct mutex *lock = container_of(lock_count, struct mutex, count); __mutex_lock_slowpath() local
822 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, __mutex_lock_slowpath()
827 __mutex_lock_killable_slowpath(struct mutex *lock) __mutex_lock_killable_slowpath() argument
829 return __mutex_lock_common(lock, TASK_KILLABLE, 0, __mutex_lock_killable_slowpath()
834 __mutex_lock_interruptible_slowpath(struct mutex *lock) __mutex_lock_interruptible_slowpath() argument
836 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, __mutex_lock_interruptible_slowpath()
841 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock_slowpath() argument
843 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, __ww_mutex_lock_slowpath()
848 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, __ww_mutex_lock_interruptible_slowpath() argument
851 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, __ww_mutex_lock_interruptible_slowpath()
859 * can get the lock:
863 struct mutex *lock = container_of(lock_count, struct mutex, count); __mutex_trylock_slowpath() local
868 if (mutex_is_locked(lock)) __mutex_trylock_slowpath()
871 spin_lock_mutex(&lock->wait_lock, flags); __mutex_trylock_slowpath()
873 prev = atomic_xchg(&lock->count, -1); __mutex_trylock_slowpath()
875 mutex_set_owner(lock); __mutex_trylock_slowpath()
876 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); __mutex_trylock_slowpath()
880 if (likely(list_empty(&lock->wait_list))) __mutex_trylock_slowpath()
881 atomic_set(&lock->count, 0); __mutex_trylock_slowpath()
883 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_trylock_slowpath()
890 * @lock: the mutex to be acquired
902 int __sched mutex_trylock(struct mutex *lock) mutex_trylock() argument
906 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); mutex_trylock()
908 mutex_set_owner(lock); mutex_trylock()
916 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock() argument
922 ret = __mutex_fastpath_lock_retval(&lock->base.count); __ww_mutex_lock()
925 ww_mutex_set_context_fastpath(lock, ctx); __ww_mutex_lock()
926 mutex_set_owner(&lock->base); __ww_mutex_lock()
928 ret = __ww_mutex_lock_slowpath(lock, ctx); __ww_mutex_lock()
934 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) __ww_mutex_lock_interruptible() argument
940 ret = __mutex_fastpath_lock_retval(&lock->base.count); __ww_mutex_lock_interruptible()
943 ww_mutex_set_context_fastpath(lock, ctx); __ww_mutex_lock_interruptible()
944 mutex_set_owner(&lock->base); __ww_mutex_lock_interruptible()
946 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx); __ww_mutex_lock_interruptible()
956 * @lock: the mutex to return holding if we dec to 0
958 * return true and hold the lock if we dec to 0, return false otherwise
960 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) atomic_dec_and_mutex_lock() argument
965 /* we might hit 0, so take the lock */ atomic_dec_and_mutex_lock()
966 mutex_lock(lock); atomic_dec_and_mutex_lock()
969 mutex_unlock(lock); atomic_dec_and_mutex_lock()
972 /* we hit 0, and we hold the lock */ atomic_dec_and_mutex_lock()
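Editor's note: atomic_dec_and_mutex_lock() above is the classic "take the lock only on the final decrement" pattern: decrement lock-free while the count stays above 1, and only when the drop might reach zero take the mutex and decrement under it. The sketch below shows the same pattern with C11 atomics and a pthread mutex; dec_and_lock() and its types are hypothetical names, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

/* returns true, with *lock held, only for the caller that drops cnt to 0 */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        int old = atomic_load(cnt);

        /* fast path: the decrement cannot reach zero, no lock needed */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;
        }

        /* we might hit 0, so take the lock and decrement under it */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) != 1) {
                pthread_mutex_unlock(lock);
                return false;
        }
        return true;    /* count hit 0 and we still hold the lock */
}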
H A Drtmutex-debug.h13 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
17 extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
18 extern void debug_rt_mutex_lock(struct rt_mutex *lock);
19 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
20 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
22 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
25 struct rt_mutex *lock);
H A Drtmutex_common.h21 * threads to provoke lock stealing and testing of complex boosting scenarios.
25 extern void schedule_rt_mutex_test(struct rt_mutex *lock);
51 struct rt_mutex *lock; member in struct:rt_mutex_waiter
63 static inline int rt_mutex_has_waiters(struct rt_mutex *lock) rt_mutex_has_waiters() argument
65 return !RB_EMPTY_ROOT(&lock->waiters); rt_mutex_has_waiters()
69 rt_mutex_top_waiter(struct rt_mutex *lock) rt_mutex_top_waiter() argument
73 w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter, rt_mutex_top_waiter()
75 BUG_ON(w->lock != lock); rt_mutex_top_waiter()
93 * lock->owner state tracking:
98 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) rt_mutex_owner() argument
101 ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL); rt_mutex_owner()
108 * RT_MUTEX_MIN_CHAINWALK: Stops the lock chain walk when there are
112 * walk of the lock chain.
122 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
123 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
125 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
127 extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
130 extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
H A Drwsem-xadd.c6 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
25 * 0x0000000X (1) X readers active or attempting lock, no writer waiting
26 * X = #active_readers + #readers attempting to lock
29 * 0x00000000 rwsem is unlocked, and no one is waiting for the lock or
30 * attempting to read lock or write lock.
32 * 0xffff000X (1) X readers active or attempting lock, with waiters for lock
33 * X = #active readers + # readers attempting lock
35 * (2) 1 writer attempting lock, no waiters for lock
36 * X-1 = #active readers + #readers attempting lock
38 * (3) 1 writer active, no waiters for lock
39 * X-1 = #active readers + #readers attempting lock
42 * 0xffff0001 (1) 1 reader active or attempting lock, waiters for lock
44 * (2) 1 writer active or attempting lock, no waiters for lock
48 * or in the process of attempting lock.
50 * Note: writer can attempt to steal lock for this count by adding
53 * 0xfffe0001 (1) 1 writer active, or attempting lock. Waiters on queue.
56 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
57 * that the count becomes greater than 0 for successful lock acquisition,
58 * i.e. the case where there are only readers or nobody has the lock.
61 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
62 * checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
63 * acquisition (i.e. nobody else has the lock or attempts the lock). If
66 * steal the lock.
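Editor's note: the table above packs the active reader/writer count into the low bits and the "waiters present" state into the high bits of a single word, so the sign and magnitude of one atomic value answer both "can a reader get in?" and "can a writer get in?". The sketch below illustrates that arithmetic with C11 atomics, following the 32-bit layout described above (ACTIVE_BIAS = 1, WAITING_BIAS = -0x10000); the constants and the toy_rwsem/toy_* names are illustrative, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_BIAS         1L            /* one active/attempting reader or writer */
#define WAITING_BIAS        (-0x10000L)   /* somebody is queued waiting             */
#define ACTIVE_WRITE_BIAS   (WAITING_BIAS + ACTIVE_BIAS)

struct toy_rwsem { atomic_long count; };  /* 0 == unlocked, nobody waiting */

static bool toy_down_read_trylock(struct toy_rwsem *sem)
{
        long c = atomic_load(&sem->count);

        /* count >= 0: only readers (or nobody) hold the lock and no one waits */
        while (c >= 0) {
                if (atomic_compare_exchange_weak(&sem->count, &c, c + ACTIVE_BIAS))
                        return true;      /* new count > 0: read lock granted */
        }
        return false;                     /* writer active or waiters queued */
}

static bool toy_down_write_trylock(struct toy_rwsem *sem)
{
        long idle = 0;                    /* succeeds only when completely idle */

        return atomic_compare_exchange_strong(&sem->count, &idle,
                                              ACTIVE_WRITE_BIAS);
}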
108 RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
112 * handle the lock release when processes blocked on it that can now run
133 * grant it the lock yet as we want other writers __rwsem_do_wake()
141 /* Writers might steal the lock before we grant it to the next reader. __rwsem_do_wake()
143 * so we can bail out early if a writer stole the lock. __rwsem_do_wake()
151 /* A writer stole the lock. Undo our reader grant. */ __rwsem_do_wake()
211 * Wait for the read lock to be granted
230 /* we're now waiting on the lock, but no longer actively locking */ rwsem_down_read_failed()
245 /* wait to be given the lock */ rwsem_down_read_failed()
261 * Try acquiring the write lock. Check count first in order rwsem_try_write_lock()
278 * Try to acquire write lock before the writer has been put on wait queue.
312 * slowpath with the lock being active, then there is a possibility rwsem_can_spin_on_owner()
313 * reader(s) may have the lock. To be safe, bail spinning in these rwsem_can_spin_on_owner()
356 * When the owner is not set, the lock could be free or rwsem_spin_on_owner()
391 * owner acquiring the lock and setting the owner field. If rwsem_optimistic_spin()
392 * we're an RT task that will live-lock because we won't let rwsem_optimistic_spin()
420 * Wait until we successfully acquire the write lock
432 /* do optimistic spinning and steal lock if possible */ rwsem_down_write_failed()
451 /* we're now waiting on the lock, but no longer actively locking */ rwsem_down_write_failed()
457 * no active writers, the lock must be read owned; so we try to rwsem_down_write_failed()
466 /* wait until we successfully acquire the lock */ rwsem_down_write_failed()
512 * downgrade a write lock into a read lock
H A Dosq_lock.c6 * An MCS like lock especially tailored for optimistic spinning for sleeping
7 * lock implementations (mutex, rwsem, etc).
33 * Can return NULL in case we were the last queued and we updated @lock instead.
36 osq_wait_next(struct optimistic_spin_queue *lock, osq_wait_next() argument
52 if (atomic_read(&lock->tail) == curr && osq_wait_next()
53 atomic_cmpxchg(&lock->tail, curr, old) == curr) { osq_wait_next()
55 * We were the last queued, we moved @lock back. @prev osq_wait_next()
56 * will now observe @lock and will complete its osq_wait_next()
69 * wait for either @lock to point to us, through its Step-B, or osq_wait_next()
84 bool osq_lock(struct optimistic_spin_queue *lock) osq_lock() argument
95 old = atomic_xchg(&lock->tail, curr); osq_lock()
128 * unlock()/unqueue() wait for a next pointer since @lock points to us osq_lock()
157 * Similar to unlock(), wait for @node->next or move @lock from @node osq_lock()
161 next = osq_wait_next(lock, node, prev); osq_lock()
179 void osq_unlock(struct optimistic_spin_queue *lock) osq_unlock() argument
187 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr)) osq_unlock()
200 next = osq_wait_next(lock, node, NULL); osq_unlock()
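Editor's note: osq_lock() above is an MCS-style queued lock: each spinner enqueues a per-CPU node on @lock->tail and then spins on its own node until its predecessor hands the lock over, while the osq_wait_next()/unqueue machinery lets a spinner abort when it needs to reschedule. For comparison, the sketch below is a plain MCS lock in C11 atomics without that unqueue path; the mcs_* names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool                locked;  /* true while we must keep spinning */
};

struct mcs_lock { _Atomic(struct mcs_node *) tail; };

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *me)
{
        atomic_store(&me->next, NULL);
        atomic_store(&me->locked, true);

        struct mcs_node *prev = atomic_exchange(&lock->tail, me);
        if (prev == NULL)
                return;                      /* queue was empty: lock is ours */

        atomic_store(&prev->next, me);       /* link in; predecessor will hand off */
        while (atomic_load(&me->locked))
                ;                            /* spin on our own node only */
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *me)
{
        struct mcs_node *next = atomic_load(&me->next);

        if (next == NULL) {
                struct mcs_node *expected = me;  /* no successor visible: try to clear tail */
                if (atomic_compare_exchange_strong(&lock->tail, &expected, NULL))
                        return;
                while ((next = atomic_load(&me->next)) == NULL)
                        ;                    /* successor is mid-enqueue; wait for the link */
        }
        atomic_store(&next->locked, false);  /* hand the lock to the successor */
}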
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
H A Dldlm_lock.c49 /* lock types */
93 * Converts lock policy from local format to on the wire lock_desc format
107 * Converts lock policy from on the wire lock_desc format to local format
167 * Get a reference on a lock.
171 * - one for being a lock that's in-use
172 * - one for the addref associated with a new lock
174 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock) ldlm_lock_get() argument
176 atomic_inc(&lock->l_refc); ldlm_lock_get()
177 return lock; ldlm_lock_get()
182 * Release lock reference.
184 * Also frees the lock if it was last reference.
186 void ldlm_lock_put(struct ldlm_lock *lock) ldlm_lock_put() argument
188 LASSERT(lock->l_resource != LP_POISON); ldlm_lock_put()
189 LASSERT(atomic_read(&lock->l_refc) > 0); ldlm_lock_put()
190 if (atomic_dec_and_test(&lock->l_refc)) { ldlm_lock_put()
193 LDLM_DEBUG(lock, ldlm_lock_put()
194 "final lock_put on destroyed lock, freeing it."); ldlm_lock_put()
196 res = lock->l_resource; ldlm_lock_put()
197 LASSERT(lock->l_flags & LDLM_FL_DESTROYED); ldlm_lock_put()
198 LASSERT(list_empty(&lock->l_res_link)); ldlm_lock_put()
199 LASSERT(list_empty(&lock->l_pending_chain)); ldlm_lock_put()
203 lu_ref_del(&res->lr_reference, "lock", lock); ldlm_lock_put()
205 lock->l_resource = NULL; ldlm_lock_put()
206 if (lock->l_export) { ldlm_lock_put()
207 class_export_lock_put(lock->l_export, lock); ldlm_lock_put()
208 lock->l_export = NULL; ldlm_lock_put()
211 if (lock->l_lvb_data != NULL) ldlm_lock_put()
212 OBD_FREE(lock->l_lvb_data, lock->l_lvb_len); ldlm_lock_put()
214 ldlm_interval_free(ldlm_interval_detach(lock)); ldlm_lock_put()
215 lu_ref_fini(&lock->l_reference); ldlm_lock_put()
216 OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle); ldlm_lock_put()
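Editor's note: ldlm_lock_get()/ldlm_lock_put() above are a standard atomic refcount where the final put detaches the lock from its resource and frees it. A minimal generic sketch of that get/put discipline follows; obj_get()/obj_put() are hypothetical names, not Lustre API.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refc;
        /* ... payload ... */
};

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refc, 1);
        return o;                 /* like ldlm_lock_get(): hand back the argument */
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refc, 1) == 1) {
                /* last reference: detach from any containers, then free */
                free(o);
        }
}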
222 * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
224 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock) ldlm_lock_remove_from_lru_nolock() argument
228 if (!list_empty(&lock->l_lru)) { ldlm_lock_remove_from_lru_nolock()
229 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lock_remove_from_lru_nolock()
231 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); ldlm_lock_remove_from_lru_nolock()
232 list_del_init(&lock->l_lru); ldlm_lock_remove_from_lru_nolock()
241 * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
243 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock) ldlm_lock_remove_from_lru() argument
245 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lock_remove_from_lru()
248 if (lock->l_flags & LDLM_FL_NS_SRV) { ldlm_lock_remove_from_lru()
249 LASSERT(list_empty(&lock->l_lru)); ldlm_lock_remove_from_lru()
254 rc = ldlm_lock_remove_from_lru_nolock(lock); ldlm_lock_remove_from_lru()
260 * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
262 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock) ldlm_lock_add_to_lru_nolock() argument
264 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lock_add_to_lru_nolock()
266 lock->l_last_used = cfs_time_current(); ldlm_lock_add_to_lru_nolock()
267 LASSERT(list_empty(&lock->l_lru)); ldlm_lock_add_to_lru_nolock()
268 LASSERT(lock->l_resource->lr_type != LDLM_FLOCK); ldlm_lock_add_to_lru_nolock()
269 list_add_tail(&lock->l_lru, &ns->ns_unused_list); ldlm_lock_add_to_lru_nolock()
270 if (lock->l_flags & LDLM_FL_SKIPPED) ldlm_lock_add_to_lru_nolock()
271 lock->l_flags &= ~LDLM_FL_SKIPPED; ldlm_lock_add_to_lru_nolock()
277 * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
280 void ldlm_lock_add_to_lru(struct ldlm_lock *lock) ldlm_lock_add_to_lru() argument
282 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lock_add_to_lru()
285 ldlm_lock_add_to_lru_nolock(lock); ldlm_lock_add_to_lru()
290 * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
293 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock) ldlm_lock_touch_in_lru() argument
295 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lock_touch_in_lru()
297 if (lock->l_flags & LDLM_FL_NS_SRV) { ldlm_lock_touch_in_lru()
298 LASSERT(list_empty(&lock->l_lru)); ldlm_lock_touch_in_lru()
303 if (!list_empty(&lock->l_lru)) { ldlm_lock_touch_in_lru()
304 ldlm_lock_remove_from_lru_nolock(lock); ldlm_lock_touch_in_lru()
305 ldlm_lock_add_to_lru_nolock(lock); ldlm_lock_touch_in_lru()
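Editor's note: the helpers above implement the client lock LRU: a lock is taken off the LRU while it has reader/writer references, goes to the tail of the namespace's unused list when the last reference drops, and ldlm_lock_touch_in_lru() moves an already-queued lock back to the tail. The sketch below shows the same discipline with <sys/queue.h> tail queues; the toy_lock/lru_* names are illustrative, and the caller is assumed to hold whatever lock protects the list (the LRU spinlock in the code above) and to have initialized the head with TAILQ_INIT().

#include <sys/queue.h>
#include <stdbool.h>

struct toy_lock {
        TAILQ_ENTRY(toy_lock) lru_link;
        bool                  on_lru;
};

TAILQ_HEAD(lru_list, toy_lock);

static void lru_remove(struct lru_list *list, struct toy_lock *lk)
{
        /* cf. ldlm_lock_remove_from_lru_nolock(): a referenced lock leaves the LRU */
        if (lk->on_lru) {
                TAILQ_REMOVE(list, lk, lru_link);
                lk->on_lru = false;
        }
}

static void lru_add_tail(struct lru_list *list, struct toy_lock *lk)
{
        /* cf. ldlm_lock_add_to_lru_nolock(): an unused lock goes to the tail */
        if (!lk->on_lru) {
                TAILQ_INSERT_TAIL(list, lk, lru_link);
                lk->on_lru = true;
        }
}

static void lru_touch(struct lru_list *list, struct toy_lock *lk)
{
        /* cf. ldlm_lock_touch_in_lru(): move an already-queued lock back to the tail */
        if (lk->on_lru) {
                TAILQ_REMOVE(list, lk, lru_link);
                TAILQ_INSERT_TAIL(list, lk, lru_link);
        }
}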
311 * Helper to destroy a locked lock.
316 * Does not actually free the lock data, but rather marks the lock as
317 * destroyed by setting l_destroyed field in the lock to 1. Destroys a
318 * handle->lock association too, so that the lock can no longer be found
319 * and removes the lock from LRU list. Actual lock freeing occurs when
320 * last lock reference goes away.
324 * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
326 * ldlm_lock_destroy, you can never drop your final references on this lock.
329 int ldlm_lock_destroy_internal(struct ldlm_lock *lock) ldlm_lock_destroy_internal() argument
331 if (lock->l_readers || lock->l_writers) { ldlm_lock_destroy_internal()
332 LDLM_ERROR(lock, "lock still has references"); ldlm_lock_destroy_internal()
336 if (!list_empty(&lock->l_res_link)) { ldlm_lock_destroy_internal()
337 LDLM_ERROR(lock, "lock still on resource"); ldlm_lock_destroy_internal()
341 if (lock->l_flags & LDLM_FL_DESTROYED) { ldlm_lock_destroy_internal()
342 LASSERT(list_empty(&lock->l_lru)); ldlm_lock_destroy_internal()
345 lock->l_flags |= LDLM_FL_DESTROYED; ldlm_lock_destroy_internal()
347 if (lock->l_export && lock->l_export->exp_lock_hash) { ldlm_lock_destroy_internal()
348 /* NB: it's safe to call cfs_hash_del() even lock isn't ldlm_lock_destroy_internal()
353 cfs_hash_del(lock->l_export->exp_lock_hash, ldlm_lock_destroy_internal()
354 &lock->l_remote_handle, &lock->l_exp_hash); ldlm_lock_destroy_internal()
357 ldlm_lock_remove_from_lru(lock); ldlm_lock_destroy_internal()
358 class_handle_unhash(&lock->l_handle); ldlm_lock_destroy_internal()
361 /* Wake anyone waiting for this lock */ ldlm_lock_destroy_internal()
364 if (lock->l_export) ldlm_lock_destroy_internal()
365 class_export_put(lock->l_export); ldlm_lock_destroy_internal()
366 lock->l_export = NULL; ldlm_lock_destroy_internal()
367 if (lock->l_export && lock->l_completion_ast) ldlm_lock_destroy_internal()
368 lock->l_completion_ast(lock, 0); ldlm_lock_destroy_internal()
374 * Destroys a LDLM lock \a lock. Performs necessary locking first.
376 void ldlm_lock_destroy(struct ldlm_lock *lock) ldlm_lock_destroy() argument
380 lock_res_and_lock(lock); ldlm_lock_destroy()
381 first = ldlm_lock_destroy_internal(lock); ldlm_lock_destroy()
382 unlock_res_and_lock(lock); ldlm_lock_destroy()
386 lu_ref_del(&lock->l_reference, "hash", lock); ldlm_lock_destroy()
387 LDLM_LOCK_RELEASE(lock); ldlm_lock_destroy()
392 * Destroys a LDLM lock \a lock that is already locked.
394 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock) ldlm_lock_destroy_nolock() argument
398 first = ldlm_lock_destroy_internal(lock); ldlm_lock_destroy_nolock()
401 lu_ref_del(&lock->l_reference, "hash", lock); ldlm_lock_destroy_nolock()
402 LDLM_LOCK_RELEASE(lock); ldlm_lock_destroy_nolock()
406 /* this is called by portals_handle2object with the handle lock taken */ lock_handle_addref()
407 static void lock_handle_addref(void *lock) lock_handle_addref() argument
409 LDLM_LOCK_GET((struct ldlm_lock *)lock); lock_handle_addref()
412 static void lock_handle_free(void *lock, int size) lock_handle_free() argument
415 OBD_SLAB_FREE(lock, ldlm_lock_slab, size); lock_handle_free()
425 * Allocate and initialize new lock structure.
428 * new lock will take over the refcount.
429 * returns: lock with refcount 2 - one for current caller and one for remote
433 struct ldlm_lock *lock; ldlm_lock_new() local
438 OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, GFP_NOFS); ldlm_lock_new()
439 if (lock == NULL) ldlm_lock_new()
442 spin_lock_init(&lock->l_lock); ldlm_lock_new()
443 lock->l_resource = resource; ldlm_lock_new()
444 lu_ref_add(&resource->lr_reference, "lock", lock); ldlm_lock_new()
446 atomic_set(&lock->l_refc, 2); ldlm_lock_new()
447 INIT_LIST_HEAD(&lock->l_res_link); ldlm_lock_new()
448 INIT_LIST_HEAD(&lock->l_lru); ldlm_lock_new()
449 INIT_LIST_HEAD(&lock->l_pending_chain); ldlm_lock_new()
450 INIT_LIST_HEAD(&lock->l_bl_ast); ldlm_lock_new()
451 INIT_LIST_HEAD(&lock->l_cp_ast); ldlm_lock_new()
452 INIT_LIST_HEAD(&lock->l_rk_ast); ldlm_lock_new()
453 init_waitqueue_head(&lock->l_waitq); ldlm_lock_new()
454 lock->l_blocking_lock = NULL; ldlm_lock_new()
455 INIT_LIST_HEAD(&lock->l_sl_mode); ldlm_lock_new()
456 INIT_LIST_HEAD(&lock->l_sl_policy); ldlm_lock_new()
457 INIT_HLIST_NODE(&lock->l_exp_hash); ldlm_lock_new()
458 INIT_HLIST_NODE(&lock->l_exp_flock_hash); ldlm_lock_new()
462 INIT_LIST_HEAD(&lock->l_handle.h_link); ldlm_lock_new()
463 class_handle_hash(&lock->l_handle, &lock_handle_ops); ldlm_lock_new()
465 lu_ref_init(&lock->l_reference); ldlm_lock_new()
466 lu_ref_add(&lock->l_reference, "hash", lock); ldlm_lock_new()
467 lock->l_callback_timeout = 0; ldlm_lock_new()
470 INIT_LIST_HEAD(&lock->l_exp_refs_link); ldlm_lock_new()
471 lock->l_exp_refs_nr = 0; ldlm_lock_new()
472 lock->l_exp_refs_target = NULL; ldlm_lock_new()
474 INIT_LIST_HEAD(&lock->l_exp_list); ldlm_lock_new()
476 return lock; ldlm_lock_new()
480 * Moves LDLM lock \a lock to another resource.
481 * This is used on the client when the server returns a different lock than requested
484 int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, ldlm_lock_change_resource() argument
487 struct ldlm_resource *oldres = lock->l_resource; ldlm_lock_change_resource()
493 lock_res_and_lock(lock); ldlm_lock_change_resource()
494 if (memcmp(new_resid, &lock->l_resource->lr_name, ldlm_lock_change_resource()
495 sizeof(lock->l_resource->lr_name)) == 0) { ldlm_lock_change_resource()
497 unlock_res_and_lock(lock); ldlm_lock_change_resource()
503 /* This function assumes that the lock isn't on any lists */ ldlm_lock_change_resource()
504 LASSERT(list_empty(&lock->l_res_link)); ldlm_lock_change_resource()
507 unlock_res_and_lock(lock); ldlm_lock_change_resource()
513 lu_ref_add(&newres->lr_reference, "lock", lock); ldlm_lock_change_resource()
515 * To flip the lock from the old to the new resource, lock, oldres and ldlm_lock_change_resource()
517 * lock->l_lock, and are taken in the memory address order to avoid ldlm_lock_change_resource()
520 spin_lock(&lock->l_lock); ldlm_lock_change_resource()
521 oldres = lock->l_resource; ldlm_lock_change_resource()
531 lock->l_resource = newres; ldlm_lock_change_resource()
533 unlock_res_and_lock(lock); ldlm_lock_change_resource()
536 lu_ref_del(&oldres->lr_reference, "lock", lock); ldlm_lock_change_resource()
549 * Fills in handle for LDLM lock \a lock into supplied \a lockh
552 void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh) ldlm_lock2handle() argument
554 lockh->cookie = lock->l_handle.h_cookie; ldlm_lock2handle()
559 * Obtain a lock reference by handle.
561 * if \a flags: atomically get the lock and set the flags.
567 struct ldlm_lock *lock; __ldlm_handle2lock() local
571 lock = class_handle2object(handle->cookie); __ldlm_handle2lock()
572 if (lock == NULL) __ldlm_handle2lock()
575 /* It's unlikely but possible that someone marked the lock as __ldlm_handle2lock()
577 if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { __ldlm_handle2lock()
578 lu_ref_add(&lock->l_reference, "handle", current); __ldlm_handle2lock()
579 return lock; __ldlm_handle2lock()
582 lock_res_and_lock(lock); __ldlm_handle2lock()
584 LASSERT(lock->l_resource != NULL); __ldlm_handle2lock()
586 lu_ref_add_atomic(&lock->l_reference, "handle", current); __ldlm_handle2lock()
587 if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { __ldlm_handle2lock()
588 unlock_res_and_lock(lock); __ldlm_handle2lock()
589 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock); __ldlm_handle2lock()
590 LDLM_LOCK_PUT(lock); __ldlm_handle2lock()
594 if (flags && (lock->l_flags & flags)) { __ldlm_handle2lock()
595 unlock_res_and_lock(lock); __ldlm_handle2lock()
596 LDLM_LOCK_PUT(lock); __ldlm_handle2lock()
601 lock->l_flags |= flags; __ldlm_handle2lock()
603 unlock_res_and_lock(lock); __ldlm_handle2lock()
604 return lock; __ldlm_handle2lock()
610 * Fill in "on the wire" representation for given LDLM lock into supplied
611 * lock descriptor \a desc structure.
613 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc) ldlm_lock2desc() argument
615 ldlm_res2desc(lock->l_resource, &desc->l_resource); ldlm_lock2desc()
616 desc->l_req_mode = lock->l_req_mode; ldlm_lock2desc()
617 desc->l_granted_mode = lock->l_granted_mode; ldlm_lock2desc()
618 ldlm_convert_policy_to_wire(lock->l_resource->lr_type, ldlm_lock2desc()
619 &lock->l_policy_data, ldlm_lock2desc()
625 * Add a lock to list of conflicting locks to send AST to.
627 * Only add if we have not sent a blocking AST to the lock yet.
629 void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, ldlm_add_bl_work_item() argument
632 if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) { ldlm_add_bl_work_item()
633 LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); ldlm_add_bl_work_item()
634 lock->l_flags |= LDLM_FL_AST_SENT; ldlm_add_bl_work_item()
638 lock->l_flags |= LDLM_FL_DISCARD_DATA; ldlm_add_bl_work_item()
639 LASSERT(list_empty(&lock->l_bl_ast)); ldlm_add_bl_work_item()
640 list_add(&lock->l_bl_ast, work_list); ldlm_add_bl_work_item()
641 LDLM_LOCK_GET(lock); ldlm_add_bl_work_item()
642 LASSERT(lock->l_blocking_lock == NULL); ldlm_add_bl_work_item()
643 lock->l_blocking_lock = LDLM_LOCK_GET(new); ldlm_add_bl_work_item()
648 * Add a lock to list of just granted locks to send completion AST to.
650 void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list) ldlm_add_cp_work_item() argument
652 if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) { ldlm_add_cp_work_item()
653 lock->l_flags |= LDLM_FL_CP_REQD; ldlm_add_cp_work_item()
654 LDLM_DEBUG(lock, "lock granted; sending completion AST."); ldlm_add_cp_work_item()
655 LASSERT(list_empty(&lock->l_cp_ast)); ldlm_add_cp_work_item()
656 list_add(&lock->l_cp_ast, work_list); ldlm_add_cp_work_item()
657 LDLM_LOCK_GET(lock); ldlm_add_cp_work_item()
667 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, ldlm_add_ast_work_item() argument
670 check_res_locked(lock->l_resource); ldlm_add_ast_work_item()
672 ldlm_add_bl_work_item(lock, new, work_list); ldlm_add_ast_work_item()
674 ldlm_add_cp_work_item(lock, work_list); ldlm_add_ast_work_item()
678 * Add specified reader/writer reference to LDLM lock with handle \a lockh.
684 struct ldlm_lock *lock; ldlm_lock_addref() local
686 lock = ldlm_handle2lock(lockh); ldlm_lock_addref()
687 LASSERT(lock != NULL); ldlm_lock_addref()
688 ldlm_lock_addref_internal(lock, mode); ldlm_lock_addref()
689 LDLM_LOCK_PUT(lock); ldlm_lock_addref()
695 * Add specified reader/writer reference to LDLM lock \a lock.
697 * Removes lock from LRU if it is there.
698 * Assumes the LDLM lock is already locked.
700 void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode) ldlm_lock_addref_internal_nolock() argument
702 ldlm_lock_remove_from_lru(lock); ldlm_lock_addref_internal_nolock()
704 lock->l_readers++; ldlm_lock_addref_internal_nolock()
705 lu_ref_add_atomic(&lock->l_reference, "reader", lock); ldlm_lock_addref_internal_nolock()
708 lock->l_writers++; ldlm_lock_addref_internal_nolock()
709 lu_ref_add_atomic(&lock->l_reference, "writer", lock); ldlm_lock_addref_internal_nolock()
711 LDLM_LOCK_GET(lock); ldlm_lock_addref_internal_nolock()
712 lu_ref_add_atomic(&lock->l_reference, "user", lock); ldlm_lock_addref_internal_nolock()
713 LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]); ldlm_lock_addref_internal_nolock()
717 * Attempts to add reader/writer reference to a lock with handle \a lockh, and
718 * fails if lock is already LDLM_FL_CBPENDING or destroyed.
720 * \retval 0 success, lock was addref-ed
722 * \retval -EAGAIN lock is being canceled.
726 struct ldlm_lock *lock; ldlm_lock_addref_try() local
730 lock = ldlm_handle2lock(lockh); ldlm_lock_addref_try()
731 if (lock != NULL) { ldlm_lock_addref_try()
732 lock_res_and_lock(lock); ldlm_lock_addref_try()
733 if (lock->l_readers != 0 || lock->l_writers != 0 || ldlm_lock_addref_try()
734 !(lock->l_flags & LDLM_FL_CBPENDING)) { ldlm_lock_addref_try()
735 ldlm_lock_addref_internal_nolock(lock, mode); ldlm_lock_addref_try()
738 unlock_res_and_lock(lock); ldlm_lock_addref_try()
739 LDLM_LOCK_PUT(lock); ldlm_lock_addref_try()
746 * Add specified reader/writer reference to LDLM lock \a lock.
747 * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
750 void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode) ldlm_lock_addref_internal() argument
752 lock_res_and_lock(lock); ldlm_lock_addref_internal()
753 ldlm_lock_addref_internal_nolock(lock, mode); ldlm_lock_addref_internal()
754 unlock_res_and_lock(lock); ldlm_lock_addref_internal()
758 * Removes reader/writer reference for LDLM lock \a lock.
759 * Assumes LDLM lock is already locked.
761 * Does NOT add the lock to the LRU if no r/w references are left, to accommodate flock locks
764 void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode) ldlm_lock_decref_internal_nolock() argument
766 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); ldlm_lock_decref_internal_nolock()
768 LASSERT(lock->l_readers > 0); ldlm_lock_decref_internal_nolock()
769 lu_ref_del(&lock->l_reference, "reader", lock); ldlm_lock_decref_internal_nolock()
770 lock->l_readers--; ldlm_lock_decref_internal_nolock()
773 LASSERT(lock->l_writers > 0); ldlm_lock_decref_internal_nolock()
774 lu_ref_del(&lock->l_reference, "writer", lock); ldlm_lock_decref_internal_nolock()
775 lock->l_writers--; ldlm_lock_decref_internal_nolock()
778 lu_ref_del(&lock->l_reference, "user", lock); ldlm_lock_decref_internal_nolock()
779 LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */ ldlm_lock_decref_internal_nolock()
783 * Removes reader/writer reference for LDLM lock \a lock.
784 * Locks LDLM lock first.
785 * If the lock is determined to be a client lock on a client and the r/w refcount
786 * drops to zero and the lock is not blocked, the lock is added to the LRU lock
790 void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) ldlm_lock_decref_internal() argument
794 lock_res_and_lock(lock); ldlm_lock_decref_internal()
796 ns = ldlm_lock_to_ns(lock); ldlm_lock_decref_internal()
798 ldlm_lock_decref_internal_nolock(lock, mode); ldlm_lock_decref_internal()
800 if (lock->l_flags & LDLM_FL_LOCAL && ldlm_lock_decref_internal()
801 !lock->l_readers && !lock->l_writers) { ldlm_lock_decref_internal()
802 /* If this is a local lock on a server namespace and this was ldlm_lock_decref_internal()
803 * the last reference, cancel the lock. */ ldlm_lock_decref_internal()
804 CDEBUG(D_INFO, "forcing cancel of local lock\n"); ldlm_lock_decref_internal()
805 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_lock_decref_internal()
808 if (!lock->l_readers && !lock->l_writers && ldlm_lock_decref_internal()
809 (lock->l_flags & LDLM_FL_CBPENDING)) { ldlm_lock_decref_internal()
812 if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export) ldlm_lock_decref_internal()
813 CERROR("FL_CBPENDING set on non-local lock--just a warning\n"); ldlm_lock_decref_internal()
815 LDLM_DEBUG(lock, "final decref done on cbpending lock"); ldlm_lock_decref_internal()
817 LDLM_LOCK_GET(lock); /* dropped by bl thread */ ldlm_lock_decref_internal()
818 ldlm_lock_remove_from_lru(lock); ldlm_lock_decref_internal()
819 unlock_res_and_lock(lock); ldlm_lock_decref_internal()
821 if (lock->l_flags & LDLM_FL_FAIL_LOC) ldlm_lock_decref_internal()
824 if ((lock->l_flags & LDLM_FL_ATOMIC_CB) || ldlm_lock_decref_internal()
825 ldlm_bl_to_thread_lock(ns, NULL, lock) != 0) ldlm_lock_decref_internal()
826 ldlm_handle_bl_callback(ns, NULL, lock); ldlm_lock_decref_internal()
828 !lock->l_readers && !lock->l_writers && ldlm_lock_decref_internal()
829 !(lock->l_flags & LDLM_FL_NO_LRU) && ldlm_lock_decref_internal()
830 !(lock->l_flags & LDLM_FL_BL_AST)) { ldlm_lock_decref_internal()
832 LDLM_DEBUG(lock, "add lock into lru list"); ldlm_lock_decref_internal()
836 ldlm_lock_add_to_lru(lock); ldlm_lock_decref_internal()
837 unlock_res_and_lock(lock); ldlm_lock_decref_internal()
839 if (lock->l_flags & LDLM_FL_FAIL_LOC) ldlm_lock_decref_internal()
845 if (!exp_connect_cancelset(lock->l_conn_export) && ldlm_lock_decref_internal()
849 LDLM_DEBUG(lock, "do not add lock into lru list"); ldlm_lock_decref_internal()
850 unlock_res_and_lock(lock); ldlm_lock_decref_internal()
855 * Decrease reader/writer refcount for LDLM lock with handle \a lockh
859 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); ldlm_lock_decref() local
861 LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie); ldlm_lock_decref()
862 ldlm_lock_decref_internal(lock, mode); ldlm_lock_decref()
863 LDLM_LOCK_PUT(lock); ldlm_lock_decref()
868 * Decrease reader/writer refcount for LDLM lock with handle
876 struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); ldlm_lock_decref_and_cancel() local
878 LASSERT(lock != NULL); ldlm_lock_decref_and_cancel()
880 LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); ldlm_lock_decref_and_cancel()
881 lock_res_and_lock(lock); ldlm_lock_decref_and_cancel()
882 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_lock_decref_and_cancel()
883 unlock_res_and_lock(lock); ldlm_lock_decref_and_cancel()
884 ldlm_lock_decref_internal(lock, mode); ldlm_lock_decref_and_cancel()
885 LDLM_LOCK_PUT(lock); ldlm_lock_decref_and_cancel()
896 * Finds a position to insert the new lock into granted lock list.
902 * req [input]: the lock whose position is to be located;
914 struct ldlm_lock *lock, *mode_end, *policy_end; search_granted_lock() local
917 lock = list_entry(tmp, struct ldlm_lock, l_res_link); list_for_each()
919 mode_end = list_entry(lock->l_sl_mode.prev, list_for_each()
922 if (lock->l_req_mode != req->l_req_mode) { list_for_each()
923 /* jump to last lock of mode group */ list_for_each()
929 if (lock->l_resource->lr_type == LDLM_PLAIN) { list_for_each()
930 /* insert point is last lock of the mode group */ list_for_each()
935 } else if (lock->l_resource->lr_type == LDLM_IBITS) { list_for_each()
938 list_entry(lock->l_sl_policy.prev, list_for_each()
942 if (lock->l_policy_data.l_inodebits.bits == list_for_each()
944 /* insert point is last lock of list_for_each()
961 lock = list_entry(tmp, struct ldlm_lock, list_for_each()
965 /* insert point is last lock of the mode group, list_for_each()
972 LDLM_ERROR(lock, list_for_each()
973 "is not LDLM_PLAIN or LDLM_IBITS lock"); list_for_each()
978 /* insert point is last lock on the queue,
986 * Add a lock into resource granted list after a position described by
989 static void ldlm_granted_list_add_lock(struct ldlm_lock *lock, ldlm_granted_list_add_lock() argument
992 struct ldlm_resource *res = lock->l_resource; ldlm_granted_list_add_lock()
997 LDLM_DEBUG(lock, "About to add lock:"); ldlm_granted_list_add_lock()
999 if (lock->l_flags & LDLM_FL_DESTROYED) { ldlm_granted_list_add_lock()
1004 LASSERT(list_empty(&lock->l_res_link)); ldlm_granted_list_add_lock()
1005 LASSERT(list_empty(&lock->l_sl_mode)); ldlm_granted_list_add_lock()
1006 LASSERT(list_empty(&lock->l_sl_policy)); ldlm_granted_list_add_lock()
1009 * lock->link == prev->link means the lock is the first one starting the group. ldlm_granted_list_add_lock()
1012 if (&lock->l_res_link != prev->res_link) ldlm_granted_list_add_lock()
1013 list_add(&lock->l_res_link, prev->res_link); ldlm_granted_list_add_lock()
1014 if (&lock->l_sl_mode != prev->mode_link) ldlm_granted_list_add_lock()
1015 list_add(&lock->l_sl_mode, prev->mode_link); ldlm_granted_list_add_lock()
1016 if (&lock->l_sl_policy != prev->policy_link) ldlm_granted_list_add_lock()
1017 list_add(&lock->l_sl_policy, prev->policy_link); ldlm_granted_list_add_lock()
1021 * Add a lock to granted list on a resource maintaining skiplist
1024 static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock) ldlm_grant_lock_with_skiplist() argument
1028 LASSERT(lock->l_req_mode == lock->l_granted_mode); ldlm_grant_lock_with_skiplist()
1030 search_granted_lock(&lock->l_resource->lr_granted, lock, &prev); ldlm_grant_lock_with_skiplist()
1031 ldlm_granted_list_add_lock(lock, &prev); ldlm_grant_lock_with_skiplist()
1035 * Perform lock granting bookkeeping.
1037 * Includes putting the lock into granted list and updating lock mode.
1045 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) ldlm_grant_lock() argument
1047 struct ldlm_resource *res = lock->l_resource; ldlm_grant_lock()
1051 lock->l_granted_mode = lock->l_req_mode; ldlm_grant_lock()
1053 ldlm_grant_lock_with_skiplist(lock); ldlm_grant_lock()
1055 ldlm_extent_add_lock(res, lock); ldlm_grant_lock()
1057 ldlm_resource_add_lock(res, &res->lr_granted, lock); ldlm_grant_lock()
1059 if (lock->l_granted_mode < res->lr_most_restr) ldlm_grant_lock()
1060 res->lr_most_restr = lock->l_granted_mode; ldlm_grant_lock()
1062 if (work_list && lock->l_completion_ast != NULL) ldlm_grant_lock()
1063 ldlm_add_ast_work_item(lock, NULL, work_list); ldlm_grant_lock()
1065 ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); ldlm_grant_lock()
1069 * Search for a lock with given properties in a queue.
1071 * \retval a referenced lock or NULL. See the flag descriptions below, in the
1080 struct ldlm_lock *lock; search_queue() local
1086 lock = list_entry(tmp, struct ldlm_lock, l_res_link); list_for_each()
1088 if (lock == old_lock) list_for_each()
1091 /* Check if this lock can be matched. list_for_each()
1092 * Used by LU-2919 (exclusive open) for open lease lock */ list_for_each()
1093 if (ldlm_is_excl(lock)) list_for_each()
1098 * if it passes in CBPENDING and the lock still has users. list_for_each()
1100 * whose parents already hold a lock so forward progress list_for_each()
1102 if (lock->l_flags & LDLM_FL_CBPENDING && list_for_each()
1105 if (!unref && lock->l_flags & LDLM_FL_CBPENDING && list_for_each()
1106 lock->l_readers == 0 && lock->l_writers == 0) list_for_each()
1109 if (!(lock->l_req_mode & *mode)) list_for_each()
1111 match = lock->l_req_mode; list_for_each()
1113 if (lock->l_resource->lr_type == LDLM_EXTENT && list_for_each()
1114 (lock->l_policy_data.l_extent.start > list_for_each()
1116 lock->l_policy_data.l_extent.end < policy->l_extent.end)) list_for_each()
1120 lock->l_resource->lr_type == LDLM_EXTENT && list_for_each()
1121 lock->l_policy_data.l_extent.gid != policy->l_extent.gid) list_for_each()
1124 /* We match if we have existing lock with same or wider set list_for_each()
1126 if (lock->l_resource->lr_type == LDLM_IBITS && list_for_each()
1127 ((lock->l_policy_data.l_inodebits.bits & list_for_each()
1132 if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK)) list_for_each()
1136 !(lock->l_flags & LDLM_FL_LOCAL)) list_for_each()
1140 LDLM_LOCK_GET(lock); list_for_each()
1141 ldlm_lock_touch_in_lru(lock); list_for_each()
1143 ldlm_lock_addref_internal_nolock(lock, match); list_for_each()
1146 return lock; list_for_each()
1152 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock) ldlm_lock_fail_match_locked() argument
1154 if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) { ldlm_lock_fail_match_locked()
1155 lock->l_flags |= LDLM_FL_FAIL_NOTIFIED; ldlm_lock_fail_match_locked()
1156 wake_up_all(&lock->l_waitq); ldlm_lock_fail_match_locked()
1161 void ldlm_lock_fail_match(struct ldlm_lock *lock) ldlm_lock_fail_match() argument
1163 lock_res_and_lock(lock); ldlm_lock_fail_match()
1164 ldlm_lock_fail_match_locked(lock); ldlm_lock_fail_match()
1165 unlock_res_and_lock(lock); ldlm_lock_fail_match()
1170 * Mark lock as "matchable" by OST.
1172 * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
1174 * Assumes LDLM lock is already locked.
1176 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock) ldlm_lock_allow_match_locked() argument
1178 lock->l_flags |= LDLM_FL_LVB_READY; ldlm_lock_allow_match_locked()
1179 wake_up_all(&lock->l_waitq); ldlm_lock_allow_match_locked()
1184 * Mark lock as "matchable" by OST.
1185 * Locks the lock and then \see ldlm_lock_allow_match_locked
1187 void ldlm_lock_allow_match(struct ldlm_lock *lock) ldlm_lock_allow_match() argument
1189 lock_res_and_lock(lock); ldlm_lock_allow_match()
1190 ldlm_lock_allow_match_locked(lock); ldlm_lock_allow_match()
1191 unlock_res_and_lock(lock); ldlm_lock_allow_match()
1196 * Attempt to find a lock with specified properties.
1198 * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
1203 * If 'ns' is NULL, then lockh describes an existing lock that we want to look
1215 * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
1218 * \retval 1 if it finds an already-existing lock that is compatible; in this
1219 * case, lockh is filled in with an addref()ed lock
1231 struct ldlm_lock *lock, *old_lock = NULL; ldlm_lock_match() local
1253 lock = search_queue(&res->lr_granted, &mode, policy, old_lock, ldlm_lock_match()
1255 if (lock != NULL) { ldlm_lock_match()
1263 lock = search_queue(&res->lr_converting, &mode, policy, old_lock, ldlm_lock_match()
1265 if (lock != NULL) { ldlm_lock_match()
1269 lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, ldlm_lock_match()
1271 if (lock != NULL) { ldlm_lock_match()
1281 if (lock) { ldlm_lock_match()
1282 ldlm_lock2handle(lock, lockh); ldlm_lock_match()
1284 (!(lock->l_flags & LDLM_FL_LVB_READY))) { ldlm_lock_match()
1289 if (lock->l_completion_ast) { ldlm_lock_match()
1290 int err = lock->l_completion_ast(lock, ldlm_lock_match()
1295 LDLM_LOCK_RELEASE(lock); ldlm_lock_match()
1297 ldlm_lock_decref_internal(lock, ldlm_lock_match()
1308 l_wait_event(lock->l_waitq, ldlm_lock_match()
1309 lock->l_flags & wait_flags, ldlm_lock_match()
1311 if (!(lock->l_flags & LDLM_FL_LVB_READY)) { ldlm_lock_match()
1313 LDLM_LOCK_RELEASE(lock); ldlm_lock_match()
1315 ldlm_lock_decref_internal(lock, mode); ldlm_lock_match()
1322 LDLM_DEBUG(lock, "matched (%llu %llu)", ldlm_lock_match()
1329 if (lock->l_conn_export && ldlm_lock_match()
1331 class_exp2cliimp(lock->l_conn_export))) { ldlm_lock_match()
1333 ldlm_lock_decref_internal(lock, mode); ldlm_lock_match()
1338 LDLM_LOCK_RELEASE(lock); ldlm_lock_match()
1359 struct ldlm_lock *lock; ldlm_revalidate_lock_handle() local
1362 lock = ldlm_handle2lock(lockh); ldlm_revalidate_lock_handle()
1363 if (lock != NULL) { ldlm_revalidate_lock_handle()
1364 lock_res_and_lock(lock); ldlm_revalidate_lock_handle()
1365 if (lock->l_flags & LDLM_FL_GONE_MASK) ldlm_revalidate_lock_handle()
1368 if (lock->l_flags & LDLM_FL_CBPENDING && ldlm_revalidate_lock_handle()
1369 lock->l_readers == 0 && lock->l_writers == 0) ldlm_revalidate_lock_handle()
1373 *bits = lock->l_policy_data.l_inodebits.bits; ldlm_revalidate_lock_handle()
1374 mode = lock->l_granted_mode; ldlm_revalidate_lock_handle()
1375 ldlm_lock_addref_internal_nolock(lock, mode); ldlm_revalidate_lock_handle()
1379 if (lock != NULL) { ldlm_revalidate_lock_handle()
1380 unlock_res_and_lock(lock); ldlm_revalidate_lock_handle()
1381 LDLM_LOCK_PUT(lock); ldlm_revalidate_lock_handle()
1388 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, ldlm_fill_lvb() argument
1396 switch (lock->l_lvb_type) { ldlm_fill_lvb()
1408 LDLM_ERROR(lock, "no LVB"); ldlm_fill_lvb()
1425 LDLM_ERROR(lock, "no LVB"); ldlm_fill_lvb()
1434 LDLM_ERROR(lock, "Replied unexpected ost LVB size %d", ldlm_fill_lvb()
1450 LDLM_ERROR(lock, "no LVB"); ldlm_fill_lvb()
1456 LDLM_ERROR(lock, ldlm_fill_lvb()
1471 LDLM_ERROR(lock, "no LVB"); ldlm_fill_lvb()
1478 LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type); ldlm_fill_lvb()
1487 * Create and fill in new LDLM lock with specified properties.
1488 * Returns a referenced lock
1498 struct ldlm_lock *lock; ldlm_lock_create() local
1505 lock = ldlm_lock_new(res); ldlm_lock_create()
1507 if (lock == NULL) ldlm_lock_create()
1510 lock->l_req_mode = mode; ldlm_lock_create()
1511 lock->l_ast_data = data; ldlm_lock_create()
1512 lock->l_pid = current_pid(); ldlm_lock_create()
1514 lock->l_flags |= LDLM_FL_NS_SRV; ldlm_lock_create()
1516 lock->l_blocking_ast = cbs->lcs_blocking; ldlm_lock_create()
1517 lock->l_completion_ast = cbs->lcs_completion; ldlm_lock_create()
1518 lock->l_glimpse_ast = cbs->lcs_glimpse; ldlm_lock_create()
1521 lock->l_tree_node = NULL; ldlm_lock_create()
1522 /* if this is the extent lock, allocate the interval tree node */ ldlm_lock_create()
1524 if (ldlm_interval_alloc(lock) == NULL) ldlm_lock_create()
1529 lock->l_lvb_len = lvb_len; ldlm_lock_create()
1530 OBD_ALLOC(lock->l_lvb_data, lvb_len); ldlm_lock_create()
1531 if (lock->l_lvb_data == NULL) ldlm_lock_create()
1535 lock->l_lvb_type = lvb_type; ldlm_lock_create()
1539 return lock; ldlm_lock_create()
1542 ldlm_lock_destroy(lock); ldlm_lock_create()
1543 LDLM_LOCK_RELEASE(lock); ldlm_lock_create()
1548 * Enqueue (request) a lock.
1550 * Does not block. As a result of enqueue the lock would be put
1553 * If the namespace has an intent policy set and the lock has the LDLM_FL_HAS_INTENT flag
1554 * set, skip all the enqueueing and delegate lock processing to the intent policy
1561 struct ldlm_lock *lock = *lockp; ldlm_lock_enqueue() local
1562 struct ldlm_resource *res = lock->l_resource; ldlm_lock_enqueue()
1567 lock->l_last_activity = get_seconds(); ldlm_lock_enqueue()
1571 rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags, ldlm_lock_enqueue()
1574 /* The lock that was returned has already been granted, ldlm_lock_enqueue()
1578 if (lock != *lockp) { ldlm_lock_enqueue()
1579 ldlm_lock_destroy(lock); ldlm_lock_enqueue()
1580 LDLM_LOCK_RELEASE(lock); ldlm_lock_enqueue()
1586 ldlm_lock_destroy(lock); ldlm_lock_enqueue()
1591 /* For a replaying lock, it might be already in granted list. So ldlm_lock_enqueue()
1592 * unlinking the lock will cause the interval node to be freed, we ldlm_lock_enqueue()
1594 * this lock in the future. - jay */ ldlm_lock_enqueue()
1598 lock_res_and_lock(lock); ldlm_lock_enqueue()
1599 if (local && lock->l_req_mode == lock->l_granted_mode) { ldlm_lock_enqueue()
1600 /* The server returned a blocked lock, but it was granted ldlm_lock_enqueue()
1608 ldlm_resource_unlink_lock(lock); ldlm_lock_enqueue()
1609 if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) { ldlm_lock_enqueue()
1611 ldlm_lock_destroy_nolock(lock); ldlm_lock_enqueue()
1617 ldlm_interval_attach(node, lock); ldlm_lock_enqueue()
1622 * lock's l_flags. */ ldlm_lock_enqueue()
1623 lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; ldlm_lock_enqueue()
1625 /* This distinction between local lock trees is very important; a client ldlm_lock_enqueue()
1638 ldlm_resource_add_lock(res, &res->lr_converting, lock); ldlm_lock_enqueue()
1640 ldlm_resource_add_lock(res, &res->lr_waiting, lock); ldlm_lock_enqueue()
1642 ldlm_grant_lock(lock, NULL); ldlm_lock_enqueue()
1645 CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n"); ldlm_lock_enqueue()
1650 unlock_res_and_lock(lock); ldlm_lock_enqueue()
1658 * Process a call to blocking AST callback for a lock in ast_work list
1666 struct ldlm_lock *lock; ldlm_work_bl_ast_lock() local
1671 lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast); ldlm_work_bl_ast_lock()
1674 lock_res_and_lock(lock); ldlm_work_bl_ast_lock()
1675 list_del_init(&lock->l_bl_ast); ldlm_work_bl_ast_lock()
1677 LASSERT(lock->l_flags & LDLM_FL_AST_SENT); ldlm_work_bl_ast_lock()
1678 LASSERT(lock->l_bl_ast_run == 0); ldlm_work_bl_ast_lock()
1679 LASSERT(lock->l_blocking_lock); ldlm_work_bl_ast_lock()
1680 lock->l_bl_ast_run++; ldlm_work_bl_ast_lock()
1681 unlock_res_and_lock(lock); ldlm_work_bl_ast_lock()
1683 ldlm_lock2desc(lock->l_blocking_lock, &d); ldlm_work_bl_ast_lock()
1685 rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING); ldlm_work_bl_ast_lock()
1686 LDLM_LOCK_RELEASE(lock->l_blocking_lock); ldlm_work_bl_ast_lock()
1687 lock->l_blocking_lock = NULL; ldlm_work_bl_ast_lock()
1688 LDLM_LOCK_RELEASE(lock); ldlm_work_bl_ast_lock()
1694 * Process a call to completion AST callback for a lock in ast_work list
1701 struct ldlm_lock *lock; ldlm_work_cp_ast_lock() local
1707 lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast); ldlm_work_cp_ast_lock()
1721 lock_res_and_lock(lock); ldlm_work_cp_ast_lock()
1722 list_del_init(&lock->l_cp_ast); ldlm_work_cp_ast_lock()
1723 LASSERT(lock->l_flags & LDLM_FL_CP_REQD); ldlm_work_cp_ast_lock()
1726 completion_callback = lock->l_completion_ast; ldlm_work_cp_ast_lock()
1727 lock->l_flags &= ~LDLM_FL_CP_REQD; ldlm_work_cp_ast_lock()
1728 unlock_res_and_lock(lock); ldlm_work_cp_ast_lock()
1731 rc = completion_callback(lock, 0, (void *)arg); ldlm_work_cp_ast_lock()
1732 LDLM_LOCK_RELEASE(lock); ldlm_work_cp_ast_lock()
1738 * Process a call to revocation AST callback for a lock in ast_work list
1746 struct ldlm_lock *lock; ldlm_work_revoke_ast_lock() local
1751 lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast); ldlm_work_revoke_ast_lock()
1752 list_del_init(&lock->l_rk_ast); ldlm_work_revoke_ast_lock()
1755 ldlm_lock2desc(lock, &desc); ldlm_work_revoke_ast_lock()
1759 rc = lock->l_blocking_ast(lock, &desc, (void *)arg, LDLM_CB_BLOCKING); ldlm_work_revoke_ast_lock()
1760 LDLM_LOCK_RELEASE(lock); ldlm_work_revoke_ast_lock()
1766 * Process a call to glimpse AST callback for a lock in ast_work list
1772 struct ldlm_lock *lock; ldlm_work_gl_ast_lock() local
1782 lock = gl_work->gl_lock; ldlm_work_gl_ast_lock()
1788 if (lock->l_glimpse_ast(lock, (void *)arg) == 0) ldlm_work_gl_ast_lock()
1791 LDLM_LOCK_RELEASE(lock); ldlm_work_gl_ast_lock()
1907 CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n"); ldlm_reprocess_all()
1913 * Helper function to call blocking AST for LDLM lock \a lock in a
1916 void ldlm_cancel_callback(struct ldlm_lock *lock) ldlm_cancel_callback() argument
1918 check_res_locked(lock->l_resource); ldlm_cancel_callback()
1919 if (!(lock->l_flags & LDLM_FL_CANCEL)) { ldlm_cancel_callback()
1920 lock->l_flags |= LDLM_FL_CANCEL; ldlm_cancel_callback()
1921 if (lock->l_blocking_ast) { ldlm_cancel_callback()
1922 unlock_res_and_lock(lock); ldlm_cancel_callback()
1923 lock->l_blocking_ast(lock, NULL, lock->l_ast_data, ldlm_cancel_callback()
1925 lock_res_and_lock(lock); ldlm_cancel_callback()
1927 LDLM_DEBUG(lock, "no blocking ast"); ldlm_cancel_callback()
1930 lock->l_flags |= LDLM_FL_BL_DONE; ldlm_cancel_callback()
1934 * Remove skiplist-enabled LDLM lock \a req from granted list
1947 * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
1949 void ldlm_lock_cancel(struct ldlm_lock *lock) ldlm_lock_cancel() argument
1954 lock_res_and_lock(lock); ldlm_lock_cancel()
1956 res = lock->l_resource; ldlm_lock_cancel()
1961 if (lock->l_readers || lock->l_writers) { ldlm_lock_cancel()
1962 LDLM_ERROR(lock, "lock still has references"); ldlm_lock_cancel()
1966 if (lock->l_flags & LDLM_FL_WAITED) ldlm_lock_cancel()
1967 ldlm_del_waiting_lock(lock); ldlm_lock_cancel()
1970 ldlm_cancel_callback(lock); ldlm_lock_cancel()
1973 * running with no res lock in ldlm_cancel_callback */ ldlm_lock_cancel()
1974 if (lock->l_flags & LDLM_FL_WAITED) ldlm_lock_cancel()
1975 ldlm_del_waiting_lock(lock); ldlm_lock_cancel()
1977 ldlm_resource_unlink_lock(lock); ldlm_lock_cancel()
1978 ldlm_lock_destroy_nolock(lock); ldlm_lock_cancel()
1980 if (lock->l_granted_mode == lock->l_req_mode) ldlm_lock_cancel()
1981 ldlm_pool_del(&ns->ns_pool, lock); ldlm_lock_cancel()
1983 /* Make sure we will not be called again for the same lock, which is possible ldlm_lock_cancel()
1984 * if we do not zero out lock->l_granted_mode */ ldlm_lock_cancel()
1985 lock->l_granted_mode = LCK_MINMODE; ldlm_lock_cancel()
1986 unlock_res_and_lock(lock); ldlm_lock_cancel()
1991 * Set opaque data into the lock that only makes sense to upper layer.
1995 struct ldlm_lock *lock = ldlm_handle2lock(lockh); ldlm_lock_set_data() local
1998 if (lock) { ldlm_lock_set_data()
1999 if (lock->l_ast_data == NULL) ldlm_lock_set_data()
2000 lock->l_ast_data = data; ldlm_lock_set_data()
2001 if (lock->l_ast_data == data) ldlm_lock_set_data()
2003 LDLM_LOCK_PUT(lock); ldlm_lock_set_data()
2024 struct ldlm_lock *lock = cfs_hash_object(hs, hnode); ldlm_cancel_locks_for_export_cb() local
2027 res = ldlm_resource_getref(lock->l_resource); ldlm_cancel_locks_for_export_cb()
2028 LDLM_LOCK_GET(lock); ldlm_cancel_locks_for_export_cb()
2030 LDLM_DEBUG(lock, "export %p", exp); ldlm_cancel_locks_for_export_cb()
2032 ldlm_lock_cancel(lock); ldlm_cancel_locks_for_export_cb()
2035 LDLM_LOCK_RELEASE(lock); ldlm_cancel_locks_for_export_cb()
2040 "Cancel lock %p for export %p (loop %d), still have %d locks left on hash table.\n", ldlm_cancel_locks_for_export_cb()
2041 lock, exp, ecl->ecl_loop, ldlm_cancel_locks_for_export_cb()
2065 * Downgrade an exclusive lock.
2071 * \param lock A lock to convert
2072 * \param new_mode new lock mode
2074 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode) ldlm_lock_downgrade() argument
2076 LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX)); ldlm_lock_downgrade()
2079 lock_res_and_lock(lock); ldlm_lock_downgrade()
2080 ldlm_resource_unlink_lock(lock); ldlm_lock_downgrade()
2082 * Remove the lock from pool as it will be added again in ldlm_lock_downgrade()
2085 ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock); ldlm_lock_downgrade()
2087 lock->l_req_mode = new_mode; ldlm_lock_downgrade()
2088 ldlm_grant_lock(lock, NULL); ldlm_lock_downgrade()
2089 unlock_res_and_lock(lock); ldlm_lock_downgrade()
2090 ldlm_reprocess_all(lock->l_resource); ldlm_lock_downgrade()
2095 * Attempt to convert already granted lock to a different mode.
2097 * While lock conversion is not currently used, future client-side
2101 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, ldlm_lock_convert() argument
2111 if (new_mode == lock->l_granted_mode) { ldlm_lock_convert()
2113 return lock->l_resource; ldlm_lock_convert()
2116 /* I can't check the type of lock here because the bitlock of lock ldlm_lock_convert()
2123 LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR), ldlm_lock_convert()
2124 "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode); ldlm_lock_convert()
2126 lock_res_and_lock(lock); ldlm_lock_convert()
2128 res = lock->l_resource; ldlm_lock_convert()
2131 lock->l_req_mode = new_mode; ldlm_lock_convert()
2133 ldlm_resource_unlink_lock(lock); ldlm_lock_convert()
2135 ldlm_resource_unlink_lock(lock); ldlm_lock_convert()
2137 /* FIXME: ugly code, I have to attach the lock to a ldlm_lock_convert()
2141 ldlm_interval_attach(node, lock); ldlm_lock_convert()
2147 * Remove old lock from the pool before adding the lock with new ldlm_lock_convert()
2150 ldlm_pool_del(&ns->ns_pool, lock); ldlm_lock_convert()
2155 ldlm_resource_add_lock(res, &res->lr_converting, lock); ldlm_lock_convert()
2159 LDLM_ERROR(lock, "Erroneous flags %x on local lock\n", ldlm_lock_convert()
2163 ldlm_grant_lock(lock, &rpc_list); ldlm_lock_convert()
2166 if (lock->l_completion_ast) ldlm_lock_convert()
2167 lock->l_completion_ast(lock, 0, NULL); ldlm_lock_convert()
2170 CERROR("This is client-side-only module, cannot handle LDLM_NAMESPACE_SERVER resource type lock.\n"); ldlm_lock_convert()
2173 unlock_res_and_lock(lock); ldlm_lock_convert()
2184 * Print lock with lock handle \a lockh description into debug log.
2190 struct ldlm_lock *lock; ldlm_lock_dump_handle() local
2195 lock = ldlm_handle2lock(lockh); ldlm_lock_dump_handle()
2196 if (lock == NULL) ldlm_lock_dump_handle()
2199 LDLM_DEBUG_LIMIT(level, lock, "###"); ldlm_lock_dump_handle()
2201 LDLM_LOCK_PUT(lock); ldlm_lock_dump_handle()
2206 * Print lock information with custom message into debug log.
2209 void _ldlm_lock_debug(struct ldlm_lock *lock, _ldlm_lock_debug() argument
2214 struct obd_export *exp = lock->l_export; _ldlm_lock_debug()
2215 struct ldlm_resource *resource = lock->l_resource; _ldlm_lock_debug()
2230 " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", _ldlm_lock_debug()
2231 lock, _ldlm_lock_debug()
2232 lock->l_handle.h_cookie, atomic_read(&lock->l_refc), _ldlm_lock_debug()
2233 lock->l_readers, lock->l_writers, _ldlm_lock_debug()
2234 ldlm_lockname[lock->l_granted_mode], _ldlm_lock_debug()
2235 ldlm_lockname[lock->l_req_mode], _ldlm_lock_debug()
2236 lock->l_flags, nid, lock->l_remote_handle.cookie, _ldlm_lock_debug()
2238 lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type); _ldlm_lock_debug()
2246 " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", _ldlm_lock_debug()
2247 ldlm_lock_to_ns_name(lock), lock, _ldlm_lock_debug()
2248 lock->l_handle.h_cookie, atomic_read(&lock->l_refc), _ldlm_lock_debug()
2249 lock->l_readers, lock->l_writers, _ldlm_lock_debug()
2250 ldlm_lockname[lock->l_granted_mode], _ldlm_lock_debug()
2251 ldlm_lockname[lock->l_req_mode], _ldlm_lock_debug()
2255 lock->l_policy_data.l_extent.start, _ldlm_lock_debug()
2256 lock->l_policy_data.l_extent.end, _ldlm_lock_debug()
2257 lock->l_req_extent.start, lock->l_req_extent.end, _ldlm_lock_debug()
2258 lock->l_flags, nid, lock->l_remote_handle.cookie, _ldlm_lock_debug()
2260 lock->l_pid, lock->l_callback_timeout, _ldlm_lock_debug()
2261 lock->l_lvb_type); _ldlm_lock_debug()
2266 " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n", _ldlm_lock_debug()
2267 ldlm_lock_to_ns_name(lock), lock, _ldlm_lock_debug()
2268 lock->l_handle.h_cookie, atomic_read(&lock->l_refc), _ldlm_lock_debug()
2269 lock->l_readers, lock->l_writers, _ldlm_lock_debug()
2270 ldlm_lockname[lock->l_granted_mode], _ldlm_lock_debug()
2271 ldlm_lockname[lock->l_req_mode], _ldlm_lock_debug()
2275 lock->l_policy_data.l_flock.pid, _ldlm_lock_debug()
2276 lock->l_policy_data.l_flock.start, _ldlm_lock_debug()
2277 lock->l_policy_data.l_flock.end, _ldlm_lock_debug()
2278 lock->l_flags, nid, lock->l_remote_handle.cookie, _ldlm_lock_debug()
2280 lock->l_pid, lock->l_callback_timeout); _ldlm_lock_debug()
2285 " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", _ldlm_lock_debug()
2286 ldlm_lock_to_ns_name(lock), _ldlm_lock_debug()
2287 lock, lock->l_handle.h_cookie, _ldlm_lock_debug()
2288 atomic_read(&lock->l_refc), _ldlm_lock_debug()
2289 lock->l_readers, lock->l_writers, _ldlm_lock_debug()
2290 ldlm_lockname[lock->l_granted_mode], _ldlm_lock_debug()
2291 ldlm_lockname[lock->l_req_mode], _ldlm_lock_debug()
2293 lock->l_policy_data.l_inodebits.bits, _ldlm_lock_debug()
2296 lock->l_flags, nid, lock->l_remote_handle.cookie, _ldlm_lock_debug()
2298 lock->l_pid, lock->l_callback_timeout, _ldlm_lock_debug()
2299 lock->l_lvb_type); _ldlm_lock_debug()
2304 " ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", _ldlm_lock_debug()
2305 ldlm_lock_to_ns_name(lock), _ldlm_lock_debug()
2306 lock, lock->l_handle.h_cookie, _ldlm_lock_debug()
2307 atomic_read(&lock->l_refc), _ldlm_lock_debug()
2308 lock->l_readers, lock->l_writers, _ldlm_lock_debug()
2309 ldlm_lockname[lock->l_granted_mode], _ldlm_lock_debug()
2310 ldlm_lockname[lock->l_req_mode], _ldlm_lock_debug()
2314 lock->l_flags, nid, lock->l_remote_handle.cookie, _ldlm_lock_debug()
2316 lock->l_pid, lock->l_callback_timeout, _ldlm_lock_debug()
2317 lock->l_lvb_type); _ldlm_lock_debug()
H A Dl_lock.c44 * Lock a lock and its resource.
47 * but there is a case when we change the resource of a lock upon
48 * enqueue reply. We rely on lock->l_resource = new_res
51 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock) lock_res_and_lock() argument
53 /* on server-side resource of lock doesn't change */ lock_res_and_lock()
54 if ((lock->l_flags & LDLM_FL_NS_SRV) == 0) lock_res_and_lock()
55 spin_lock(&lock->l_lock); lock_res_and_lock()
57 lock_res(lock->l_resource); lock_res_and_lock()
59 lock->l_flags |= LDLM_FL_RES_LOCKED; lock_res_and_lock()
60 return lock->l_resource; lock_res_and_lock()
65 * Unlock a lock and its resource previously locked with lock_res_and_lock
67 void unlock_res_and_lock(struct ldlm_lock *lock) unlock_res_and_lock() argument
69 /* on server-side resource of lock doesn't change */ unlock_res_and_lock()
70 lock->l_flags &= ~LDLM_FL_RES_LOCKED; unlock_res_and_lock()
72 unlock_res(lock->l_resource); unlock_res_and_lock()
73 if ((lock->l_flags & LDLM_FL_NS_SRV) == 0) unlock_res_and_lock()
74 spin_unlock(&lock->l_lock); unlock_res_and_lock()
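lock_res_and_lock() and unlock_res_and_lock() above always come as a pair around any update of per-lock state. A minimal sketch of that pattern (my_set_cbpending() is a hypothetical helper; the two helpers and LDLM_FL_CBPENDING are taken from hits in this listing):

        static void my_set_cbpending(struct ldlm_lock *lock)
        {
                lock_res_and_lock(lock);        /* takes l_lock, then the resource lock */
                lock->l_flags |= LDLM_FL_CBPENDING;
                unlock_res_and_lock(lock);      /* releases in the reverse order */
        }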
H A Dldlm_flock.c42 * This file implements POSIX lock type for Lustre.
48 * merged into a single wider lock.
52 * NL to request a releasing of a portion of the lock
66 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
81 ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new) ldlm_same_flock_owner() argument
84 lock->l_policy_data.l_flock.owner) && ldlm_same_flock_owner()
85 (new->l_export == lock->l_export)); ldlm_same_flock_owner()
89 ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) ldlm_flocks_overlap() argument
92 lock->l_policy_data.l_flock.end) && ldlm_flocks_overlap()
94 lock->l_policy_data.l_flock.start)); ldlm_flocks_overlap()
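The two fragments above are halves of one boolean; reconstructed as a sketch (the function name and the l_flock fields are those shown, the comparison directions are inferred), two byte ranges overlap when neither ends before the other starts:

        static inline int ldlm_flocks_overlap(struct ldlm_lock *lock,
                                              struct ldlm_lock *new)
        {
                return (new->l_policy_data.l_flock.start <=
                        lock->l_policy_data.l_flock.end) &&
                       (new->l_policy_data.l_flock.end >=
                        lock->l_policy_data.l_flock.start);
        }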
98 struct ldlm_lock *lock) ldlm_flock_blocking_link()
107 lock->l_policy_data.l_flock.owner; ldlm_flock_blocking_link()
109 lock->l_export; ldlm_flock_blocking_link()
132 ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) ldlm_flock_destroy() argument
134 LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)", ldlm_flock_destroy()
137 /* Safe to not lock here, since it should be empty anyway */ ldlm_flock_destroy()
138 LASSERT(hlist_unhashed(&lock->l_exp_flock_hash)); ldlm_flock_destroy()
140 list_del_init(&lock->l_res_link); ldlm_flock_destroy()
142 !(lock->l_flags & LDLM_FL_FAILED)) { ldlm_flock_destroy()
144 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; ldlm_flock_destroy()
148 ldlm_lock_decref_internal_nolock(lock, mode); ldlm_flock_destroy()
151 ldlm_lock_destroy_nolock(lock); ldlm_flock_destroy()
157 * Given a new lock \a req and an existing lock \a bl_lock it conflicts
160 * one client holds a lock on something and wants a lock on something
178 struct ldlm_lock *lock = NULL; ldlm_flock_deadlock() local
182 lock = cfs_hash_lookup(bl_exp->exp_flock_hash, ldlm_flock_deadlock()
184 if (lock == NULL) ldlm_flock_deadlock()
187 LASSERT(req != lock); ldlm_flock_deadlock()
188 flock = &lock->l_policy_data.l_flock; ldlm_flock_deadlock()
194 cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash); ldlm_flock_deadlock()
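The ldlm_flock_deadlock() hits above show only fragments of the walk: look up, through the per-export exp_flock_hash, the lock that the current blocker's owner is itself waiting on, and keep following that chain. An illustration of the idea over a deliberately simplified structure (my_flock, blocked_by and my_flock_deadlock() are hypothetical; the real code uses cfs_hash_lookup()/cfs_hash_put() as shown):

        struct my_flock {
                unsigned long    owner;         /* owner holding or wanting the lock */
                struct my_flock *blocked_by;    /* lock this owner is waiting on */
        };

        static int my_flock_deadlock(struct my_flock *req, struct my_flock *blocker)
        {
                while (blocker != NULL) {
                        if (blocker->owner == req->owner)
                                return 1;       /* chain cycles back to the requester */
                        blocker = blocker->blocked_by;
                }
                return 0;
        }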
207 static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock, ldlm_flock_cancel_on_deadlock() argument
210 CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock); ldlm_flock_cancel_on_deadlock()
212 if ((exp_connect_flags(lock->l_export) & ldlm_flock_cancel_on_deadlock()
217 LASSERT(lock->l_completion_ast); ldlm_flock_cancel_on_deadlock()
218 LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0); ldlm_flock_cancel_on_deadlock()
219 lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK | ldlm_flock_cancel_on_deadlock()
221 ldlm_flock_blocking_unlink(lock); ldlm_flock_cancel_on_deadlock()
222 ldlm_resource_unlink_lock(lock); ldlm_flock_cancel_on_deadlock()
223 ldlm_add_ast_work_item(lock, NULL, work_list); ldlm_flock_cancel_on_deadlock()
228 * Process a granting attempt for flock lock.
229 * Must be called with the ns lock held.
231 * This function looks for any conflicts for \a lock in the granted or
232 * waiting queues. The lock is granted if no conflicts are found in
235 * It is also responsible for splitting a lock if a portion of the lock
253 struct ldlm_lock *lock = NULL; ldlm_process_flock_lock() local
277 /* Called on the server for lock cancels. */ ldlm_process_flock_lock()
286 lock = list_entry(tmp, struct ldlm_lock, ldlm_process_flock_lock()
288 if (ldlm_same_flock_owner(lock, req)) { ldlm_process_flock_lock()
299 * that conflict with the new lock request. */ ldlm_process_flock_lock()
301 lock = list_entry(tmp, struct ldlm_lock, ldlm_process_flock_lock()
304 if (ldlm_same_flock_owner(lock, req)) { ldlm_process_flock_lock()
311 if (lockmode_compat(lock->l_granted_mode, mode)) ldlm_process_flock_lock()
314 if (!ldlm_flocks_overlap(lock, req)) ldlm_process_flock_lock()
319 if (ldlm_flock_deadlock(req, lock)) { ldlm_process_flock_lock()
335 req->l_req_mode = lock->l_granted_mode; ldlm_process_flock_lock()
337 lock->l_policy_data.l_flock.pid; ldlm_process_flock_lock()
339 lock->l_policy_data.l_flock.start; ldlm_process_flock_lock()
341 lock->l_policy_data.l_flock.end; ldlm_process_flock_lock()
346 /* add lock to blocking list before deadlock ldlm_process_flock_lock()
348 ldlm_flock_blocking_link(req, lock); ldlm_process_flock_lock()
350 if (ldlm_flock_deadlock(req, lock)) { ldlm_process_flock_lock()
372 /* In case we had slept on this lock request, take it off the ldlm_process_flock_lock()
383 lock = list_entry(ownlocks, struct ldlm_lock, l_res_link); ldlm_process_flock_lock()
385 if (!ldlm_same_flock_owner(lock, new)) ldlm_process_flock_lock()
388 if (lock->l_granted_mode == mode) { ldlm_process_flock_lock()
390 * locks that overlap OR adjoin the new lock. The extra ldlm_process_flock_lock()
394 (lock->l_policy_data.l_flock.end + 1)) ldlm_process_flock_lock()
395 && (lock->l_policy_data.l_flock.end != ldlm_process_flock_lock()
400 (lock->l_policy_data.l_flock.start - 1)) ldlm_process_flock_lock()
401 && (lock->l_policy_data.l_flock.start != 0)) ldlm_process_flock_lock()
405 lock->l_policy_data.l_flock.start) { ldlm_process_flock_lock()
406 lock->l_policy_data.l_flock.start = ldlm_process_flock_lock()
410 lock->l_policy_data.l_flock.start; ldlm_process_flock_lock()
414 lock->l_policy_data.l_flock.end) { ldlm_process_flock_lock()
415 lock->l_policy_data.l_flock.end = ldlm_process_flock_lock()
419 lock->l_policy_data.l_flock.end; ldlm_process_flock_lock()
423 ldlm_flock_destroy(lock, mode, *flags); ldlm_process_flock_lock()
425 new = lock; ldlm_process_flock_lock()
432 lock->l_policy_data.l_flock.end) ldlm_process_flock_lock()
436 lock->l_policy_data.l_flock.start) ldlm_process_flock_lock()
442 lock->l_policy_data.l_flock.start) { ldlm_process_flock_lock()
444 lock->l_policy_data.l_flock.end) { ldlm_process_flock_lock()
445 lock->l_policy_data.l_flock.start = ldlm_process_flock_lock()
449 ldlm_flock_destroy(lock, lock->l_req_mode, *flags); ldlm_process_flock_lock()
453 lock->l_policy_data.l_flock.end) { ldlm_process_flock_lock()
454 lock->l_policy_data.l_flock.end = ldlm_process_flock_lock()
459 /* split the existing lock into two locks */ ldlm_process_flock_lock()
462 * allocating a new lock and use the req lock passed in ldlm_process_flock_lock()
465 * reply. The client side replays the lock request so ldlm_process_flock_lock()
466 * it must see the original lock data in the reply. */ ldlm_process_flock_lock()
469 * release the lr_lock, allocate the new lock, ldlm_process_flock_lock()
470 * and restart processing this lock. */ ldlm_process_flock_lock()
474 lock->l_granted_mode, &null_cbs, ldlm_process_flock_lock()
478 ldlm_flock_destroy(req, lock->l_granted_mode, ldlm_process_flock_lock()
488 new2->l_granted_mode = lock->l_granted_mode; ldlm_process_flock_lock()
494 lock->l_policy_data.l_flock.start; ldlm_process_flock_lock()
497 lock->l_policy_data.l_flock.start = ldlm_process_flock_lock()
499 new2->l_conn_export = lock->l_conn_export; ldlm_process_flock_lock()
500 if (lock->l_export != NULL) { ldlm_process_flock_lock()
501 new2->l_export = class_export_lock_get(lock->l_export, ldlm_process_flock_lock()
511 lock->l_granted_mode); ldlm_process_flock_lock()
513 /* insert new2 at lock */ ldlm_process_flock_lock()
523 /* At this point we're granting the lock request. */ ldlm_process_flock_lock()
529 /* insert new lock before ownlocks in list. */ ldlm_process_flock_lock()
541 /* In case we're reprocessing the requested lock we can't destroy ldlm_process_flock_lock()
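The tail of ldlm_process_flock_lock() above handles the POSIX split: when a differently-moded request lands strictly inside an existing lock, one range becomes two. In plain arithmetic (the struct and helper are hypothetical; which half stays in the original ldlm_lock and which goes into the newly allocated new2 is decided by the real code above):

        struct my_range { __u64 start, end; };

        /* punch [s, e] out of [r->start, r->end], assuming s > r->start and e < r->end */
        static void my_split_range(struct my_range *r, __u64 s, __u64 e,
                                   struct my_range *second_half)
        {
                second_half->start = e + 1;
                second_half->end   = r->end;
                r->end             = s - 1;
        }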
560 struct ldlm_lock *lock; ldlm_flock_interrupted_wait() local
562 lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock; ldlm_flock_interrupted_wait()
564 /* take lock off the deadlock detection hash list. */ ldlm_flock_interrupted_wait()
565 lock_res_and_lock(lock); ldlm_flock_interrupted_wait()
566 ldlm_flock_blocking_unlink(lock); ldlm_flock_interrupted_wait()
568 /* client side - set flag to prevent lock from being put on LRU list */ ldlm_flock_interrupted_wait()
569 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_flock_interrupted_wait()
570 unlock_res_and_lock(lock); ldlm_flock_interrupted_wait()
576 * \param lock [in,out]: A lock to be handled
584 ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) ldlm_flock_completion_ast() argument
586 struct file_lock *getlk = lock->l_ast_data; ldlm_flock_completion_ast()
597 /* Import invalidation. We need to actually release the lock ldlm_flock_completion_ast()
599 * holding the lock even if app still believes it has it, since ldlm_flock_completion_ast()
601 if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) == ldlm_flock_completion_ast()
603 if (lock->l_req_mode == lock->l_granted_mode && ldlm_flock_completion_ast()
604 lock->l_granted_mode != LCK_NL && ldlm_flock_completion_ast()
606 ldlm_lock_decref_internal(lock, lock->l_req_mode); ldlm_flock_completion_ast()
609 wake_up(&lock->l_waitq); ldlm_flock_completion_ast()
618 /* mds granted the lock in the reply */ ldlm_flock_completion_ast()
620 /* CP AST RPC: lock get granted, wake it up */ ldlm_flock_completion_ast()
621 wake_up(&lock->l_waitq); ldlm_flock_completion_ast()
625 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping"); ldlm_flock_completion_ast()
626 fwd.fwd_lock = lock; ldlm_flock_completion_ast()
627 obd = class_exp2obd(lock->l_conn_export); ldlm_flock_completion_ast()
629 /* if this is a local lock, there is no import */ ldlm_flock_completion_ast()
641 /* Go to sleep until the lock is granted. */ ldlm_flock_completion_ast()
642 rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi); ldlm_flock_completion_ast()
645 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", ldlm_flock_completion_ast()
653 if (lock->l_flags & LDLM_FL_DESTROYED) { ldlm_flock_completion_ast()
654 LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed"); ldlm_flock_completion_ast()
658 if (lock->l_flags & LDLM_FL_FAILED) { ldlm_flock_completion_ast()
659 LDLM_DEBUG(lock, "client-side enqueue waking up: failed"); ldlm_flock_completion_ast()
664 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", ldlm_flock_completion_ast()
669 LDLM_DEBUG(lock, "client-side enqueue granted"); ldlm_flock_completion_ast()
671 lock_res_and_lock(lock); ldlm_flock_completion_ast()
673 /* take lock off the deadlock detection hash list. */ ldlm_flock_completion_ast()
674 ldlm_flock_blocking_unlink(lock); ldlm_flock_completion_ast()
676 /* ldlm_lock_enqueue() has already placed lock on the granted list. */ ldlm_flock_completion_ast()
677 list_del_init(&lock->l_res_link); ldlm_flock_completion_ast()
679 if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) { ldlm_flock_completion_ast()
680 LDLM_DEBUG(lock, "client-side enqueue deadlock received"); ldlm_flock_completion_ast()
685 * in the lock changes we can decref the appropriate refcount.*/ ldlm_flock_completion_ast()
686 ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC); ldlm_flock_completion_ast()
687 switch (lock->l_granted_mode) { ldlm_flock_completion_ast()
697 getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid; ldlm_flock_completion_ast()
698 getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start; ldlm_flock_completion_ast()
699 getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end; ldlm_flock_completion_ast()
703 /* We need to reprocess the lock to do merges or splits ldlm_flock_completion_ast()
705 ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL); ldlm_flock_completion_ast()
707 unlock_res_and_lock(lock); ldlm_flock_completion_ast()
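For F_GETLK-style callers, the switch on l_granted_mode above copies the conflicting lock back into the struct file_lock. A sketch of the mapping it implies (my_fill_getlk() is hypothetical and the exact case labels are an assumption; the getlk->fl_* assignments and LCK_PR/LCK_PW are the identifiers matched here):

        static void my_fill_getlk(const struct ldlm_lock *lock, struct file_lock *getlk)
        {
                switch (lock->l_granted_mode) {
                case LCK_PR:
                        getlk->fl_type = F_RDLCK;       /* shared (read) lock */
                        break;
                case LCK_PW:
                        getlk->fl_type = F_WRLCK;       /* exclusive (write) lock */
                        break;
                default:
                        getlk->fl_type = F_UNLCK;
                        break;
                }
                getlk->fl_pid   = (pid_t)lock->l_policy_data.l_flock.pid;
                getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
                getlk->fl_end   = (loff_t)lock->l_policy_data.l_flock.end;
        }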
712 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, ldlm_flock_blocking_ast() argument
715 LASSERT(lock); ldlm_flock_blocking_ast()
718 /* take lock off the deadlock detection hash list. */ ldlm_flock_blocking_ast()
719 lock_res_and_lock(lock); ldlm_flock_blocking_ast()
720 ldlm_flock_blocking_unlink(lock); ldlm_flock_blocking_ast()
721 unlock_res_and_lock(lock); ldlm_flock_blocking_ast()
771 struct ldlm_lock *lock; ldlm_export_flock_key() local
773 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash); ldlm_export_flock_key()
774 return &lock->l_policy_data.l_flock.owner; ldlm_export_flock_key()
792 struct ldlm_lock *lock; ldlm_export_flock_get() local
795 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash); ldlm_export_flock_get()
796 LDLM_LOCK_GET(lock); ldlm_export_flock_get()
798 flock = &lock->l_policy_data.l_flock; ldlm_export_flock_get()
807 struct ldlm_lock *lock; ldlm_export_flock_put() local
810 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash); ldlm_export_flock_put()
811 LDLM_LOCK_RELEASE(lock); ldlm_export_flock_put()
813 flock = &lock->l_policy_data.l_flock; ldlm_export_flock_put()
97 ldlm_flock_blocking_link(struct ldlm_lock *req, struct ldlm_lock *lock) ldlm_flock_blocking_link() argument
H A Dldlm_request.c40 * An AST is a callback issued on a lock when its state is changed. There are
41 * several different types of ASTs (callbacks) registered for each lock:
43 * - completion AST: when a lock is enqueued by some process, but cannot be
45 * the completion AST is sent to notify the caller when the lock is
48 * - blocking AST: when a lock is granted to some process, if another process
49 * enqueues a conflicting (blocking) lock on a resource, a blocking AST is
50 * sent to notify the holder(s) of the lock(s) of the conflicting lock
51 * request. The lock holder(s) must release their lock(s) on that resource in
54 * - glimpse AST: this is used when a process wants information about a lock
55 * (i.e. the lock value block (LVB)) but does not necessarily require holding
56 * the lock. If the resource is locked, the lock holder(s) are sent glimpse
57 * ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
58 * their lock(s) if they are idle. If the resource is not locked, the server
59 * may grant the lock.
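The comment block above is where the three AST types are defined; in practice they are supplied per enqueue through struct ldlm_enqueue_info. A hedged sketch of that wiring (only ei_type, ei_mode and ei_cb_bl appear verbatim in later hits; ei_cb_cp, ei_cb_gl and the initializer shape are assumptions, and my_blocking_ast is hypothetical):

        struct ldlm_enqueue_info einfo = {
                .ei_type  = LDLM_EXTENT,                /* lock type */
                .ei_mode  = LCK_PR,                     /* requested mode */
                .ei_cb_bl = my_blocking_ast,            /* blocking AST: a conflicting request arrived */
                .ei_cb_cp = ldlm_completion_ast,        /* completion AST: the lock became granted */
                .ei_cb_gl = ldlm_glimpse_ast,           /* glimpse AST: LVB wanted without a grant */
        };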
72 MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
93 struct ldlm_lock *lock = lwd->lwd_lock; ldlm_expired_completion_wait() local
97 if (lock->l_conn_export == NULL) { ldlm_expired_completion_wait()
100 LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", " ldlm_expired_completion_wait()
102 lock->l_last_activity, ldlm_expired_completion_wait()
104 lock->l_last_activity)); ldlm_expired_completion_wait()
105 LDLM_DEBUG(lock, "lock timed out (enqueued at " CFS_TIME_T ", " CFS_DURATION_T "s ago); not entering recovery in server code, just going back to sleep", ldlm_expired_completion_wait()
106 lock->l_last_activity, ldlm_expired_completion_wait()
108 lock->l_last_activity)); ldlm_expired_completion_wait()
113 ldlm_lock_to_ns(lock)); ldlm_expired_completion_wait()
120 obd = lock->l_conn_export->exp_obd; ldlm_expired_completion_wait()
123 LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", " ldlm_expired_completion_wait()
125 lock->l_last_activity, ldlm_expired_completion_wait()
126 cfs_time_sub(get_seconds(), lock->l_last_activity), ldlm_expired_completion_wait()
135 int ldlm_get_enq_timeout(struct ldlm_lock *lock) ldlm_get_enq_timeout() argument
137 int timeout = at_get(ldlm_lock_to_ns_at(lock)); ldlm_get_enq_timeout()
143 lock callbacks too... */ ldlm_get_enq_timeout()
150 * Helper function for ldlm_completion_ast(), updating timings when lock is
153 static int ldlm_completion_tail(struct ldlm_lock *lock) ldlm_completion_tail() argument
158 if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) { ldlm_completion_tail()
159 LDLM_DEBUG(lock, "client-side enqueue: destroyed"); ldlm_completion_tail()
163 lock->l_last_activity); ldlm_completion_tail()
164 LDLM_DEBUG(lock, "client-side enqueue: granted after " ldlm_completion_tail()
168 at_measured(ldlm_lock_to_ns_at(lock), ldlm_completion_tail()
177 * until lock is granted. Suitable for locks enqueued through ptlrpcd, of
180 int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data) ldlm_completion_ast_async() argument
183 LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock"); ldlm_completion_ast_async()
189 wake_up(&lock->l_waitq); ldlm_completion_ast_async()
190 return ldlm_completion_tail(lock); ldlm_completion_ast_async()
193 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward"); ldlm_completion_ast_async()
194 ldlm_reprocess_all(lock->l_resource); ldlm_completion_ast_async()
206 * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
209 * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
214 * - during lock conversion (not used currently).
216 * If lock is not granted in the first case, this function waits until second
220 int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) ldlm_completion_ast() argument
231 LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock"); ldlm_completion_ast()
237 wake_up(&lock->l_waitq); ldlm_completion_ast()
241 LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping"); ldlm_completion_ast()
245 obd = class_exp2obd(lock->l_conn_export); ldlm_completion_ast()
247 /* if this is a local lock, then there is no import */ ldlm_completion_ast()
252 lock from another client. Server will evict the other client if it ldlm_completion_ast()
253 doesn't respond reasonably, and then give us the lock. */ ldlm_completion_ast()
254 timeout = ldlm_get_enq_timeout(lock) * 2; ldlm_completion_ast()
256 lwd.lwd_lock = lock; ldlm_completion_ast()
258 if (lock->l_flags & LDLM_FL_NO_TIMEOUT) { ldlm_completion_ast()
259 LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT"); ldlm_completion_ast()
273 if (ns_is_client(ldlm_lock_to_ns(lock)) && ldlm_completion_ast()
276 lock->l_flags |= LDLM_FL_FAIL_LOC; ldlm_completion_ast()
279 /* Go to sleep until the lock is granted or cancelled. */ ldlm_completion_ast()
280 rc = l_wait_event(lock->l_waitq, ldlm_completion_ast()
281 is_granted_or_cancelled(lock), &lwi); ldlm_completion_ast()
285 LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)", ldlm_completion_ast()
290 return ldlm_completion_tail(lock); ldlm_completion_ast()
298 * deferred lock cancellation.
300 * \param lock the lock blocking or canceling AST was called on
305 int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock) ldlm_blocking_ast_nocheck() argument
309 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_blocking_ast_nocheck()
310 do_ast = !lock->l_readers && !lock->l_writers; ldlm_blocking_ast_nocheck()
311 unlock_res_and_lock(lock); ldlm_blocking_ast_nocheck()
317 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel"); ldlm_blocking_ast_nocheck()
318 ldlm_lock2handle(lock, &lockh); ldlm_blocking_ast_nocheck()
323 LDLM_DEBUG(lock, "Lock still has references, will be cancelled later"); ldlm_blocking_ast_nocheck()
335 * \param lock the lock which blocks a request or cancelling lock
342 int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, ldlm_blocking_ast() argument
350 lock_res_and_lock(lock); ldlm_blocking_ast()
353 * takes the lr_lock, then by the time we get the lock, we might not ldlm_blocking_ast()
356 if (lock->l_blocking_ast != ldlm_blocking_ast) { ldlm_blocking_ast()
357 unlock_res_and_lock(lock); ldlm_blocking_ast()
360 return ldlm_blocking_ast_nocheck(lock); ldlm_blocking_ast()
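ldlm_blocking_ast() and ldlm_blocking_ast_nocheck() above show the canonical client pattern: mark the lock CBPENDING and cancel immediately only if it is already unused. A sketch of a filesystem-style blocking AST built on that helper (my_fs_blocking_ast() is hypothetical; the LDLM_CB_CANCELING dispatch on 'flag' is an assumption about the callback protocol, while the two ldlm_* calls are the ones matched here):

        static int my_fs_blocking_ast(struct ldlm_lock *lock,
                                      struct ldlm_lock_desc *desc,
                                      void *data, int flag)
        {
                if (flag == LDLM_CB_CANCELING)
                        return 0;       /* nothing extra to tear down in this sketch */

                lock_res_and_lock(lock);
                /* drops the resource lock and cancels the lock once it is unused */
                return ldlm_blocking_ast_nocheck(lock);
        }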
368 int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp) ldlm_glimpse_ast() argument
393 * Enqueue a local lock (typically on a server).
406 struct ldlm_lock *lock; ldlm_cli_enqueue_local() local
415 CERROR("Trying to enqueue local lock in a shadow namespace\n"); ldlm_cli_enqueue_local()
419 lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len, ldlm_cli_enqueue_local()
421 if (unlikely(!lock)) { ldlm_cli_enqueue_local()
426 ldlm_lock2handle(lock, lockh); ldlm_cli_enqueue_local()
428 /* NB: we don't have any lock now (lock_res_and_lock) ldlm_cli_enqueue_local()
429 * because it's a new lock */ ldlm_cli_enqueue_local()
430 ldlm_lock_addref_internal_nolock(lock, mode); ldlm_cli_enqueue_local()
431 lock->l_flags |= LDLM_FL_LOCAL; ldlm_cli_enqueue_local()
433 lock->l_flags |= LDLM_FL_ATOMIC_CB; ldlm_cli_enqueue_local()
436 lock->l_policy_data = *policy; ldlm_cli_enqueue_local()
438 lock->l_client_cookie = *client_cookie; ldlm_cli_enqueue_local()
440 lock->l_req_extent = policy->l_extent; ldlm_cli_enqueue_local()
442 err = ldlm_lock_enqueue(ns, &lock, policy, flags); ldlm_cli_enqueue_local()
447 *policy = lock->l_policy_data; ldlm_cli_enqueue_local()
449 if (lock->l_completion_ast) ldlm_cli_enqueue_local()
450 lock->l_completion_ast(lock, *flags, NULL); ldlm_cli_enqueue_local()
452 LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created"); ldlm_cli_enqueue_local()
454 LDLM_LOCK_RELEASE(lock); ldlm_cli_enqueue_local()
461 struct ldlm_lock *lock, int mode) failed_lock_cleanup()
466 lock_res_and_lock(lock); failed_lock_cleanup()
467 /* Check that lock is not granted or failed, we might race. */ failed_lock_cleanup()
468 if ((lock->l_req_mode != lock->l_granted_mode) && failed_lock_cleanup()
469 !(lock->l_flags & LDLM_FL_FAILED)) { failed_lock_cleanup()
470 /* Make sure that this lock will not be found by raced failed_lock_cleanup()
473 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | failed_lock_cleanup()
477 unlock_res_and_lock(lock); failed_lock_cleanup()
480 LDLM_DEBUG(lock, failed_lock_cleanup()
483 LDLM_DEBUG(lock, "lock was granted or failed in race"); failed_lock_cleanup()
485 ldlm_lock_decref_internal(lock, mode); failed_lock_cleanup()
493 if (lock->l_resource->lr_type == LDLM_FLOCK) { failed_lock_cleanup()
494 lock_res_and_lock(lock); failed_lock_cleanup()
495 ldlm_resource_unlink_lock(lock); failed_lock_cleanup()
496 ldlm_lock_destroy_nolock(lock); failed_lock_cleanup()
497 unlock_res_and_lock(lock); failed_lock_cleanup()
502 * Finishing portion of client lock enqueue code.
513 struct ldlm_lock *lock; ldlm_cli_enqueue_fini() local
518 lock = ldlm_handle2lock(lockh); ldlm_cli_enqueue_fini()
519 /* ldlm_cli_enqueue is holding a reference on this lock. */ ldlm_cli_enqueue_fini()
520 if (!lock) { ldlm_cli_enqueue_fini()
525 LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len), ldlm_cli_enqueue_fini()
526 "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len); ldlm_cli_enqueue_fini()
530 LDLM_DEBUG(lock, "client-side enqueue END (%s)", ldlm_cli_enqueue_fini()
550 LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size); ldlm_cli_enqueue_fini()
554 LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d", ldlm_cli_enqueue_fini()
563 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, ldlm_cli_enqueue_fini()
570 /* lock enqueued on the server */ ldlm_cli_enqueue_fini()
573 lock_res_and_lock(lock); ldlm_cli_enqueue_fini()
574 /* Key change rehash lock in per-export hash with new key */ ldlm_cli_enqueue_fini()
580 &lock->l_remote_handle, ldlm_cli_enqueue_fini()
582 &lock->l_exp_hash); ldlm_cli_enqueue_fini()
584 lock->l_remote_handle = reply->lock_handle; ldlm_cli_enqueue_fini()
588 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & ldlm_cli_enqueue_fini()
590 /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match() ldlm_cli_enqueue_fini()
592 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & ldlm_cli_enqueue_fini()
594 unlock_res_and_lock(lock); ldlm_cli_enqueue_fini()
597 lock, reply->lock_handle.cookie, *flags); ldlm_cli_enqueue_fini()
599 /* If enqueue returned a blocked lock but the completion handler has ldlm_cli_enqueue_fini()
606 if (newmode && newmode != lock->l_req_mode) { ldlm_cli_enqueue_fini()
607 LDLM_DEBUG(lock, "server returned different mode %s", ldlm_cli_enqueue_fini()
609 lock->l_req_mode = newmode; ldlm_cli_enqueue_fini()
613 &lock->l_resource->lr_name)) { ldlm_cli_enqueue_fini()
617 PLDLMRES(lock->l_resource)); ldlm_cli_enqueue_fini()
619 rc = ldlm_lock_change_resource(ns, lock, ldlm_cli_enqueue_fini()
621 if (rc || lock->l_resource == NULL) { ldlm_cli_enqueue_fini()
625 LDLM_DEBUG(lock, "client-side enqueue, new resource"); ldlm_cli_enqueue_fini()
630 /* We assume the lock type cannot change on the server */ ldlm_cli_enqueue_fini()
632 lock->l_resource->lr_type, ldlm_cli_enqueue_fini()
634 &lock->l_policy_data); ldlm_cli_enqueue_fini()
636 LDLM_DEBUG(lock, ldlm_cli_enqueue_fini()
645 lock_res_and_lock(lock); ldlm_cli_enqueue_fini()
646 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; ldlm_cli_enqueue_fini()
647 unlock_res_and_lock(lock); ldlm_cli_enqueue_fini()
648 LDLM_DEBUG(lock, "enqueue reply includes blocking AST"); ldlm_cli_enqueue_fini()
651 /* If the lock has already been granted by a completion AST, don't ldlm_cli_enqueue_fini()
654 /* We must lock or a racing completion might update lvb without ldlm_cli_enqueue_fini()
658 lock_res_and_lock(lock); ldlm_cli_enqueue_fini()
659 if (lock->l_req_mode != lock->l_granted_mode) ldlm_cli_enqueue_fini()
660 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, ldlm_cli_enqueue_fini()
661 lock->l_lvb_data, size); ldlm_cli_enqueue_fini()
662 unlock_res_and_lock(lock); ldlm_cli_enqueue_fini()
670 rc = ldlm_lock_enqueue(ns, &lock, NULL, flags); ldlm_cli_enqueue_fini()
671 if (lock->l_completion_ast != NULL) { ldlm_cli_enqueue_fini()
672 int err = lock->l_completion_ast(lock, *flags, NULL); ldlm_cli_enqueue_fini()
684 memcpy(lvb, lock->l_lvb_data, lvb_len); ldlm_cli_enqueue_fini()
687 LDLM_DEBUG(lock, "client-side enqueue END"); ldlm_cli_enqueue_fini()
690 failed_lock_cleanup(ns, lock, mode); ldlm_cli_enqueue_fini()
691 /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */ ldlm_cli_enqueue_fini()
692 LDLM_LOCK_PUT(lock); ldlm_cli_enqueue_fini()
693 LDLM_LOCK_RELEASE(lock); ldlm_cli_enqueue_fini()
699 * Estimate number of lock handles that would fit into request of given
791 /* Skip first lock handler in ldlm_request_pack(), ldlm_prep_elc_req()
793 * to the lock handle amount actually written to ldlm_prep_elc_req()
797 /* Pack into the request @pack lock handles. */ ldlm_prep_elc_req()
838 * Client-side lock enqueue.
855 struct ldlm_lock *lock; ldlm_cli_enqueue() local
866 /* If we're replaying this lock, just check some invariants. ldlm_cli_enqueue()
867 * If we're creating a new lock, get everything all setup nice. */ ldlm_cli_enqueue()
869 lock = ldlm_handle2lock_long(lockh, 0); ldlm_cli_enqueue()
870 LASSERT(lock != NULL); ldlm_cli_enqueue()
871 LDLM_DEBUG(lock, "client-side enqueue START"); ldlm_cli_enqueue()
872 LASSERT(exp == lock->l_conn_export); ldlm_cli_enqueue()
879 lock = ldlm_lock_create(ns, res_id, einfo->ei_type, ldlm_cli_enqueue()
882 if (lock == NULL) ldlm_cli_enqueue()
884 /* for the local lock, add the reference */ ldlm_cli_enqueue()
885 ldlm_lock_addref_internal(lock, einfo->ei_mode); ldlm_cli_enqueue()
886 ldlm_lock2handle(lock, lockh); ldlm_cli_enqueue()
888 lock->l_policy_data = *policy; ldlm_cli_enqueue()
891 lock->l_req_extent = policy->l_extent; ldlm_cli_enqueue()
892 LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n", ldlm_cli_enqueue()
896 lock->l_conn_export = exp; ldlm_cli_enqueue()
897 lock->l_export = NULL; ldlm_cli_enqueue()
898 lock->l_blocking_ast = einfo->ei_cb_bl; ldlm_cli_enqueue()
899 lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL)); ldlm_cli_enqueue()
901 /* lock not sent to server yet */ ldlm_cli_enqueue()
909 failed_lock_cleanup(ns, lock, einfo->ei_mode); ldlm_cli_enqueue()
910 LDLM_LOCK_RELEASE(lock); ldlm_cli_enqueue()
926 /* Dump lock data into the request buffer */ ldlm_cli_enqueue()
928 ldlm_lock2desc(lock, &body->lock_desc); ldlm_cli_enqueue()
944 * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where ldlm_cli_enqueue()
945 * [i_size, OBD_OBJECT_EOF] lock is taken. ldlm_cli_enqueue()
955 LDLM_DEBUG(lock, "sending request"); ldlm_cli_enqueue()
963 /* If ldlm_cli_enqueue_fini did not find the lock, we need to free ldlm_cli_enqueue()
966 LDLM_LOCK_RELEASE(lock); ldlm_cli_enqueue()
980 static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode, ldlm_cli_convert_local() argument
986 if (ns_is_client(ldlm_lock_to_ns(lock))) { ldlm_cli_convert_local()
987 CERROR("Trying to cancel local lock\n"); ldlm_cli_convert_local()
990 LDLM_DEBUG(lock, "client-side local convert"); ldlm_cli_convert_local()
992 res = ldlm_lock_convert(lock, new_mode, flags); ldlm_cli_convert_local()
999 LDLM_DEBUG(lock, "client-side local convert handler END"); ldlm_cli_convert_local()
1000 LDLM_LOCK_PUT(lock); ldlm_cli_convert_local()
1006 /* Caller of this code is supposed to take care of lock readers/writers
1012 struct ldlm_lock *lock; ldlm_cli_convert() local
1017 lock = ldlm_handle2lock(lockh); ldlm_cli_convert()
1018 if (!lock) { ldlm_cli_convert()
1024 if (lock->l_conn_export == NULL) ldlm_cli_convert()
1025 return ldlm_cli_convert_local(lock, new_mode, flags); ldlm_cli_convert()
1027 LDLM_DEBUG(lock, "client-side convert"); ldlm_cli_convert()
1029 req = ptlrpc_request_alloc_pack(class_exp2cliimp(lock->l_conn_export), ldlm_cli_convert()
1033 LDLM_LOCK_PUT(lock); ldlm_cli_convert()
1038 body->lock_handle[0] = lock->l_remote_handle; ldlm_cli_convert()
1060 res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags); ldlm_cli_convert()
1063 /* Go to sleep until the lock is granted. */ ldlm_cli_convert()
1065 if (lock->l_completion_ast) { ldlm_cli_convert()
1066 rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, ldlm_cli_convert()
1075 LDLM_LOCK_PUT(lock); ldlm_cli_convert()
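A caller sketch for the convert path above (the ldlm_cli_convert() prototype -- a lock handle, the new mode and a flags pointer -- is inferred from the ldlm_cli_convert_local() hits and is an assumption; conversion is noted earlier in this listing as not currently used, so this is illustrative only):

        static int my_convert_to_pw(struct lustre_handle *lockh)
        {
                __u32 flags = 0;        /* the flags type is an assumption */

                return ldlm_cli_convert(lockh, LCK_PW, &flags);
        }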
1088 static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock) ldlm_cli_cancel_local() argument
1092 if (lock->l_conn_export) { ldlm_cli_cancel_local()
1095 LDLM_DEBUG(lock, "client-side cancel"); ldlm_cli_cancel_local()
1097 lock_res_and_lock(lock); ldlm_cli_cancel_local()
1098 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_cli_cancel_local()
1099 local_only = !!(lock->l_flags & ldlm_cli_cancel_local()
1101 ldlm_cancel_callback(lock); ldlm_cli_cancel_local()
1102 rc = (lock->l_flags & LDLM_FL_BL_AST) ? ldlm_cli_cancel_local()
1104 unlock_res_and_lock(lock); ldlm_cli_cancel_local()
1110 ldlm_lock_cancel(lock); ldlm_cli_cancel_local()
1112 if (ns_is_client(ldlm_lock_to_ns(lock))) { ldlm_cli_cancel_local()
1113 LDLM_ERROR(lock, "Trying to cancel local lock"); ldlm_cli_cancel_local()
1116 LDLM_DEBUG(lock, "server-side local cancel"); ldlm_cli_cancel_local()
1117 ldlm_lock_cancel(lock); ldlm_cli_cancel_local()
1118 ldlm_reprocess_all(lock->l_resource); ldlm_cli_cancel_local()
1131 struct ldlm_lock *lock; ldlm_cancel_pack() local
1144 /* XXX: it would be better to pack lock handles grouped by resource. ldlm_cancel_pack()
1147 list_for_each_entry(lock, head, l_bl_ast) { list_for_each_entry()
1150 LASSERT(lock->l_conn_export); list_for_each_entry()
1151 /* Pack the lock handle to the given request buffer. */ list_for_each_entry()
1152 LDLM_DEBUG(lock, "packing"); list_for_each_entry()
1153 dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle; list_for_each_entry()
1160 * Prepare and send a batched cancel RPC. It will include \a count lock
1303 * Client side lock cancel.
1314 struct ldlm_lock *lock; ldlm_cli_cancel() local
1318 lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING); ldlm_cli_cancel()
1319 if (lock == NULL) { ldlm_cli_cancel()
1320 LDLM_DEBUG_NOLOCK("lock is already being destroyed\n"); ldlm_cli_cancel()
1324 rc = ldlm_cli_cancel_local(lock); ldlm_cli_cancel()
1326 LDLM_LOCK_RELEASE(lock); ldlm_cli_cancel()
1329 /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL ldlm_cli_cancel()
1332 LASSERT(list_empty(&lock->l_bl_ast)); ldlm_cli_cancel()
1333 list_add(&lock->l_bl_ast, &cancels); ldlm_cli_cancel()
1335 exp = lock->l_conn_export; ldlm_cli_cancel()
1342 ns = ldlm_lock_to_ns(lock); ldlm_cli_cancel()
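The ldlm_cli_cancel() hits above are the client-side cancel entry point; a minimal caller sketch (my_cancel_lock() is hypothetical; ldlm_lock2handle() appears in earlier hits, and the second "cancel flags" argument is an assumption about the prototype -- only the handle is visible in this listing):

        static int my_cancel_lock(struct ldlm_lock *lock)
        {
                struct lustre_handle lockh;

                ldlm_lock2handle(lock, &lockh);
                /* 0 == no special cancel flags; the argument itself is assumed */
                return ldlm_cli_cancel(&lockh, 0);
        }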
1361 struct ldlm_lock *lock, *next; ldlm_cli_cancel_list_local() local
1366 list_for_each_entry_safe(lock, next, cancels, l_bl_ast) { list_for_each_entry_safe()
1372 ldlm_lock_cancel(lock); list_for_each_entry_safe()
1374 rc = ldlm_cli_cancel_local(lock); list_for_each_entry_safe()
1381 LDLM_DEBUG(lock, "Cancel lock separately"); list_for_each_entry_safe()
1382 list_del_init(&lock->l_bl_ast); list_for_each_entry_safe()
1383 list_add(&lock->l_bl_ast, &head); list_for_each_entry_safe()
1389 list_del_init(&lock->l_bl_ast); list_for_each_entry_safe()
1390 LDLM_LOCK_RELEASE(lock); list_for_each_entry_safe()
1409 struct ldlm_lock *lock, ldlm_cancel_no_wait_policy()
1416 lock_res_and_lock(lock); ldlm_cancel_no_wait_policy()
1420 switch (lock->l_resource->lr_type) { ldlm_cancel_no_wait_policy()
1423 if (cb && cb(lock)) ldlm_cancel_no_wait_policy()
1427 lock->l_flags |= LDLM_FL_SKIPPED; ldlm_cancel_no_wait_policy()
1431 unlock_res_and_lock(lock); ldlm_cancel_no_wait_policy()
1437 * \a lock in LRU for current \a LRU size \a unused, added in current
1440 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
1442 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1445 struct ldlm_lock *lock, ldlm_cancel_lrur_policy()
1462 lock->l_last_used)); ldlm_cancel_lrur_policy()
1476 * \a lock in LRU for current \a LRU size \a unused, added in current scan \a
1479 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
1481 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1484 struct ldlm_lock *lock, ldlm_cancel_passed_policy()
1495 * Callback function for aged policy. Makes decision whether to keep \a lock in
1499 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
1501 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1504 struct ldlm_lock *lock, ldlm_cancel_aged_policy()
1508 /* Stop LRU processing if young lock is found and we reach past count */ ldlm_cancel_aged_policy()
1511 cfs_time_add(lock->l_last_used, ns->ns_max_age))) ? ldlm_cancel_aged_policy()
1516 * Callback function for default policy. Makes decision whether to keep \a lock
1520 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU in stop scanning
1522 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
1525 struct ldlm_lock *lock, ldlm_cancel_default_policy()
1568 * A client lock can be added to the l_bl_ast list only when it is
1573 * attempt to cancel a lock rely on this flag, l_bl_ast list is accessed
1599 struct ldlm_lock *lock, *next; ldlm_prepare_lru_list() local
1623 list_for_each_entry_safe(lock, next, &ns->ns_unused_list, ldlm_prepare_lru_list()
1626 LASSERT(!(lock->l_flags & LDLM_FL_BL_AST)); ldlm_prepare_lru_list()
1629 lock->l_flags & LDLM_FL_SKIPPED) ldlm_prepare_lru_list()
1634 * lock in LRU, do not traverse it again. */ ldlm_prepare_lru_list()
1635 if (!(lock->l_flags & LDLM_FL_CANCELING)) ldlm_prepare_lru_list()
1638 ldlm_lock_remove_from_lru_nolock(lock); ldlm_prepare_lru_list()
1640 if (&lock->l_lru == &ns->ns_unused_list) ldlm_prepare_lru_list()
1643 LDLM_LOCK_GET(lock); ldlm_prepare_lru_list()
1645 lu_ref_add(&lock->l_reference, __func__, current); ldlm_prepare_lru_list()
1647 /* Pass the lock through the policy filter and see if it ldlm_prepare_lru_list()
1651 * we find a lock that should stay in the cache. ldlm_prepare_lru_list()
1652 * We should take into account lock age anyway ldlm_prepare_lru_list()
1653 * as a new lock is a valuable resource even if ldlm_prepare_lru_list()
1660 result = pf(ns, lock, unused, added, count); ldlm_prepare_lru_list()
1662 lu_ref_del(&lock->l_reference, ldlm_prepare_lru_list()
1664 LDLM_LOCK_RELEASE(lock); ldlm_prepare_lru_list()
1669 lu_ref_del(&lock->l_reference, ldlm_prepare_lru_list()
1671 LDLM_LOCK_RELEASE(lock); ldlm_prepare_lru_list()
1676 lock_res_and_lock(lock); ldlm_prepare_lru_list()
1677 /* Check flags again under the lock. */ ldlm_prepare_lru_list()
1678 if ((lock->l_flags & LDLM_FL_CANCELING) || ldlm_prepare_lru_list()
1679 (ldlm_lock_remove_from_lru(lock) == 0)) { ldlm_prepare_lru_list()
1680 /* Another thread is removing lock from LRU, or ldlm_prepare_lru_list()
1683 * by itself, or the lock is no longer unused. */ ldlm_prepare_lru_list()
1684 unlock_res_and_lock(lock); ldlm_prepare_lru_list()
1685 lu_ref_del(&lock->l_reference, ldlm_prepare_lru_list()
1687 LDLM_LOCK_RELEASE(lock); ldlm_prepare_lru_list()
1691 LASSERT(!lock->l_readers && !lock->l_writers); ldlm_prepare_lru_list()
1693 /* If we have chosen to cancel this lock voluntarily, we ldlm_prepare_lru_list()
1697 * silently cancelling this lock. */ ldlm_prepare_lru_list()
1698 lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK; ldlm_prepare_lru_list()
1702 * CBPENDING is set, the lock can accumulate no more ldlm_prepare_lru_list()
1706 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING; ldlm_prepare_lru_list()
1714 LASSERT(list_empty(&lock->l_bl_ast)); ldlm_prepare_lru_list()
1715 list_add(&lock->l_bl_ast, cancels); ldlm_prepare_lru_list()
1716 unlock_res_and_lock(lock); ldlm_prepare_lru_list()
1717 lu_ref_del(&lock->l_reference, __func__, current); ldlm_prepare_lru_list()
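ldlm_prepare_lru_list() above runs every LRU candidate through a policy callback pf(); the four ldlm_cancel_*_policy() prototypes near the end of this file's hits share one signature. A sketch of a custom policy in that shape (the name and the ldlm_policy_res_t return type are assumptions; the LDLM_POLICY_* values are the ones referenced above):

        static ldlm_policy_res_t my_cancel_batch_policy(struct ldlm_namespace *ns,
                                                        struct ldlm_lock *lock,
                                                        int unused, int added,
                                                        int count)
        {
                /* stop scanning once the requested batch has been collected */
                return added >= count ? LDLM_POLICY_KEEP_LOCK :
                                        LDLM_POLICY_CANCEL_LOCK;
        }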
1774 struct ldlm_lock *lock; ldlm_cancel_resource_local() local
1778 list_for_each_entry(lock, &res->lr_granted, l_res_link) { ldlm_cancel_resource_local()
1779 if (opaque != NULL && lock->l_ast_data != opaque) { ldlm_cancel_resource_local()
1780 LDLM_ERROR(lock, "data %p doesn't match opaque %p", ldlm_cancel_resource_local()
1781 lock->l_ast_data, opaque); ldlm_cancel_resource_local()
1785 if (lock->l_readers || lock->l_writers) ldlm_cancel_resource_local()
1789 * skip this lock. */ ldlm_cancel_resource_local()
1790 if (lock->l_flags & LDLM_FL_BL_AST || ldlm_cancel_resource_local()
1791 lock->l_flags & LDLM_FL_CANCELING) ldlm_cancel_resource_local()
1794 if (lockmode_compat(lock->l_granted_mode, mode)) ldlm_cancel_resource_local()
1797 /* If policy is given and this is IBITS lock, add to list only ldlm_cancel_resource_local()
1799 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && ldlm_cancel_resource_local()
1800 !(lock->l_policy_data.l_inodebits.bits & ldlm_cancel_resource_local()
1805 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING | ldlm_cancel_resource_local()
1808 LASSERT(list_empty(&lock->l_bl_ast)); ldlm_cancel_resource_local()
1809 list_add(&lock->l_bl_ast, cancels); ldlm_cancel_resource_local()
1810 LDLM_LOCK_GET(lock); ldlm_cancel_resource_local()
1824 * separately per lock.
1832 struct ldlm_lock *lock; ldlm_cli_cancel_list() local
1845 lock = list_entry(cancels->next, struct ldlm_lock, ldlm_cli_cancel_list()
1847 LASSERT(lock->l_conn_export); ldlm_cli_cancel_list()
1849 if (exp_connect_cancelset(lock->l_conn_export)) { ldlm_cli_cancel_list()
1854 res = ldlm_cli_cancel_req(lock->l_conn_export, ldlm_cli_cancel_list()
1858 res = ldlm_cli_cancel_req(lock->l_conn_export, ldlm_cli_cancel_list()
1905 CERROR("canceling unused lock "DLDLMRES": rc = %d\n", ldlm_cli_cancel_unused_resource()
1969 struct ldlm_lock *lock; ldlm_resource_foreach() local
1977 lock = list_entry(tmp, struct ldlm_lock, l_res_link); ldlm_resource_foreach()
1979 if (iter(lock, closure) == LDLM_ITER_STOP) { ldlm_resource_foreach()
1986 lock = list_entry(tmp, struct ldlm_lock, l_res_link); ldlm_resource_foreach()
1988 if (iter(lock, closure) == LDLM_ITER_STOP) { ldlm_resource_foreach()
1995 lock = list_entry(tmp, struct ldlm_lock, l_res_link); ldlm_resource_foreach()
1997 if (iter(lock, closure) == LDLM_ITER_STOP) { ldlm_resource_foreach()
2013 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure) ldlm_iter_helper() argument
2017 return helper->iter(lock, helper->closure); ldlm_iter_helper()
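ldlm_resource_foreach() above walks the granted, converting and waiting lists and stops as soon as the iterator returns LDLM_ITER_STOP. A sketch of such an iterator (my_count_granted() is hypothetical; LDLM_ITER_CONTINUE as the "keep going" value is an assumption -- only LDLM_ITER_STOP appears in these hits):

        static int my_count_granted(struct ldlm_lock *lock, void *closure)
        {
                int *n = closure;

                /* granted when the modes match, as replay_one_lock() notes below */
                if (lock->l_granted_mode == lock->l_req_mode)
                        (*n)++;
                return LDLM_ITER_CONTINUE;
        }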
2045 /* non-blocking function to manipulate a lock whose cb_data is being put away.
2076 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure) ldlm_chain_lock_for_replay() argument
2081 LASSERTF(list_empty(&lock->l_pending_chain), ldlm_chain_lock_for_replay()
2082 "lock %p next %p prev %p\n", ldlm_chain_lock_for_replay()
2083 lock, &lock->l_pending_chain.next, ldlm_chain_lock_for_replay()
2084 &lock->l_pending_chain.prev); ldlm_chain_lock_for_replay()
2087 * on a lock so that it does not disappear under us (e.g. due to cancel) ldlm_chain_lock_for_replay()
2089 if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) { ldlm_chain_lock_for_replay()
2090 list_add(&lock->l_pending_chain, list); ldlm_chain_lock_for_replay()
2091 LDLM_LOCK_GET(lock); ldlm_chain_lock_for_replay()
2101 struct ldlm_lock *lock; replay_lock_interpret() local
2116 lock = ldlm_handle2lock(&aa->lock_handle); replay_lock_interpret()
2117 if (!lock) { replay_lock_interpret()
2126 /* Key change rehash lock in per-export hash with new key */ replay_lock_interpret()
2133 &lock->l_remote_handle, replay_lock_interpret()
2135 &lock->l_exp_hash); replay_lock_interpret()
2137 lock->l_remote_handle = reply->lock_handle; replay_lock_interpret()
2140 LDLM_DEBUG(lock, "replayed lock:"); replay_lock_interpret()
2142 LDLM_LOCK_PUT(lock); replay_lock_interpret()
2150 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) replay_one_lock() argument
2157 /* Bug 11974: Do not replay a lock which is actively being canceled */ replay_one_lock()
2158 if (lock->l_flags & LDLM_FL_CANCELING) { replay_one_lock()
2159 LDLM_DEBUG(lock, "Not replaying canceled lock:"); replay_one_lock()
2163 /* If this is a reply-less callback lock, we cannot replay it, since replay_one_lock()
2165 * lost by the network (and the server granted a conflicting lock already). */ replay_one_lock()
2166 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) { replay_one_lock()
2167 LDLM_DEBUG(lock, "Not replaying reply-less lock:"); replay_one_lock()
2168 ldlm_lock_cancel(lock); replay_one_lock()
2173 * If granted mode matches the requested mode, this lock is granted. replay_one_lock()
2183 * This happens whenever a lock enqueue is the request that triggers replay_one_lock()
2186 if (lock->l_granted_mode == lock->l_req_mode) replay_one_lock()
2188 else if (lock->l_granted_mode) replay_one_lock()
2190 else if (!list_empty(&lock->l_res_link)) replay_one_lock()
2204 ldlm_lock2desc(lock, &body->lock_desc); replay_one_lock()
2207 ldlm_lock2handle(lock, &body->lock_handle[0]); replay_one_lock()
2208 if (lock->l_lvb_len > 0) replay_one_lock()
2211 lock->l_lvb_len); replay_one_lock()
2219 LDLM_DEBUG(lock, "replaying lock:"); replay_one_lock()
2263 struct ldlm_lock *lock, *next; ldlm_replay_locks() local
2280 list_for_each_entry_safe(lock, next, &list, l_pending_chain) { ldlm_replay_locks()
2281 list_del_init(&lock->l_pending_chain); ldlm_replay_locks()
2283 LDLM_LOCK_RELEASE(lock); ldlm_replay_locks()
2286 rc = replay_one_lock(imp, lock); ldlm_replay_locks()
2287 LDLM_LOCK_RELEASE(lock); ldlm_replay_locks()
460 failed_lock_cleanup(struct ldlm_namespace *ns, struct ldlm_lock *lock, int mode) failed_lock_cleanup() argument
1408 ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) ldlm_cancel_no_wait_policy() argument
1444 ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) ldlm_cancel_lrur_policy() argument
1483 ldlm_cancel_passed_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) ldlm_cancel_passed_policy() argument
1503 ldlm_cancel_aged_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) ldlm_cancel_aged_policy() argument
1524 ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock, int unused, int added, int count) ldlm_cancel_default_policy() argument
H A Dldlm_lockd.c125 int ldlm_del_waiting_lock(struct ldlm_lock *lock) ldlm_del_waiting_lock() argument
130 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout) ldlm_refresh_waiting_lock() argument
143 struct ldlm_lock_desc *ld, struct ldlm_lock *lock) ldlm_handle_bl_callback()
147 LDLM_DEBUG(lock, "client blocking AST callback handler"); ldlm_handle_bl_callback()
149 lock_res_and_lock(lock); ldlm_handle_bl_callback()
150 lock->l_flags |= LDLM_FL_CBPENDING; ldlm_handle_bl_callback()
152 if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) ldlm_handle_bl_callback()
153 lock->l_flags |= LDLM_FL_CANCEL; ldlm_handle_bl_callback()
155 do_ast = !lock->l_readers && !lock->l_writers; ldlm_handle_bl_callback()
156 unlock_res_and_lock(lock); ldlm_handle_bl_callback()
160 "Lock %p already unused, calling callback (%p)\n", lock, ldlm_handle_bl_callback()
161 lock->l_blocking_ast); ldlm_handle_bl_callback()
162 if (lock->l_blocking_ast != NULL) ldlm_handle_bl_callback()
163 lock->l_blocking_ast(lock, ld, lock->l_ast_data, ldlm_handle_bl_callback()
168 lock); ldlm_handle_bl_callback()
171 LDLM_DEBUG(lock, "client blocking callback handler END"); ldlm_handle_bl_callback()
172 LDLM_LOCK_RELEASE(lock); ldlm_handle_bl_callback()
183 struct ldlm_lock *lock) ldlm_handle_cp_callback()
189 LDLM_DEBUG(lock, "client completion callback handler START"); ldlm_handle_cp_callback()
197 if (lock->l_granted_mode == lock->l_req_mode || ldlm_handle_cp_callback()
198 lock->l_flags & LDLM_FL_DESTROYED) ldlm_handle_cp_callback()
205 LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len); ldlm_handle_cp_callback()
209 if (lock->l_lvb_len > 0) { ldlm_handle_cp_callback()
210 /* for extent lock, lvb contains ost_lvb{}. */ ldlm_handle_cp_callback()
211 LASSERT(lock->l_lvb_data != NULL); ldlm_handle_cp_callback()
213 if (unlikely(lock->l_lvb_len < lvb_len)) { ldlm_handle_cp_callback()
214 LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d", ldlm_handle_cp_callback()
215 lock->l_lvb_len, lvb_len); ldlm_handle_cp_callback()
219 } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has ldlm_handle_cp_callback()
225 LDLM_ERROR(lock, "No memory: %d.\n", lvb_len); ldlm_handle_cp_callback()
230 lock_res_and_lock(lock); ldlm_handle_cp_callback()
231 LASSERT(lock->l_lvb_data == NULL); ldlm_handle_cp_callback()
232 lock->l_lvb_type = LVB_T_LAYOUT; ldlm_handle_cp_callback()
233 lock->l_lvb_data = lvb_data; ldlm_handle_cp_callback()
234 lock->l_lvb_len = lvb_len; ldlm_handle_cp_callback()
235 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
239 lock_res_and_lock(lock); ldlm_handle_cp_callback()
240 if ((lock->l_flags & LDLM_FL_DESTROYED) || ldlm_handle_cp_callback()
241 lock->l_granted_mode == lock->l_req_mode) { ldlm_handle_cp_callback()
242 /* bug 11300: the lock has already been granted */ ldlm_handle_cp_callback()
243 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
244 LDLM_DEBUG(lock, "Double grant race happened"); ldlm_handle_cp_callback()
250 * then we might need to switch lock modes, resources, or extents. */ ldlm_handle_cp_callback()
251 if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) { ldlm_handle_cp_callback()
252 lock->l_req_mode = dlm_req->lock_desc.l_granted_mode; ldlm_handle_cp_callback()
253 LDLM_DEBUG(lock, "completion AST, new lock mode"); ldlm_handle_cp_callback()
256 if (lock->l_resource->lr_type != LDLM_PLAIN) { ldlm_handle_cp_callback()
260 &lock->l_policy_data); ldlm_handle_cp_callback()
261 LDLM_DEBUG(lock, "completion AST, new policy data"); ldlm_handle_cp_callback()
264 ldlm_resource_unlink_lock(lock); ldlm_handle_cp_callback()
266 &lock->l_resource->lr_name, ldlm_handle_cp_callback()
267 sizeof(lock->l_resource->lr_name)) != 0) { ldlm_handle_cp_callback()
268 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
269 rc = ldlm_lock_change_resource(ns, lock, ldlm_handle_cp_callback()
272 LDLM_ERROR(lock, "Failed to allocate resource"); ldlm_handle_cp_callback()
275 LDLM_DEBUG(lock, "completion AST, new resource"); ldlm_handle_cp_callback()
277 lock_res_and_lock(lock); ldlm_handle_cp_callback()
283 ldlm_lock_remove_from_lru(lock); ldlm_handle_cp_callback()
284 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; ldlm_handle_cp_callback()
285 LDLM_DEBUG(lock, "completion AST includes blocking AST"); ldlm_handle_cp_callback()
288 if (lock->l_lvb_len > 0) { ldlm_handle_cp_callback()
289 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT, ldlm_handle_cp_callback()
290 lock->l_lvb_data, lvb_len); ldlm_handle_cp_callback()
292 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
297 ldlm_grant_lock(lock, &ast_list); ldlm_handle_cp_callback()
298 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
300 LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work"); ldlm_handle_cp_callback()
308 LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)", ldlm_handle_cp_callback()
309 lock); ldlm_handle_cp_callback()
314 lock_res_and_lock(lock); ldlm_handle_cp_callback()
315 lock->l_flags |= LDLM_FL_FAILED; ldlm_handle_cp_callback()
316 unlock_res_and_lock(lock); ldlm_handle_cp_callback()
317 wake_up(&lock->l_waitq); ldlm_handle_cp_callback()
319 LDLM_LOCK_RELEASE(lock); ldlm_handle_cp_callback()
326 * we also consider dropping the lock here if it is unused locally for a
332 struct ldlm_lock *lock) ldlm_handle_gl_callback()
336 LDLM_DEBUG(lock, "client glimpse AST callback handler"); ldlm_handle_gl_callback()
338 if (lock->l_glimpse_ast != NULL) ldlm_handle_gl_callback()
339 rc = lock->l_glimpse_ast(lock, req); ldlm_handle_gl_callback()
348 lock_res_and_lock(lock); ldlm_handle_gl_callback()
349 if (lock->l_granted_mode == LCK_PW && ldlm_handle_gl_callback()
350 !lock->l_readers && !lock->l_writers && ldlm_handle_gl_callback()
352 cfs_time_add(lock->l_last_used, ldlm_handle_gl_callback()
354 unlock_res_and_lock(lock); ldlm_handle_gl_callback()
355 if (ldlm_bl_to_thread_lock(ns, NULL, lock)) ldlm_handle_gl_callback()
356 ldlm_handle_bl_callback(ns, NULL, lock); ldlm_handle_gl_callback()
360 unlock_res_and_lock(lock); ldlm_handle_gl_callback()
361 LDLM_LOCK_RELEASE(lock); ldlm_handle_gl_callback()
408 struct ldlm_lock *lock, init_blwi()
426 blwi->blwi_lock = lock; init_blwi()
433 * then the lock referenced as \a lock is queued instead.
435 * The blocking thread would then call ->l_blocking_ast callback in the lock.
441 struct ldlm_lock *lock, ldlm_bl_to_thread()
454 init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags); ldlm_bl_to_thread()
464 init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags); ldlm_bl_to_thread()
471 struct ldlm_lock *lock) ldlm_bl_to_thread_lock()
473 return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC); ldlm_bl_to_thread_lock()
531 "%s: [nid %s] [rc %d] [lock %#llx]", ldlm_callback_errmsg()
562 struct ldlm_lock *lock; ldlm_callback_handler() local
630 /* Force a known safe race, send a cancel to the server for a lock ldlm_callback_handler()
639 lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0); ldlm_callback_handler()
640 if (!lock) { ldlm_callback_handler()
641 CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n", ldlm_callback_handler()
649 if ((lock->l_flags & LDLM_FL_FAIL_LOC) && ldlm_callback_handler()
654 lock_res_and_lock(lock); ldlm_callback_handler()
655 lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags & ldlm_callback_handler()
658 /* If somebody cancels lock and cache is already dropped, ldlm_callback_handler()
659 * or lock is failed before cp_ast received on client, ldlm_callback_handler()
660 * we can tell the server we have no lock. Otherwise, we ldlm_callback_handler()
662 if (((lock->l_flags & LDLM_FL_CANCELING) && ldlm_callback_handler()
663 (lock->l_flags & LDLM_FL_BL_DONE)) || ldlm_callback_handler()
664 (lock->l_flags & LDLM_FL_FAILED)) { ldlm_callback_handler()
665 LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n", ldlm_callback_handler()
667 unlock_res_and_lock(lock); ldlm_callback_handler()
668 LDLM_LOCK_RELEASE(lock); ldlm_callback_handler()
670 ldlm_callback_errmsg(req, "Operate on stale lock", rc, ldlm_callback_handler()
676 ldlm_lock_remove_from_lru(lock); ldlm_callback_handler()
677 lock->l_flags |= LDLM_FL_BL_AST; ldlm_callback_handler()
679 unlock_res_and_lock(lock); ldlm_callback_handler()
694 if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) { ldlm_callback_handler()
700 if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock)) ldlm_callback_handler()
701 ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock); ldlm_callback_handler()
707 ldlm_handle_cp_callback(req, ns, dlm_req, lock); ldlm_callback_handler()
712 ldlm_handle_gl_callback(req, ns, dlm_req, lock); ldlm_callback_handler()
901 * Export handle<->lock hash operations.
912 struct ldlm_lock *lock; ldlm_export_lock_key() local
914 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash); ldlm_export_lock_key()
915 return &lock->l_remote_handle; ldlm_export_lock_key()
921 struct ldlm_lock *lock; ldlm_export_lock_keycpy() local
923 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash); ldlm_export_lock_keycpy()
924 lock->l_remote_handle = *(struct lustre_handle *)key; ldlm_export_lock_keycpy()
942 struct ldlm_lock *lock; ldlm_export_lock_get() local
944 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash); ldlm_export_lock_get()
945 LDLM_LOCK_GET(lock); ldlm_export_lock_get()
951 struct ldlm_lock *lock; ldlm_export_lock_put() local
953 lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash); ldlm_export_lock_put()
954 LDLM_LOCK_RELEASE(lock); ldlm_export_lock_put()
142 ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock) ldlm_handle_bl_callback() argument
180 ldlm_handle_cp_callback(struct ptlrpc_request *req, struct ldlm_namespace *ns, struct ldlm_request *dlm_req, struct ldlm_lock *lock) ldlm_handle_cp_callback() argument
329 ldlm_handle_gl_callback(struct ptlrpc_request *req, struct ldlm_namespace *ns, struct ldlm_request *dlm_req, struct ldlm_lock *lock) ldlm_handle_gl_callback() argument
404 init_blwi(struct ldlm_bl_work_item *blwi, struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, struct ldlm_lock *lock, ldlm_cancel_flags_t cancel_flags) init_blwi() argument
439 ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock, struct list_head *cancels, int count, ldlm_cancel_flags_t cancel_flags) ldlm_bl_to_thread() argument
470 ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock) ldlm_bl_to_thread_lock() argument
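The blocking-callback path above follows a simple rule: try to hand the work to the dedicated blocking threads via ldlm_bl_to_thread_lock(), and if that returns non-zero, run ldlm_handle_bl_callback() synchronously in the caller so the callback is never dropped. A minimal userspace sketch of that queue-or-run-inline fallback; every name below is invented for illustration only.

    #include <stdio.h>

    struct bl_work {
        void (*handler)(void *lock);
        void *lock;
    };

    /* Stand-in for ldlm_bl_to_thread_lock(): returns 0 if the work was
     * queued to a worker thread, non-zero if no worker could take it. */
    static int queue_to_blocking_thread(struct bl_work *work)
    {
        (void)work;
        return -1;          /* pretend the thread pool is unavailable */
    }

    static void handle_blocking_callback(void *lock)
    {
        printf("handling blocking callback for lock %p inline\n", lock);
    }

    int main(void)
    {
        int fake_lock;
        struct bl_work work = { handle_blocking_callback, &fake_lock };

        /* Same shape as the callers above: queue if possible, otherwise
         * fall back to synchronous handling. */
        if (queue_to_blocking_thread(&work))
            work.handler(work.lock);
        return 0;
    }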
H A Dldlm_extent.c43 * This file contains implementation of EXTENT lock type
45 * EXTENT lock type is for locking a contiguous range of values, represented
47 * lock modes, some of which may be mutually incompatible. Extent locks are
49 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
62 /* When a lock is cancelled by a client, the KMS may undergo change if this
63 * is the "highest lock". This function returns the new KMS value.
66 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */ ldlm_extent_shift_kms()
67 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) ldlm_extent_shift_kms() argument
69 struct ldlm_resource *res = lock->l_resource; ldlm_extent_shift_kms()
75 * just after we finish and take our lock into account in its ldlm_extent_shift_kms()
77 lock->l_flags |= LDLM_FL_KMS_IGNORE; ldlm_extent_shift_kms()
100 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) ldlm_interval_alloc() argument
104 LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); ldlm_interval_alloc()
110 ldlm_interval_attach(node, lock); ldlm_interval_alloc()
160 /** Add newly granted lock into interval tree for the resource. */ ldlm_extent_add_lock()
162 struct ldlm_lock *lock) ldlm_extent_add_lock()
169 LASSERT(lock->l_granted_mode == lock->l_req_mode); ldlm_extent_add_lock()
171 node = lock->l_tree_node; ldlm_extent_add_lock()
175 idx = lock_mode_to_index(lock->l_granted_mode); ldlm_extent_add_lock()
176 LASSERT(lock->l_granted_mode == 1 << idx); ldlm_extent_add_lock()
177 LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode); ldlm_extent_add_lock()
180 extent = &lock->l_policy_data.l_extent; ldlm_extent_add_lock()
188 tmp = ldlm_interval_detach(lock); ldlm_extent_add_lock()
191 ldlm_interval_attach(to_ldlm_interval(found), lock); ldlm_extent_add_lock() local
195 /* even though we use interval tree to manage the extent lock, we also ldlm_extent_add_lock()
197 ldlm_resource_add_lock(res, &res->lr_granted, lock); ldlm_extent_add_lock()
200 /** Remove cancelled lock from resource interval tree. */ ldlm_extent_unlink_lock()
201 void ldlm_extent_unlink_lock(struct ldlm_lock *lock) ldlm_extent_unlink_lock() argument
203 struct ldlm_resource *res = lock->l_resource; ldlm_extent_unlink_lock()
204 struct ldlm_interval *node = lock->l_tree_node; ldlm_extent_unlink_lock()
211 idx = lock_mode_to_index(lock->l_granted_mode); ldlm_extent_unlink_lock()
212 LASSERT(lock->l_granted_mode == 1 << idx); ldlm_extent_unlink_lock()
218 node = ldlm_interval_detach(lock); ldlm_extent_unlink_lock()
161 ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock) ldlm_extent_add_lock() argument
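The LASSERT()s above pin down how the interval-tree slot is chosen: a granted mode is a single bit and lock->l_granted_mode == 1 << idx, so the index is just that bit's position. The real lock_mode_to_index() helper is not part of this excerpt; the following is only an assumed sketch of a helper with that contract.

    #include <assert.h>
    #include <strings.h>    /* ffs() */

    /* Map a single-bit lock mode to its tree index, so that
     * mode == 1 << index holds, matching the LASSERT()s above.
     * e.g. mode_to_index(0x8) == 3. */
    static int mode_to_index(unsigned int mode)
    {
        assert(mode != 0 && (mode & (mode - 1)) == 0);  /* exactly one bit set */
        return ffs(mode) - 1;                           /* 0-based bit position */
    }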
H A Dldlm_internal.h115 int ldlm_get_enq_timeout(struct ldlm_lock *lock);
141 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
142 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
155 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
160 int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
161 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
162 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
163 void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
164 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
165 void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
171 struct ldlm_lock *lock);
178 struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
186 void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
187 void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
215 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
217 /* this function must be called with res lock held */
221 struct ldlm_lock *lock; ldlm_interval_extent() local
225 lock = list_entry(node->li_group.next, struct ldlm_lock, ldlm_interval_extent()
227 return &lock->l_policy_data.l_extent; ldlm_interval_extent()
278 static inline int is_granted_or_cancelled(struct ldlm_lock *lock) is_granted_or_cancelled() argument
282 lock_res_and_lock(lock); is_granted_or_cancelled()
283 if (((lock->l_req_mode == lock->l_granted_mode) && is_granted_or_cancelled()
284 !(lock->l_flags & LDLM_FL_CP_REQD)) || is_granted_or_cancelled()
285 (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL))) is_granted_or_cancelled()
287 unlock_res_and_lock(lock); is_granted_or_cancelled()
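is_granted_or_cancelled() above is a boolean predicate evaluated under the resource lock, of the kind typically re-checked inside a wait loop until the lock is either granted or gone. A generic userspace analogue of that wait-on-predicate pattern; the struct and field names here are illustrative, not the ldlm ones.

    #include <pthread.h>
    #include <stdbool.h>

    struct waited_lock {
        pthread_mutex_t mtx;        /* plays the role of the resource lock */
        pthread_cond_t  cond;
        bool            granted;
        bool            cancelled;
    };

    /* Sleep until the lock is either granted or cancelled, re-evaluating
     * the predicate under the mutex after every wakeup. */
    static void wait_granted_or_cancelled(struct waited_lock *wl)
    {
        pthread_mutex_lock(&wl->mtx);
        while (!wl->granted && !wl->cancelled)
            pthread_cond_wait(&wl->cond, &wl->mtx);
        pthread_mutex_unlock(&wl->mtx);
    }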
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dcl_lock.c71 * Basic lock invariant that is maintained at all times. Caller either has a
72 * reference to \a lock, or somehow assures that \a lock cannot be freed.
77 const struct cl_lock *lock) cl_lock_invariant_trusted()
79 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) && cl_lock_invariant_trusted()
80 atomic_read(&lock->cll_ref) >= lock->cll_holds && cl_lock_invariant_trusted()
81 lock->cll_holds >= lock->cll_users && cl_lock_invariant_trusted()
82 lock->cll_holds >= 0 && cl_lock_invariant_trusted()
83 lock->cll_users >= 0 && cl_lock_invariant_trusted()
84 lock->cll_depth >= 0; cl_lock_invariant_trusted()
88 * Stronger lock invariant, checking that caller has a reference on a lock.
93 const struct cl_lock *lock) cl_lock_invariant()
97 result = atomic_read(&lock->cll_ref) > 0 && cl_lock_invariant()
98 cl_lock_invariant_trusted(env, lock); cl_lock_invariant()
100 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken"); cl_lock_invariant()
105 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
107 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock) cl_lock_nesting() argument
109 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting; cl_lock_nesting()
113 * Returns a set of counters for this lock, depending on a lock nesting.
116 const struct cl_lock *lock) cl_lock_counters()
122 nesting = cl_lock_nesting(lock); cl_lock_counters()
128 const char *prefix, const struct cl_lock *lock, cl_lock_trace0()
131 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj); cl_lock_trace0()
133 prefix, lock, atomic_read(&lock->cll_ref), cl_lock_trace0()
134 lock->cll_guarder, lock->cll_depth, cl_lock_trace0()
135 lock->cll_state, lock->cll_error, lock->cll_holds, cl_lock_trace0()
136 lock->cll_users, lock->cll_flags, cl_lock_trace0()
140 #define cl_lock_trace(level, env, prefix, lock) \
141 cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
148 static void cl_lock_lockdep_init(struct cl_lock *lock) cl_lock_lockdep_init() argument
150 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT"); cl_lock_lockdep_init()
154 struct cl_lock *lock, __u32 enqflags) cl_lock_lockdep_acquire()
156 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++; cl_lock_lockdep_acquire()
157 lock_map_acquire(&lock->dep_map); cl_lock_lockdep_acquire()
161 struct cl_lock *lock) cl_lock_lockdep_release()
163 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--; cl_lock_lockdep_release()
164 lock_release(&lock->dep_map, 0, RETIP); cl_lock_lockdep_release()
169 static void cl_lock_lockdep_init(struct cl_lock *lock) cl_lock_lockdep_init() argument
172 struct cl_lock *lock, __u32 enqflags) cl_lock_lockdep_acquire()
175 struct cl_lock *lock) cl_lock_lockdep_release()
181 * Adds lock slice to the compound lock.
184 * per-layer state to the lock. New state is added at the end of
189 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice, cl_lock_slice_add() argument
193 slice->cls_lock = lock; cl_lock_slice_add()
194 list_add_tail(&slice->cls_linkage, &lock->cll_layers); cl_lock_slice_add()
201 * Returns true iff a lock with the mode \a has provides at least the same
202 * guarantees as a lock with the mode \a need.
222 * Returns true iff extent portions of lock descriptions match.
236 * Returns true iff a lock with the description \a has provides at least the
237 * same guarantees as a lock with the description \a need.
248 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock) cl_lock_free() argument
250 struct cl_object *obj = lock->cll_descr.cld_obj; cl_lock_free()
252 LINVRNT(!cl_lock_is_mutexed(lock)); cl_lock_free()
254 cl_lock_trace(D_DLMTRACE, env, "free lock", lock); cl_lock_free()
256 while (!list_empty(&lock->cll_layers)) { cl_lock_free()
259 slice = list_entry(lock->cll_layers.next, cl_lock_free()
261 list_del_init(lock->cll_layers.next); cl_lock_free()
265 CS_LOCKSTATE_DEC(obj, lock->cll_state); cl_lock_free()
266 lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock); cl_lock_free()
268 lu_ref_fini(&lock->cll_reference); cl_lock_free()
269 lu_ref_fini(&lock->cll_holders); cl_lock_free()
270 mutex_destroy(&lock->cll_guard); cl_lock_free()
271 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem); cl_lock_free()
275 * Releases a reference on a lock.
277 * When last reference is released, lock is returned to the cache, unless it
283 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock) cl_lock_put() argument
287 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_put()
288 obj = lock->cll_descr.cld_obj; cl_lock_put()
292 atomic_read(&lock->cll_ref), lock, RETIP); cl_lock_put()
294 if (atomic_dec_and_test(&lock->cll_ref)) { cl_lock_put()
295 if (lock->cll_state == CLS_FREEING) { cl_lock_put()
296 LASSERT(list_empty(&lock->cll_linkage)); cl_lock_put()
297 cl_lock_free(env, lock); cl_lock_put()
305 * Acquires an additional reference to a lock.
308 * lock.
312 void cl_lock_get(struct cl_lock *lock) cl_lock_get() argument
314 LINVRNT(cl_lock_invariant(NULL, lock)); cl_lock_get()
316 atomic_read(&lock->cll_ref), lock, RETIP); cl_lock_get()
317 atomic_inc(&lock->cll_ref); cl_lock_get()
322 * Acquires a reference to a lock.
325 * acquire initial reference to the cached lock. Caller has to deal with all
330 void cl_lock_get_trust(struct cl_lock *lock) cl_lock_get_trust() argument
333 atomic_read(&lock->cll_ref), lock, RETIP); cl_lock_get_trust()
334 if (atomic_inc_return(&lock->cll_ref) == 1) cl_lock_get_trust()
335 CS_LOCK_INC(lock->cll_descr.cld_obj, busy); cl_lock_get_trust()
340 * Helper function destroying the lock that wasn't completely initialized.
342 * Other threads can acquire references to the top-lock through its
345 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock) cl_lock_finish() argument
347 cl_lock_mutex_get(env, lock); cl_lock_finish()
348 cl_lock_cancel(env, lock); cl_lock_finish()
349 cl_lock_delete(env, lock); cl_lock_finish()
350 cl_lock_mutex_put(env, lock); cl_lock_finish()
351 cl_lock_put(env, lock); cl_lock_finish()
359 struct cl_lock *lock; cl_lock_alloc() local
362 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS); cl_lock_alloc()
363 if (lock != NULL) { cl_lock_alloc()
364 atomic_set(&lock->cll_ref, 1); cl_lock_alloc()
365 lock->cll_descr = *descr; cl_lock_alloc()
366 lock->cll_state = CLS_NEW; cl_lock_alloc()
368 lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", cl_lock_alloc()
369 lock); cl_lock_alloc()
370 INIT_LIST_HEAD(&lock->cll_layers); cl_lock_alloc()
371 INIT_LIST_HEAD(&lock->cll_linkage); cl_lock_alloc()
372 INIT_LIST_HEAD(&lock->cll_inclosure); cl_lock_alloc()
373 lu_ref_init(&lock->cll_reference); cl_lock_alloc()
374 lu_ref_init(&lock->cll_holders); cl_lock_alloc()
375 mutex_init(&lock->cll_guard); cl_lock_alloc()
376 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class); cl_lock_alloc()
377 init_waitqueue_head(&lock->cll_wq); cl_lock_alloc()
382 cl_lock_lockdep_init(lock); cl_lock_alloc()
387 err = obj->co_ops->coo_lock_init(env, obj, lock, io); cl_lock_alloc()
389 cl_lock_finish(env, lock); cl_lock_alloc()
390 lock = ERR_PTR(err); cl_lock_alloc()
395 lock = ERR_PTR(-ENOMEM); cl_lock_alloc()
396 return lock; cl_lock_alloc()
400 * Transfer the lock into INTRANSIT state and return the original state.
407 struct cl_lock *lock) cl_lock_intransit()
409 enum cl_lock_state state = lock->cll_state; cl_lock_intransit()
411 LASSERT(cl_lock_is_mutexed(lock)); cl_lock_intransit()
414 "Malformed lock state %d.\n", state); cl_lock_intransit()
416 cl_lock_state_set(env, lock, CLS_INTRANSIT); cl_lock_intransit()
417 lock->cll_intransit_owner = current; cl_lock_intransit()
418 cl_lock_hold_add(env, lock, "intransit", current); cl_lock_intransit()
424 * Exit the intransit state and restore the lock state to the original state
426 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock, cl_lock_extransit() argument
429 LASSERT(cl_lock_is_mutexed(lock)); cl_lock_extransit()
430 LASSERT(lock->cll_state == CLS_INTRANSIT); cl_lock_extransit()
432 LASSERT(lock->cll_intransit_owner == current); cl_lock_extransit()
434 lock->cll_intransit_owner = NULL; cl_lock_extransit()
435 cl_lock_state_set(env, lock, state); cl_lock_extransit()
436 cl_lock_unhold(env, lock, "intransit", current); cl_lock_extransit()
441 * Checking whether the lock is intransit state
443 int cl_lock_is_intransit(struct cl_lock *lock) cl_lock_is_intransit() argument
445 LASSERT(cl_lock_is_mutexed(lock)); cl_lock_is_intransit()
446 return lock->cll_state == CLS_INTRANSIT && cl_lock_is_intransit()
447 lock->cll_intransit_owner != current; cl_lock_is_intransit()
451 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
456 const struct cl_lock *lock, cl_lock_fits_into()
462 LINVRNT(cl_lock_invariant_trusted(env, lock)); cl_lock_fits_into()
463 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_lock_fits_into()
476 struct cl_lock *lock; cl_lock_lookup() local
482 list_for_each_entry(lock, &head->coh_locks, cll_linkage) { cl_lock_lookup()
485 matched = cl_lock_ext_match(&lock->cll_descr, need) && cl_lock_lookup()
486 lock->cll_state < CLS_FREEING && cl_lock_lookup()
487 lock->cll_error == 0 && cl_lock_lookup()
488 !(lock->cll_flags & CLF_CANCELLED) && cl_lock_lookup()
489 cl_lock_fits_into(env, lock, need, io); cl_lock_lookup()
491 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need), cl_lock_lookup()
494 cl_lock_get_trust(lock); cl_lock_lookup()
496 return lock; cl_lock_lookup()
503 * Returns a lock matching description \a need.
506 * cache (implemented as a per-object linked list) is consulted. If lock is
507 * found there, it is returned immediately. Otherwise new lock is allocated
508 * and returned. In any case, additional reference to lock is acquired.
518 struct cl_lock *lock; cl_lock_find() local
524 lock = cl_lock_lookup(env, obj, io, need); cl_lock_find()
527 if (lock == NULL) { cl_lock_find()
528 lock = cl_lock_alloc(env, obj, io, need); cl_lock_find()
529 if (!IS_ERR(lock)) { cl_lock_find()
535 cl_lock_get_trust(lock); cl_lock_find()
536 list_add_tail(&lock->cll_linkage, cl_lock_find()
544 * top-lock through its sub-locks. Hence, it cl_lock_find()
547 cl_lock_finish(env, lock); cl_lock_find()
548 lock = ghost; cl_lock_find()
552 return lock; cl_lock_find()
556 * Returns existing lock matching given description. This is similar to
557 * cl_lock_find() except that no new lock is created, and returned lock is
566 struct cl_lock *lock; cl_lock_peek() local
573 lock = cl_lock_lookup(env, obj, io, need); cl_lock_peek()
575 if (lock == NULL) cl_lock_peek()
578 cl_lock_mutex_get(env, lock); cl_lock_peek()
579 if (lock->cll_state == CLS_INTRANSIT) cl_lock_peek()
581 cl_lock_state_wait(env, lock); cl_lock_peek()
582 if (lock->cll_state == CLS_FREEING) { cl_lock_peek()
583 cl_lock_mutex_put(env, lock); cl_lock_peek()
584 cl_lock_put(env, lock); cl_lock_peek()
585 lock = NULL; cl_lock_peek()
587 } while (lock == NULL); cl_lock_peek()
589 cl_lock_hold_add(env, lock, scope, source); cl_lock_peek()
590 cl_lock_user_add(env, lock); cl_lock_peek()
591 if (lock->cll_state == CLS_CACHED) cl_lock_peek()
592 cl_use_try(env, lock, 1); cl_lock_peek()
593 if (lock->cll_state == CLS_HELD) { cl_lock_peek()
594 cl_lock_mutex_put(env, lock); cl_lock_peek()
595 cl_lock_lockdep_acquire(env, lock, 0); cl_lock_peek()
596 cl_lock_put(env, lock); cl_lock_peek()
598 cl_unuse_try(env, lock); cl_lock_peek()
599 cl_lock_unhold(env, lock, scope, source); cl_lock_peek()
600 cl_lock_mutex_put(env, lock); cl_lock_peek()
601 cl_lock_put(env, lock); cl_lock_peek()
602 lock = NULL; cl_lock_peek()
605 return lock; cl_lock_peek()
610 * Returns a slice within a lock, corresponding to the given layer in the
615 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, cl_lock_at() argument
620 LINVRNT(cl_lock_invariant_trusted(NULL, lock)); cl_lock_at()
622 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_lock_at()
630 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock) cl_lock_mutex_tail() argument
634 counters = cl_lock_counters(env, lock); cl_lock_mutex_tail()
635 lock->cll_depth++; cl_lock_mutex_tail()
637 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock); cl_lock_mutex_tail()
638 cl_lock_trace(D_TRACE, env, "got mutex", lock); cl_lock_mutex_tail()
645 * transitions in the lock state machine.
647 * \post cl_lock_is_mutexed(lock)
651 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock) cl_lock_mutex_get() argument
653 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_mutex_get()
655 if (lock->cll_guarder == current) { cl_lock_mutex_get()
656 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_mutex_get()
657 LINVRNT(lock->cll_depth > 0); cl_lock_mutex_get()
663 LINVRNT(lock->cll_guarder != current); cl_lock_mutex_get()
664 hdr = cl_object_header(lock->cll_descr.cld_obj); cl_lock_mutex_get()
671 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting); cl_lock_mutex_get()
672 lock->cll_guarder = current; cl_lock_mutex_get()
673 LINVRNT(lock->cll_depth == 0); cl_lock_mutex_get()
675 cl_lock_mutex_tail(env, lock); cl_lock_mutex_get()
682 * \retval 0 \a lock was successfully locked
684 * \retval -EBUSY \a lock cannot be locked right now
686 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
690 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock) cl_lock_mutex_try() argument
694 LINVRNT(cl_lock_invariant_trusted(env, lock)); cl_lock_mutex_try()
697 if (lock->cll_guarder == current) { cl_lock_mutex_try()
698 LINVRNT(lock->cll_depth > 0); cl_lock_mutex_try()
699 cl_lock_mutex_tail(env, lock); cl_lock_mutex_try()
700 } else if (mutex_trylock(&lock->cll_guard)) { cl_lock_mutex_try()
701 LINVRNT(lock->cll_depth == 0); cl_lock_mutex_try()
702 lock->cll_guarder = current; cl_lock_mutex_try()
703 cl_lock_mutex_tail(env, lock); cl_lock_mutex_try()
713 * \pre cl_lock_is_mutexed(lock)
717 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock) cl_lock_mutex_put() argument
721 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_mutex_put()
722 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_mutex_put()
723 LINVRNT(lock->cll_guarder == current); cl_lock_mutex_put()
724 LINVRNT(lock->cll_depth > 0); cl_lock_mutex_put()
726 counters = cl_lock_counters(env, lock); cl_lock_mutex_put()
729 cl_lock_trace(D_TRACE, env, "put mutex", lock); cl_lock_mutex_put()
730 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock); cl_lock_mutex_put()
732 if (--lock->cll_depth == 0) { cl_lock_mutex_put()
733 lock->cll_guarder = NULL; cl_lock_mutex_put()
734 mutex_unlock(&lock->cll_guard); cl_lock_mutex_put()
740 * Returns true iff lock's mutex is owned by the current thread.
742 int cl_lock_is_mutexed(struct cl_lock *lock) cl_lock_is_mutexed() argument
744 return lock->cll_guarder == current; cl_lock_is_mutexed()
769 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) cl_lock_cancel0() argument
771 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_cancel0()
772 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_cancel0()
773 if (!(lock->cll_flags & CLF_CANCELLED)) { cl_lock_cancel0()
776 lock->cll_flags |= CLF_CANCELLED; cl_lock_cancel0()
777 list_for_each_entry_reverse(slice, &lock->cll_layers, cl_lock_cancel0()
785 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) cl_lock_delete0() argument
790 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_delete0()
791 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_delete0()
793 if (lock->cll_state < CLS_FREEING) { cl_lock_delete0()
796 LASSERT(lock->cll_state != CLS_INTRANSIT); cl_lock_delete0()
797 cl_lock_state_set(env, lock, CLS_FREEING); cl_lock_delete0()
799 head = cl_object_header(lock->cll_descr.cld_obj); cl_lock_delete0()
802 in_cache = !list_empty(&lock->cll_linkage); cl_lock_delete0()
804 list_del_init(&lock->cll_linkage); cl_lock_delete0()
808 cl_lock_put(env, lock); cl_lock_delete0()
811 * From now on, no new references to this lock can be acquired cl_lock_delete0()
814 list_for_each_entry_reverse(slice, &lock->cll_layers, cl_lock_delete0()
820 * From now on, no new references to this lock can be acquired cl_lock_delete0()
822 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in cl_lock_delete0()
832 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
833 * top-lock (nesting == 0) accounts for this modification in the per-thread
834 * debugging counters. Sub-lock holds can be released by a thread different
837 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock, cl_lock_hold_mod() argument
843 lock->cll_holds += delta; cl_lock_hold_mod()
844 nesting = cl_lock_nesting(lock); cl_lock_hold_mod()
853 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
856 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock, cl_lock_used_mod() argument
862 lock->cll_users += delta; cl_lock_used_mod()
863 nesting = cl_lock_nesting(lock); cl_lock_used_mod()
871 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, cl_lock_hold_release() argument
874 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_hold_release()
875 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_hold_release()
876 LASSERT(lock->cll_holds > 0); cl_lock_hold_release()
878 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock); cl_lock_hold_release()
879 lu_ref_del(&lock->cll_holders, scope, source); cl_lock_hold_release()
880 cl_lock_hold_mod(env, lock, -1); cl_lock_hold_release()
881 if (lock->cll_holds == 0) { cl_lock_hold_release()
882 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock); cl_lock_hold_release()
883 if (lock->cll_descr.cld_mode == CLM_PHANTOM || cl_lock_hold_release()
884 lock->cll_descr.cld_mode == CLM_GROUP || cl_lock_hold_release()
885 lock->cll_state != CLS_CACHED) cl_lock_hold_release()
887 * If lock is still phantom or grouplock when user is cl_lock_hold_release()
888 * done with it---destroy the lock. cl_lock_hold_release()
890 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED; cl_lock_hold_release()
891 if (lock->cll_flags & CLF_CANCELPEND) { cl_lock_hold_release()
892 lock->cll_flags &= ~CLF_CANCELPEND; cl_lock_hold_release()
893 cl_lock_cancel0(env, lock); cl_lock_hold_release()
895 if (lock->cll_flags & CLF_DOOMED) { cl_lock_hold_release()
897 lock->cll_flags &= ~CLF_DOOMED; cl_lock_hold_release()
898 cl_lock_delete0(env, lock); cl_lock_hold_release()
905 * Waits until lock state is changed.
908 * mutex and goes to sleep, waiting for a lock state change (signaled by
911 * This function is used to wait until lock state machine makes some progress
912 * and to emulate synchronous operations on top of asynchronous lock
919 * \pre cl_lock_is_mutexed(lock)
923 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) cl_lock_state_wait() argument
929 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_state_wait()
930 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_state_wait()
931 LASSERT(lock->cll_depth == 1); cl_lock_state_wait()
932 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */ cl_lock_state_wait()
934 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock); cl_lock_state_wait()
935 result = lock->cll_error; cl_lock_state_wait()
943 add_wait_queue(&lock->cll_wq, &waiter); cl_lock_state_wait()
945 cl_lock_mutex_put(env, lock); cl_lock_state_wait()
958 cl_lock_mutex_get(env, lock); cl_lock_state_wait()
960 remove_wait_queue(&lock->cll_wq, &waiter); cl_lock_state_wait()
969 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, cl_lock_state_signal() argument
974 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_state_signal()
975 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_state_signal()
977 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) cl_lock_state_signal()
980 wake_up_all(&lock->cll_wq); cl_lock_state_signal()
984 * Notifies waiters that lock state changed.
990 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock) cl_lock_signal() argument
992 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock); cl_lock_signal()
993 cl_lock_state_signal(env, lock, lock->cll_state); cl_lock_signal()
998 * Changes lock state.
1000 * This function is invoked to notify layers that lock state changed, possible
1003 * \post lock->cll_state == state
1007 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, cl_lock_state_set() argument
1010 LASSERT(lock->cll_state <= state || cl_lock_state_set()
1011 (lock->cll_state == CLS_CACHED && cl_lock_state_set()
1012 (state == CLS_HELD || /* lock found in cache */ cl_lock_state_set()
1013 state == CLS_NEW || /* sub-lock canceled */ cl_lock_state_set()
1015 /* lock is in transit state */ cl_lock_state_set()
1016 lock->cll_state == CLS_INTRANSIT); cl_lock_state_set()
1018 if (lock->cll_state != state) { cl_lock_state_set()
1019 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state); cl_lock_state_set()
1020 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state); cl_lock_state_set()
1022 cl_lock_state_signal(env, lock, state); cl_lock_state_set()
1023 lock->cll_state = state; cl_lock_state_set()
1028 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) cl_unuse_try_internal() argument
1036 LINVRNT(cl_lock_is_mutexed(lock)); cl_unuse_try_internal()
1037 LINVRNT(cl_lock_invariant(env, lock)); cl_unuse_try_internal()
1038 LASSERT(lock->cll_state == CLS_INTRANSIT); cl_unuse_try_internal()
1041 list_for_each_entry_reverse(slice, &lock->cll_layers, cl_unuse_try_internal()
1056 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1058 * @atomic = 1, it must unuse the lock to recover the lock to keep the cl_use_try()
1061 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) cl_use_try() argument
1067 cl_lock_trace(D_DLMTRACE, env, "use lock", lock); cl_use_try()
1069 LASSERT(lock->cll_state == CLS_CACHED); cl_use_try()
1070 if (lock->cll_error) cl_use_try()
1071 return lock->cll_error; cl_use_try()
1074 state = cl_lock_intransit(env, lock); cl_use_try()
1075 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_use_try()
1084 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n", cl_use_try()
1085 lock->cll_state); cl_use_try()
1093 * at this time, and set lock state to cl_use_try()
1103 rc = cl_unuse_try_internal(env, lock); cl_use_try()
1110 cl_lock_extransit(env, lock, state); cl_use_try()
1120 struct cl_lock *lock, cl_enqueue_kick()
1127 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_enqueue_kick()
1140 * Tries to enqueue a lock.
1142 * This function is called repeatedly by cl_enqueue() until either lock is
1146 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1147 * lock->cll_state == CLS_HELD)
1152 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, cl_enqueue_try() argument
1157 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock); cl_enqueue_try()
1159 LINVRNT(cl_lock_is_mutexed(lock)); cl_enqueue_try()
1161 result = lock->cll_error; cl_enqueue_try()
1165 switch (lock->cll_state) { cl_enqueue_try()
1167 cl_lock_state_set(env, lock, CLS_QUEUING); cl_enqueue_try()
1171 result = cl_enqueue_kick(env, lock, io, flags); cl_enqueue_try()
1174 if (result == 0 && lock->cll_state == CLS_QUEUING) cl_enqueue_try()
1175 cl_lock_state_set(env, lock, CLS_ENQUEUED); cl_enqueue_try()
1178 LASSERT(cl_lock_is_intransit(lock)); cl_enqueue_try()
1182 /* yank lock from the cache. */ cl_enqueue_try()
1183 result = cl_use_try(env, lock, 0); cl_enqueue_try()
1204 * Cancel the conflicting lock found during previous enqueue.
1206 * \retval 0 conflicting lock has been canceled.
1210 struct cl_lock *lock, cl_lock_enqueue_wait()
1216 LASSERT(cl_lock_is_mutexed(lock)); cl_lock_enqueue_wait()
1217 LASSERT(lock->cll_state == CLS_QUEUING); cl_lock_enqueue_wait()
1218 LASSERT(lock->cll_conflict != NULL); cl_lock_enqueue_wait()
1220 conflict = lock->cll_conflict; cl_lock_enqueue_wait()
1221 lock->cll_conflict = NULL; cl_lock_enqueue_wait()
1223 cl_lock_mutex_put(env, lock); cl_lock_enqueue_wait()
1237 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock); cl_lock_enqueue_wait()
1241 cl_lock_mutex_get(env, lock); cl_lock_enqueue_wait()
1248 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock, cl_enqueue_locked() argument
1253 LINVRNT(cl_lock_is_mutexed(lock)); cl_enqueue_locked()
1254 LINVRNT(cl_lock_invariant(env, lock)); cl_enqueue_locked()
1255 LASSERT(lock->cll_holds > 0); cl_enqueue_locked()
1257 cl_lock_user_add(env, lock); cl_enqueue_locked()
1259 result = cl_enqueue_try(env, lock, io, enqflags); cl_enqueue_locked()
1261 if (lock->cll_conflict != NULL) cl_enqueue_locked()
1262 result = cl_lock_enqueue_wait(env, lock, 1); cl_enqueue_locked()
1264 result = cl_lock_state_wait(env, lock); cl_enqueue_locked()
1271 cl_unuse_try(env, lock); cl_enqueue_locked()
1273 lock->cll_state == CLS_ENQUEUED || cl_enqueue_locked()
1274 lock->cll_state == CLS_HELD)); cl_enqueue_locked()
1279 * Enqueues a lock.
1281 * \pre current thread or io owns a hold on lock.
1283 * \post ergo(result == 0, lock->users increased)
1284 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1285 * lock->cll_state == CLS_HELD)
1287 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock, cl_enqueue() argument
1292 cl_lock_lockdep_acquire(env, lock, enqflags); cl_enqueue()
1293 cl_lock_mutex_get(env, lock); cl_enqueue()
1294 result = cl_enqueue_locked(env, lock, io, enqflags); cl_enqueue()
1295 cl_lock_mutex_put(env, lock); cl_enqueue()
1297 cl_lock_lockdep_release(env, lock); cl_enqueue()
1298 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED || cl_enqueue()
1299 lock->cll_state == CLS_HELD)); cl_enqueue()
1305 * Tries to unlock a lock.
1308 * 1. for a top lock, the resources are the sub-locks it holds; cl_unuse_try()
1316 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) cl_unuse_try() argument
1321 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock); cl_unuse_try()
1323 if (lock->cll_users > 1) { cl_unuse_try()
1324 cl_lock_user_del(env, lock); cl_unuse_try()
1328 /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold cl_unuse_try()
1330 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) { cl_unuse_try()
1331 cl_lock_user_del(env, lock); cl_unuse_try()
1336 * New lock users (->cll_users) are not protecting unlocking cl_unuse_try()
1337 * from proceeding. From this point, lock eventually reaches cl_unuse_try()
1341 state = cl_lock_intransit(env, lock); cl_unuse_try()
1343 result = cl_unuse_try_internal(env, lock); cl_unuse_try()
1344 LASSERT(lock->cll_state == CLS_INTRANSIT); cl_unuse_try()
1346 cl_lock_user_del(env, lock); cl_unuse_try()
1349 * Return lock back to the cache. This is the only cl_unuse_try()
1350 * place where lock is moved into CLS_CACHED state. cl_unuse_try()
1352 * If one of ->clo_unuse() methods returned -ESTALE, lock cl_unuse_try()
1354 * re-initialized. This happens e.g., when a sub-lock was cl_unuse_try()
1361 cl_lock_extransit(env, lock, state); cl_unuse_try()
1365 * If the lock is a glimpse lock, and it has multiple cl_unuse_try()
1368 * we can't set this lock to error because otherwise some of cl_unuse_try()
1376 cl_lock_extransit(env, lock, state); cl_unuse_try()
1378 return result ?: lock->cll_error; cl_unuse_try()
1382 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock) cl_unuse_locked() argument
1386 result = cl_unuse_try(env, lock); cl_unuse_locked()
1388 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result); cl_unuse_locked()
1392 * Unlocks a lock.
1394 void cl_unuse(const struct lu_env *env, struct cl_lock *lock) cl_unuse() argument
1396 cl_lock_mutex_get(env, lock); cl_unuse()
1397 cl_unuse_locked(env, lock); cl_unuse()
1398 cl_lock_mutex_put(env, lock); cl_unuse()
1399 cl_lock_lockdep_release(env, lock); cl_unuse()
1404 * Tries to wait for a lock.
1406 * This function is called repeatedly by cl_wait() until either lock is
1413 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) cl_wait_try() argument
1418 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock); cl_wait_try()
1420 LINVRNT(cl_lock_is_mutexed(lock)); cl_wait_try()
1421 LINVRNT(cl_lock_invariant(env, lock)); cl_wait_try()
1422 LASSERTF(lock->cll_state == CLS_QUEUING || cl_wait_try()
1423 lock->cll_state == CLS_ENQUEUED || cl_wait_try()
1424 lock->cll_state == CLS_HELD || cl_wait_try()
1425 lock->cll_state == CLS_INTRANSIT, cl_wait_try()
1426 "lock state: %d\n", lock->cll_state); cl_wait_try()
1427 LASSERT(lock->cll_users > 0); cl_wait_try()
1428 LASSERT(lock->cll_holds > 0); cl_wait_try()
1430 result = lock->cll_error; cl_wait_try()
1434 if (cl_lock_is_intransit(lock)) { cl_wait_try()
1439 if (lock->cll_state == CLS_HELD) cl_wait_try()
1444 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_wait_try()
1453 LASSERT(lock->cll_state != CLS_INTRANSIT); cl_wait_try()
1454 cl_lock_state_set(env, lock, CLS_HELD); cl_wait_try()
1462 * Waits until enqueued lock is granted.
1464 * \pre current thread or io owns a hold on the lock
1465 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1466 * lock->cll_state == CLS_HELD)
1468 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1470 int cl_wait(const struct lu_env *env, struct cl_lock *lock) cl_wait() argument
1474 cl_lock_mutex_get(env, lock); cl_wait()
1476 LINVRNT(cl_lock_invariant(env, lock)); cl_wait()
1477 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD, cl_wait()
1478 "Wrong state %d \n", lock->cll_state); cl_wait()
1479 LASSERT(lock->cll_holds > 0); cl_wait()
1482 result = cl_wait_try(env, lock); cl_wait()
1484 result = cl_lock_state_wait(env, lock); cl_wait()
1491 cl_unuse_try(env, lock); cl_wait()
1492 cl_lock_lockdep_release(env, lock); cl_wait()
1494 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock); cl_wait()
1495 cl_lock_mutex_put(env, lock); cl_wait()
1496 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD)); cl_wait()
1502 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1505 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) cl_lock_weigh() argument
1511 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_weigh()
1512 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_weigh()
1515 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { cl_lock_weigh()
1528 * Notifies layers that lock description changed.
1530 * The server can grant client a lock different from one that was requested
1531 * (e.g., larger in extent). This method is called when actually granted lock
1532 * description becomes known to let layers to accommodate for changed lock
1537 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, cl_lock_modify() argument
1541 struct cl_object *obj = lock->cll_descr.cld_obj; cl_lock_modify()
1545 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock); cl_lock_modify()
1548 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_modify()
1549 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_modify()
1551 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { cl_lock_modify()
1558 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n", cl_lock_modify()
1566 lock->cll_descr = *desc; cl_lock_modify()
1573 * Initializes lock closure with a given origin.
1592 * Builds a closure of \a lock.
1594 * Building of a closure consists of adding initial lock (\a lock) into it,
1595 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1601 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, cl_lock_closure_build() argument
1610 result = cl_lock_enclosure(env, lock, closure); cl_lock_closure_build()
1612 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_lock_closure_build()
1628 * Adds new lock to a closure.
1630 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1631 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1632 * until next try-lock is likely to succeed.
1634 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, cl_lock_enclosure() argument
1639 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock); cl_lock_enclosure()
1640 if (!cl_lock_mutex_try(env, lock)) { cl_lock_enclosure()
1642 * If lock->cll_inclosure is not empty, lock is already in cl_lock_enclosure()
1645 if (list_empty(&lock->cll_inclosure)) { cl_lock_enclosure()
1646 cl_lock_get_trust(lock); cl_lock_enclosure()
1647 lu_ref_add(&lock->cll_reference, "closure", closure); cl_lock_enclosure()
1648 list_add(&lock->cll_inclosure, &closure->clc_list); cl_lock_enclosure()
1651 cl_lock_mutex_put(env, lock); cl_lock_enclosure()
1656 cl_lock_get_trust(lock); cl_lock_enclosure()
1657 lu_ref_add(&lock->cll_reference, "closure-w", closure); cl_lock_enclosure()
1661 cl_lock_mutex_get(env, lock); cl_lock_enclosure()
1662 cl_lock_mutex_put(env, lock); cl_lock_enclosure()
1665 lu_ref_del(&lock->cll_reference, "closure-w", closure); cl_lock_enclosure()
1666 cl_lock_put(env, lock); cl_lock_enclosure()
1681 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); cl_lock_disclosure()
1703 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
1704 * destroyed, then destroy the lock. If there are holds on the lock, postpone
1706 * made to destroy the lock in the future. E.g., when a blocking AST is
1709 * Caller must have a reference on this lock to prevent a situation, when
1710 * deleted lock lingers in memory for indefinite time, because nobody calls
1713 * \pre atomic_read(&lock->cll_ref) > 0
1714 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1716 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1717 * held, as deletion of sub-locks might require releasing a top-lock
1723 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock) cl_lock_delete() argument
1725 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_delete()
1726 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_delete()
1727 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP, cl_lock_delete()
1730 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock); cl_lock_delete()
1731 if (lock->cll_holds == 0) cl_lock_delete()
1732 cl_lock_delete0(env, lock); cl_lock_delete()
1734 lock->cll_flags |= CLF_DOOMED; cl_lock_delete()
1739 * Mark lock as irrecoverably failed, and mark it for destruction. This
1740 * happens when, e.g., server fails to grant a lock to us, or networking
1743 * \pre atomic_read(&lock->cll_ref) > 0
1748 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error) cl_lock_error() argument
1750 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_error()
1751 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_error()
1753 if (lock->cll_error == 0 && error != 0) { cl_lock_error()
1754 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock); cl_lock_error()
1755 lock->cll_error = error; cl_lock_error()
1756 cl_lock_signal(env, lock); cl_lock_error()
1757 cl_lock_cancel(env, lock); cl_lock_error()
1758 cl_lock_delete(env, lock); cl_lock_error()
1764 * Cancels this lock. Notifies layers
1765 * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
1766 * there are holds on the lock, postpone cancellation until
1774 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock) cl_lock_cancel() argument
1776 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_cancel()
1777 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_cancel()
1779 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock); cl_lock_cancel()
1780 if (lock->cll_holds == 0) cl_lock_cancel()
1781 cl_lock_cancel0(env, lock); cl_lock_cancel()
1783 lock->cll_flags |= CLF_CANCELPEND; cl_lock_cancel()
1788 * Finds an existing lock covering given index and optionally different from a
1789 * given \a except lock.
1798 struct cl_lock *lock; cl_lock_at_pgoff() local
1803 lock = NULL; cl_lock_at_pgoff()
1811 /* It is fine to match any group lock since there could be only one cl_lock_at_pgoff()
1812 * with a uniq gid and it conflicts with all other lock modes too */ cl_lock_at_pgoff()
1820 * This check is racy as the lock can be canceled right cl_lock_at_pgoff()
1829 lock = scan; cl_lock_at_pgoff()
1834 return lock; cl_lock_at_pgoff()
1839 * Calculate the page offset at the layer of @lock.
1840 * At the time of this writing, @page is top page and @lock is sub lock.
1842 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) pgoff_at_lock() argument
1847 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type; pgoff_at_lock()
1854 * Check if page @page is covered by an extra lock or discard it.
1860 struct cl_lock *lock = cbdata; check_and_discard_cb() local
1861 pgoff_t index = pgoff_at_lock(page, lock); check_and_discard_cb()
1867 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, check_and_discard_cb()
1868 lock, 1, 0); check_and_discard_cb()
1872 * is safe because if tmp lock is canceled, it will check_and_discard_cb()
1896 struct cl_lock *lock = cbdata; discard_cb() local
1898 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); discard_cb()
1904 info->clt_next_index = pgoff_at_lock(page, lock) + 1; discard_cb()
1918 * Discard pages protected by the given lock. This function traverses radix
1923 * behind this being that lock cancellation cannot be delayed indefinitely).
1925 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock) cl_lock_discard_pages() argument
1929 struct cl_lock_descr *descr = &lock->cll_descr; cl_lock_discard_pages()
1934 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_discard_pages()
1947 cb, (void *)lock); cl_lock_discard_pages()
1963 * Caller has to guarantee that no lock is in active use.
1971 struct cl_lock *lock; cl_locks_prune() local
1983 lock = container_of(head->coh_locks.next, cl_locks_prune()
1985 cl_lock_get_trust(lock); cl_locks_prune()
1987 lu_ref_add(&lock->cll_reference, "prune", current); cl_locks_prune()
1990 cl_lock_mutex_get(env, lock); cl_locks_prune()
1991 if (lock->cll_state < CLS_FREEING) { cl_locks_prune()
1992 LASSERT(lock->cll_users <= 1); cl_locks_prune()
1993 if (unlikely(lock->cll_users == 1)) { cl_locks_prune()
1996 cl_lock_mutex_put(env, lock); cl_locks_prune()
1997 l_wait_event(lock->cll_wq, cl_locks_prune()
1998 lock->cll_users == 0, cl_locks_prune()
2004 cl_lock_cancel(env, lock); cl_locks_prune()
2005 cl_lock_delete(env, lock); cl_locks_prune()
2007 cl_lock_mutex_put(env, lock); cl_locks_prune()
2008 lu_ref_del(&lock->cll_reference, "prune", current); cl_locks_prune()
2009 cl_lock_put(env, lock); cl_locks_prune()
2021 struct cl_lock *lock; cl_lock_hold_mutex() local
2024 lock = cl_lock_find(env, io, need); cl_lock_hold_mutex()
2025 if (IS_ERR(lock)) cl_lock_hold_mutex()
2027 cl_lock_mutex_get(env, lock); cl_lock_hold_mutex()
2028 if (lock->cll_state < CLS_FREEING && cl_lock_hold_mutex()
2029 !(lock->cll_flags & CLF_CANCELLED)) { cl_lock_hold_mutex()
2030 cl_lock_hold_mod(env, lock, +1); cl_lock_hold_mutex()
2031 lu_ref_add(&lock->cll_holders, scope, source); cl_lock_hold_mutex()
2032 lu_ref_add(&lock->cll_reference, scope, source); cl_lock_hold_mutex()
2035 cl_lock_mutex_put(env, lock); cl_lock_hold_mutex()
2036 cl_lock_put(env, lock); cl_lock_hold_mutex()
2038 return lock; cl_lock_hold_mutex()
2042 * Returns a lock matching \a need description with a reference and a hold on
2046 * guarantees that lock is not in the CLS_FREEING state on return.
2052 struct cl_lock *lock; cl_lock_hold() local
2054 lock = cl_lock_hold_mutex(env, io, need, scope, source); cl_lock_hold()
2055 if (!IS_ERR(lock)) cl_lock_hold()
2056 cl_lock_mutex_put(env, lock); cl_lock_hold()
2057 return lock; cl_lock_hold()
2063 * enqueues new lock matching given description.
2069 struct cl_lock *lock; cl_lock_request() local
2074 lock = cl_lock_hold_mutex(env, io, need, scope, source); cl_lock_request()
2075 if (IS_ERR(lock)) cl_lock_request()
2078 rc = cl_enqueue_locked(env, lock, io, enqflags); cl_lock_request()
2080 if (cl_lock_fits_into(env, lock, need, io)) { cl_lock_request()
2082 cl_lock_mutex_put(env, lock); cl_lock_request()
2083 cl_lock_lockdep_acquire(env, lock, cl_lock_request()
2089 cl_unuse_locked(env, lock); cl_lock_request()
2092 rc <= 0 ? "enqueue failed" : "agl succeed", lock); cl_lock_request()
2093 cl_lock_hold_release(env, lock, scope, source); cl_lock_request()
2094 cl_lock_mutex_put(env, lock); cl_lock_request()
2095 lu_ref_del(&lock->cll_reference, scope, source); cl_lock_request()
2096 cl_lock_put(env, lock); cl_lock_request()
2099 lock = NULL; cl_lock_request()
2101 lock = ERR_PTR(rc); cl_lock_request()
2104 return lock; cl_lock_request()
2109 * Adds a hold to a known lock.
2111 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, cl_lock_hold_add() argument
2114 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_hold_add()
2115 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_hold_add()
2116 LASSERT(lock->cll_state != CLS_FREEING); cl_lock_hold_add()
2118 cl_lock_hold_mod(env, lock, +1); cl_lock_hold_add()
2119 cl_lock_get(lock); cl_lock_hold_add()
2120 lu_ref_add(&lock->cll_holders, scope, source); cl_lock_hold_add()
2121 lu_ref_add(&lock->cll_reference, scope, source); cl_lock_hold_add()
2126 * Releases a hold and a reference on a lock, on which caller acquired a
2129 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, cl_lock_unhold() argument
2132 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_unhold()
2133 cl_lock_hold_release(env, lock, scope, source); cl_lock_unhold()
2134 lu_ref_del(&lock->cll_reference, scope, source); cl_lock_unhold()
2135 cl_lock_put(env, lock); cl_lock_unhold()
2140 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2142 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, cl_lock_release() argument
2145 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_release()
2146 cl_lock_trace(D_DLMTRACE, env, "release lock", lock); cl_lock_release()
2147 cl_lock_mutex_get(env, lock); cl_lock_release()
2148 cl_lock_hold_release(env, lock, scope, source); cl_lock_release()
2149 cl_lock_mutex_put(env, lock); cl_lock_release()
2150 lu_ref_del(&lock->cll_reference, scope, source); cl_lock_release()
2151 cl_lock_put(env, lock); cl_lock_release()
2155 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock) cl_lock_user_add() argument
2157 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_user_add()
2158 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_user_add()
2160 cl_lock_used_mod(env, lock, +1); cl_lock_user_add()
2164 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock) cl_lock_user_del() argument
2166 LINVRNT(cl_lock_is_mutexed(lock)); cl_lock_user_del()
2167 LINVRNT(cl_lock_invariant(env, lock)); cl_lock_user_del()
2168 LASSERT(lock->cll_users > 0); cl_lock_user_del()
2170 cl_lock_used_mod(env, lock, -1); cl_lock_user_del()
2171 if (lock->cll_users == 0) cl_lock_user_del()
2172 wake_up_all(&lock->cll_wq); cl_lock_user_del()
2192 * Prints human readable representation of a lock description.
2206 * Prints human readable representation of \a lock to the \a f.
2209 lu_printer_t printer, const struct cl_lock *lock) cl_lock_print()
2212 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ", cl_lock_print()
2213 lock, atomic_read(&lock->cll_ref), cl_lock_print()
2214 lock->cll_state, lock->cll_error, lock->cll_holds, cl_lock_print()
2215 lock->cll_users, lock->cll_flags); cl_lock_print()
2216 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr); cl_lock_print()
2219 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { cl_lock_print()
2227 (*printer)(env, cookie, "} lock@%p\n", lock); cl_lock_print()
76 cl_lock_invariant_trusted(const struct lu_env *env, const struct cl_lock *lock) cl_lock_invariant_trusted() argument
92 cl_lock_invariant(const struct lu_env *env, const struct cl_lock *lock) cl_lock_invariant() argument
115 cl_lock_counters(const struct lu_env *env, const struct cl_lock *lock) cl_lock_counters() argument
127 cl_lock_trace0(int level, const struct lu_env *env, const char *prefix, const struct cl_lock *lock, const char *func, const int line) cl_lock_trace0() argument
153 cl_lock_lockdep_acquire(const struct lu_env *env, struct cl_lock *lock, __u32 enqflags) cl_lock_lockdep_acquire() argument
160 cl_lock_lockdep_release(const struct lu_env *env, struct cl_lock *lock) cl_lock_lockdep_release() argument
171 cl_lock_lockdep_acquire(const struct lu_env *env, struct cl_lock *lock, __u32 enqflags) cl_lock_lockdep_acquire() argument
174 cl_lock_lockdep_release(const struct lu_env *env, struct cl_lock *lock) cl_lock_lockdep_release() argument
406 cl_lock_intransit(const struct lu_env *env, struct cl_lock *lock) cl_lock_intransit() argument
455 cl_lock_fits_into(const struct lu_env *env, const struct cl_lock *lock, const struct cl_lock_descr *need, const struct cl_io *io) cl_lock_fits_into() argument
1119 cl_enqueue_kick(const struct lu_env *env, struct cl_lock *lock, struct cl_io *io, __u32 flags) cl_enqueue_kick() argument
1209 cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, int keep_mutex) cl_lock_enqueue_wait() argument
2208 cl_lock_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock *lock) cl_lock_print() argument
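cl_lock_mutex_get()/cl_lock_mutex_try()/cl_lock_mutex_put() above build recursion out of a plain mutex by recording the owning thread in cll_guarder and counting nested acquisitions in cll_depth; only the last put releases the underlying mutex. A self-contained userspace sketch of the same owner-plus-depth technique (names are illustrative; as in the code above, the unlocked owner/depth check is only trusted when the current thread itself stored its identity there).

    #include <assert.h>
    #include <pthread.h>

    struct rec_mutex {
        pthread_mutex_t guard;      /* the underlying non-recursive mutex */
        pthread_t       owner;      /* meaningful only while depth > 0    */
        int             depth;      /* nesting level of the current owner */
    };

    static void rec_mutex_init(struct rec_mutex *m)
    {
        pthread_mutex_init(&m->guard, NULL);
        m->depth = 0;               /* owner is only read while depth > 0 */
    }

    static void rec_mutex_lock(struct rec_mutex *m)
    {
        if (m->depth > 0 && pthread_equal(m->owner, pthread_self())) {
            m->depth++;                     /* nested acquisition by the owner */
            return;
        }
        pthread_mutex_lock(&m->guard);      /* first acquisition: may block */
        m->owner = pthread_self();
        m->depth = 1;
    }

    /* Returns 1 if the mutex is now held by the caller, 0 if it was busy. */
    static int rec_mutex_trylock(struct rec_mutex *m)
    {
        if (m->depth > 0 && pthread_equal(m->owner, pthread_self())) {
            m->depth++;                     /* nested try always succeeds */
            return 1;
        }
        if (pthread_mutex_trylock(&m->guard) == 0) {
            m->owner = pthread_self();
            m->depth = 1;
            return 1;
        }
        return 0;
    }

    static void rec_mutex_unlock(struct rec_mutex *m)
    {
        assert(m->depth > 0 && pthread_equal(m->owner, pthread_self()));
        if (--m->depth == 0)                /* last put releases the mutex */
            pthread_mutex_unlock(&m->guard);
    }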
/linux-4.1.27/include/asm-generic/bitops/
H A Dext2-atomic.h8 #define ext2_set_bit_atomic(lock, nr, addr) \
11 spin_lock(lock); \
13 spin_unlock(lock); \
17 #define ext2_clear_bit_atomic(lock, nr, addr) \
20 spin_lock(lock); \
22 spin_unlock(lock); \
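The two macros above simply bracket a non-atomic bit test-and-set / test-and-clear with spin_lock()/spin_unlock(), trading per-bit atomics for one external lock. A userspace analogue of ext2_set_bit_atomic()'s contract (a pthread mutex stands in for the spinlock; the helper name is invented):

    #include <pthread.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Set bit nr in the bitmap at addr under lock and return its previous
     * value, mirroring what ext2_set_bit_atomic(lock, nr, addr) evaluates to. */
    static int set_bit_atomic(pthread_mutex_t *lock, unsigned int nr,
                              unsigned long *addr)
    {
        unsigned long *word = addr + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        int old;

        pthread_mutex_lock(lock);       /* serialise the read-modify-write */
        old = (*word & mask) != 0;
        *word |= mask;
        pthread_mutex_unlock(lock);
        return old;
    }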
/linux-4.1.27/tools/lib/lockdep/include/liblockdep/
H A Drwlock.h20 static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock, __rwlock_init() argument
25 lockdep_init_map(&lock->dep_map, name, key, 0); __rwlock_init()
27 return pthread_rwlock_init(&lock->rwlock, attr); __rwlock_init()
30 #define liblockdep_pthread_rwlock_init(lock, attr) \
34 __rwlock_init((lock), #lock, &__key, (attr)); \
37 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock) liblockdep_pthread_rwlock_rdlock() argument
39 lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_rwlock_rdlock()
40 return pthread_rwlock_rdlock(&lock->rwlock); liblockdep_pthread_rwlock_rdlock()
44 static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock) liblockdep_pthread_rwlock_unlock() argument
46 lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); liblockdep_pthread_rwlock_unlock()
47 return pthread_rwlock_unlock(&lock->rwlock); liblockdep_pthread_rwlock_unlock()
50 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock) liblockdep_pthread_rwlock_wrlock() argument
52 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_rwlock_wrlock()
53 return pthread_rwlock_wrlock(&lock->rwlock); liblockdep_pthread_rwlock_wrlock()
56 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock) liblockdep_pthread_rwlock_tryrdlock() argument
58 lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_rwlock_tryrdlock()
59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; liblockdep_pthread_rwlock_tryrdlock()
62 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) liblockdep_pthread_rwlock_trywlock() argument
64 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_rwlock_trywlock()
65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0; liblockdep_pthread_rwlock_trywlock()
68 static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock) liblockdep_rwlock_destroy() argument
70 return pthread_rwlock_destroy(&lock->rwlock); liblockdep_rwlock_destroy()
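Each wrapper above first reports the operation to lockdep (lock_acquire()/lock_release() on the embedded dep_map) and only then performs the real pthread_rwlock call, so ordinary pthread code gains deadlock checking without changing behaviour. A usage sketch, assuming this header is reachable as <liblockdep/rwlock.h>:

    #include <liblockdep/rwlock.h>

    static liblockdep_pthread_rwlock_t rw;

    void setup(void)
    {
        /* Registers the lock class with lockdep and initialises the
         * underlying pthread_rwlock_t. */
        liblockdep_pthread_rwlock_init(&rw, NULL);
    }

    void reader(void)
    {
        liblockdep_pthread_rwlock_rdlock(&rw);   /* recorded as a read acquire */
        /* ... read shared state ... */
        liblockdep_pthread_rwlock_unlock(&rw);   /* recorded as a release */
    }

    void writer(void)
    {
        liblockdep_pthread_rwlock_wrlock(&rw);   /* recorded as a write acquire */
        /* ... modify shared state ... */
        liblockdep_pthread_rwlock_unlock(&rw);
    }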
H A Dmutex.h20 static inline int __mutex_init(liblockdep_pthread_mutex_t *lock, __mutex_init() argument
25 lockdep_init_map(&lock->dep_map, name, key, 0); __mutex_init()
26 return pthread_mutex_init(&lock->mutex, __mutexattr); __mutex_init()
36 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock) liblockdep_pthread_mutex_lock() argument
38 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_mutex_lock()
39 return pthread_mutex_lock(&lock->mutex); liblockdep_pthread_mutex_lock()
42 static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock) liblockdep_pthread_mutex_unlock() argument
44 lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); liblockdep_pthread_mutex_unlock()
45 return pthread_mutex_unlock(&lock->mutex); liblockdep_pthread_mutex_unlock()
48 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock) liblockdep_pthread_mutex_trylock() argument
50 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); liblockdep_pthread_mutex_trylock()
51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0; liblockdep_pthread_mutex_trylock()
54 static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock) liblockdep_pthread_mutex_destroy() argument
56 return pthread_mutex_destroy(&lock->mutex); liblockdep_pthread_mutex_destroy()
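Worth noting in the trylock wrapper above: pthread_mutex_trylock() returns 0 on success, but liblockdep_pthread_mutex_trylock() flips that to 1 (acquired) / 0 (busy), matching the kernel's mutex_trylock() convention. A small caller sketch, assuming the header is reachable as <liblockdep/mutex.h> and the mutex has already been initialised elsewhere:

    #include <liblockdep/mutex.h>

    /* Returns 1 if the work was done, 0 if the mutex was busy. */
    int try_do_work(liblockdep_pthread_mutex_t *m)
    {
        if (!liblockdep_pthread_mutex_trylock(m))   /* 0 here means "not taken" */
            return 0;
        /* ... critical section ... */
        liblockdep_pthread_mutex_unlock(m);
        return 1;
    }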
/linux-4.1.27/include/trace/events/
H A Dlock.h2 #define TRACE_SYSTEM lock
14 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
18 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
22 __string(name, lock->name)
28 __assign_str(name, lock->name);
29 __entry->lockdep_addr = lock;
38 DECLARE_EVENT_CLASS(lock,
40 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
42 TP_ARGS(lock, ip),
45 __string( name, lock->name )
50 __assign_str(name, lock->name);
51 __entry->lockdep_addr = lock;
57 DEFINE_EVENT(lock, lock_release,
59 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
61 TP_ARGS(lock, ip)
66 DEFINE_EVENT(lock, lock_contended,
68 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
70 TP_ARGS(lock, ip)
73 DEFINE_EVENT(lock, lock_acquired,
75 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
77 TP_ARGS(lock, ip)
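The TRACE_EVENT()/DEFINE_EVENT() machinery above generates trace_lock_acquire(), trace_lock_release(), trace_lock_contended() and trace_lock_acquired() hooks with exactly the TP_PROTO() signatures shown. A kernel-side sketch of firing them; the surrounding function is illustrative, not a quote from lockdep.

    #include <trace/events/lock.h>

    static void example_fire_lock_events(struct lockdep_map *lock,
                                         unsigned long ip)
    {
        /* subclass 0, not a trylock, a write acquire (read=0),
         * checking enabled, no nest lock */
        trace_lock_acquire(lock, 0, 0, 0, 1, NULL, ip);
        trace_lock_acquired(lock, ip);      /* the acquisition succeeded */
        trace_lock_release(lock, ip);       /* and was later released    */
    }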
/linux-4.1.27/drivers/md/persistent-data/
H A Ddm-block-manager.c28 * trace is also emitted for the previous lock acquisition.
38 spinlock_t lock; member in struct:block_lock
55 static unsigned __find_holder(struct block_lock *lock, __find_holder() argument
61 if (lock->holders[i] == task) __find_holder()
68 /* call this *after* you increment lock->count */ __add_holder()
69 static void __add_holder(struct block_lock *lock, struct task_struct *task) __add_holder() argument
71 unsigned h = __find_holder(lock, NULL); __add_holder()
77 lock->holders[h] = task; __add_holder()
80 t = lock->traces + h; __add_holder()
83 t->entries = lock->entries[h]; __add_holder()
89 /* call this *before* you decrement lock->count */ __del_holder()
90 static void __del_holder(struct block_lock *lock, struct task_struct *task) __del_holder() argument
92 unsigned h = __find_holder(lock, task); __del_holder()
93 lock->holders[h] = NULL; __del_holder()
97 static int __check_holder(struct block_lock *lock) __check_holder() argument
106 if (lock->holders[i] == current) { __check_holder()
107 DMERR("recursive lock detected in metadata"); __check_holder()
110 print_stack_trace(lock->traces + i, 4); __check_holder()
155 static void __wake_many(struct block_lock *lock) __wake_many() argument
159 BUG_ON(lock->count < 0); __wake_many()
160 list_for_each_entry_safe(w, tmp, &lock->waiters, list) { __wake_many()
161 if (lock->count >= MAX_HOLDERS) __wake_many()
165 if (lock->count > 0) __wake_many()
168 lock->count = -1; __wake_many()
169 __add_holder(lock, w->task); __wake_many()
174 lock->count++; __wake_many()
175 __add_holder(lock, w->task); __wake_many()
180 static void bl_init(struct block_lock *lock) bl_init() argument
184 spin_lock_init(&lock->lock); bl_init()
185 lock->count = 0; bl_init()
186 INIT_LIST_HEAD(&lock->waiters); bl_init()
188 lock->holders[i] = NULL; bl_init()
191 static int __available_for_read(struct block_lock *lock) __available_for_read() argument
193 return lock->count >= 0 && __available_for_read()
194 lock->count < MAX_HOLDERS && __available_for_read()
195 list_empty(&lock->waiters); __available_for_read()
198 static int bl_down_read(struct block_lock *lock) bl_down_read() argument
203 spin_lock(&lock->lock); bl_down_read()
204 r = __check_holder(lock); bl_down_read()
206 spin_unlock(&lock->lock); bl_down_read()
210 if (__available_for_read(lock)) { bl_down_read()
211 lock->count++; bl_down_read()
212 __add_holder(lock, current); bl_down_read()
213 spin_unlock(&lock->lock); bl_down_read()
221 list_add_tail(&w.list, &lock->waiters); bl_down_read()
222 spin_unlock(&lock->lock); bl_down_read()
229 static int bl_down_read_nonblock(struct block_lock *lock) bl_down_read_nonblock() argument
233 spin_lock(&lock->lock); bl_down_read_nonblock()
234 r = __check_holder(lock); bl_down_read_nonblock()
238 if (__available_for_read(lock)) { bl_down_read_nonblock()
239 lock->count++; bl_down_read_nonblock()
240 __add_holder(lock, current); bl_down_read_nonblock()
246 spin_unlock(&lock->lock); bl_down_read_nonblock()
250 static void bl_up_read(struct block_lock *lock) bl_up_read() argument
252 spin_lock(&lock->lock); bl_up_read()
253 BUG_ON(lock->count <= 0); bl_up_read()
254 __del_holder(lock, current); bl_up_read()
255 --lock->count; bl_up_read()
256 if (!list_empty(&lock->waiters)) bl_up_read()
257 __wake_many(lock); bl_up_read()
258 spin_unlock(&lock->lock); bl_up_read()
261 static int bl_down_write(struct block_lock *lock) bl_down_write() argument
266 spin_lock(&lock->lock); bl_down_write()
267 r = __check_holder(lock); bl_down_write()
269 spin_unlock(&lock->lock); bl_down_write()
273 if (lock->count == 0 && list_empty(&lock->waiters)) { bl_down_write()
274 lock->count = -1; bl_down_write()
275 __add_holder(lock, current); bl_down_write()
276 spin_unlock(&lock->lock); bl_down_write()
288 list_add(&w.list, &lock->waiters); bl_down_write()
289 spin_unlock(&lock->lock); bl_down_write()
297 static void bl_up_write(struct block_lock *lock) bl_up_write() argument
299 spin_lock(&lock->lock); bl_up_write()
300 __del_holder(lock, current); bl_up_write()
301 lock->count = 0; bl_up_write()
302 if (!list_empty(&lock->waiters)) bl_up_write()
303 __wake_many(lock); bl_up_write()
304 spin_unlock(&lock->lock); bl_up_write()
342 struct block_lock lock; member in struct:buffer_aux
350 bl_init(&aux->lock); dm_block_manager_alloc_callback()
461 r = bl_down_read(&aux->lock); dm_bm_read_lock()
472 bl_up_read(&aux->lock); dm_bm_read_lock()
497 r = bl_down_write(&aux->lock); dm_bm_write_lock()
508 bl_up_write(&aux->lock); dm_bm_write_lock()
532 r = bl_down_read_nonblock(&aux->lock); dm_bm_read_try_lock()
542 bl_up_read(&aux->lock); dm_bm_read_try_lock()
568 r = bl_down_write(&aux->lock); dm_bm_write_lock_zero()
588 bl_up_write(&aux->lock); dm_bm_unlock()
590 bl_up_read(&aux->lock); dm_bm_unlock()
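Taken together, the bl_* helpers above encode the block lock's whole state in one counter: count == -1 means a single writer, count > 0 counts readers up to MAX_HOLDERS, and anything else queues on the waiters list. A simplified, self-contained sketch of just that counting convention (the toy names and MAX_HOLDERS value are illustrative, and the driver's spinlock/waitqueue machinery is left out):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_HOLDERS 4                   /* illustrative cap, mirrors the driver's idea */

    struct toy_block_lock {
            int count;                      /* -1: writer, 0: free, >0: number of readers */
            int waiters;                    /* pending lock requests */
    };

    static bool available_for_read(const struct toy_block_lock *bl)
    {
            /* mirrors __available_for_read(): no writer, below the holder
             * cap, and no queued waiters that should go first */
            return bl->count >= 0 && bl->count < MAX_HOLDERS && bl->waiters == 0;
    }

    static bool try_read(struct toy_block_lock *bl)
    {
            if (!available_for_read(bl))
                    return false;
            bl->count++;
            return true;
    }

    static bool try_write(struct toy_block_lock *bl)
    {
            if (bl->count != 0 || bl->waiters != 0)
                    return false;
            bl->count = -1;                 /* exclusive: mirrors bl_down_write() */
            return true;
    }

    int main(void)
    {
            struct toy_block_lock bl = { 0, 0 };

            printf("read:  %d\n", try_read(&bl));   /* 1: first reader gets in */
            printf("write: %d\n", try_write(&bl));  /* 0: a reader is present */
            bl.count--;                             /* reader drops the lock */
            printf("write: %d\n", try_write(&bl));  /* 1: lock is free again */
            return 0;
    }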
/linux-4.1.27/include/drm/ttm/
H A Dttm_lock.h33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
42 * excludes write lock and read lock.
44 * The suspend mode is used to lock out all TTM users when preparing for
59 * @base: ttm base object used solely to release the lock if the client
60 * holding the lock dies.
61 * @queue: Queue for processes waiting for lock change-of-status.
62 * @lock: Spinlock protecting some lock members.
63 * @rw: Read-write lock counter. Protected by @lock.
64 * @flags: Lock state. Protected by @lock.
65 * @kill_takers: Boolean whether to kill takers of the lock.
72 spinlock_t lock; member in struct:ttm_lock
84 * @lock: Pointer to a struct ttm_lock
85 * Initializes the lock.
87 extern void ttm_lock_init(struct ttm_lock *lock);
92 * @lock: Pointer to a struct ttm_lock
94 * Releases a read lock.
96 extern void ttm_read_unlock(struct ttm_lock *lock);
101 * @lock: Pointer to a struct ttm_lock
102 * @interruptible: Interruptible sleeping while waiting for a lock.
104 * Takes the lock in read mode.
108 extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
113 * @lock: Pointer to a struct ttm_lock
114 * @interruptible: Interruptible sleeping while waiting for a lock.
116 * Tries to take the lock in read mode. If the lock is already held
117 * in write mode, the function will return -EBUSY. If the lock is held
122 * -EBUSY The lock was already held in write mode.
125 extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
130 * @lock: Pointer to a struct ttm_lock
132 * Releases a write lock.
134 extern void ttm_write_unlock(struct ttm_lock *lock);
139 * @lock: Pointer to a struct ttm_lock
140 * @interruptible: Interruptible sleeping while waiting for a lock.
142 * Takes the lock in write mode.
146 extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
151 * @lock: Pointer to a struct ttm_lock
153 * Downgrades a write lock to a read lock.
155 extern void ttm_lock_downgrade(struct ttm_lock *lock);
160 * @lock: Pointer to a struct ttm_lock
162 * Takes the lock in suspend mode. Excludes read and write mode.
164 extern void ttm_suspend_lock(struct ttm_lock *lock);
169 * @lock: Pointer to a struct ttm_lock
171 * Releases a suspend lock
173 extern void ttm_suspend_unlock(struct ttm_lock *lock);
178 * @lock: Pointer to a struct ttm_lock
179 * @interruptible: Interruptible sleeping while waiting for a lock.
180 * @tfile: Pointer to a struct ttm_object_file to register the lock with.
182 * Takes the lock in vt mode.
187 extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
193 * @lock: Pointer to a struct ttm_lock
195 * Releases a vt lock.
197 * -EINVAL If the lock was not held.
199 extern int ttm_vt_unlock(struct ttm_lock *lock);
204 * @lock: Pointer to a struct ttm_lock
206 * Releases a write lock.
208 extern void ttm_write_unlock(struct ttm_lock *lock);
213 * @lock: Pointer to a struct ttm_lock
214 * @interruptible: Interruptible sleeping while waiting for a lock.
216 * Takes the lock in write mode.
220 extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
225 * @lock: Pointer to a struct ttm_lock
226 * @val: Boolean whether to kill processes taking the lock.
227 * @signal: Signal to send to the process taking the lock.
229 * The kill-when-taking-lock functionality is used to kill processes that keep
232 * - X server takes lock in write mode.
235 * - X server releases the lock on file release.
236 * - Another dri client wants to render, takes the lock and is killed.
239 static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, ttm_lock_set_kill() argument
242 lock->kill_takers = val; ttm_lock_set_kill()
244 lock->signal = signal; ttm_lock_set_kill()
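Going only by the prototypes and comments above, a typical TTM client takes the lock in read mode around work that write, suspend, or vt mode must exclude. A hedged caller sketch (the function name and error handling here are invented for illustration):

    #include <drm/ttm/ttm_lock.h>

    /* Hypothetical helper: submit work while holding the TTM lock in read mode. */
    static int example_submit(struct ttm_lock *lock)
    {
            int ret;

            ret = ttm_read_lock(lock, true);        /* interruptible sleep */
            if (ret)
                    return ret;                     /* e.g. -ERESTARTSYS */

            /* ... touch device state that write/suspend/vt mode must exclude ... */

            ttm_read_unlock(lock);
            return 0;
    }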
/linux-4.1.27/arch/s390/include/asm/
H A Dspinlock_types.h9 unsigned int lock; member in struct:__anon2471
12 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
15 unsigned int lock; member in struct:__anon2472
H A Dspinlock.h19 _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new) _raw_compare_and_swap() argument
21 return __sync_bool_compare_and_swap(lock, old, new); _raw_compare_and_swap()
25 * Simple spin lock operations. There are two variants, one clears IRQ's
39 static inline void arch_spin_relax(arch_spinlock_t *lock) arch_spin_relax() argument
41 arch_lock_relax(lock->lock); arch_spin_relax()
49 static inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
51 return lock.lock == 0; arch_spin_value_unlocked()
56 return ACCESS_ONCE(lp->lock) != 0; arch_spin_is_locked()
63 _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); arch_spin_trylock_once()
88 typecheck(unsigned int, lp->lock); arch_spin_unlock()
92 : "+Q" (lp->lock) arch_spin_unlock()
97 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
99 while (arch_spin_is_locked(lock)) arch_spin_unlock_wait()
100 arch_spin_relax(lock); arch_spin_unlock_wait()
110 * irq-safe write-lock, but readers can get non-irqsafe
116 * @lock: the rwlock in question.
118 #define arch_read_can_lock(x) ((int)(x)->lock >= 0)
122 * @lock: the rwlock in question.
124 #define arch_write_can_lock(x) ((x)->lock == 0)
129 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
130 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
134 unsigned int old = ACCESS_ONCE(rw->lock); arch_read_trylock_once()
136 _raw_compare_and_swap(&rw->lock, old, old + 1)); arch_read_trylock_once()
141 unsigned int old = ACCESS_ONCE(rw->lock); arch_write_trylock_once()
143 _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); arch_write_trylock_once()
187 old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD); arch_read_lock()
194 __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD); arch_read_unlock()
201 old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); arch_write_lock()
210 __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND); arch_write_unlock()
229 old = ACCESS_ONCE(rw->lock); arch_read_unlock()
230 } while (!_raw_compare_and_swap(&rw->lock, old, old - 1)); arch_read_unlock()
242 typecheck(unsigned int, rw->lock); arch_write_unlock()
248 : "+Q" (rw->lock) arch_write_unlock()
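Everything in the s390 header reduces to _raw_compare_and_swap() on the lock word, with 0 meaning free. A stripped-down userspace rendering of the same idea using the same GCC builtin (the busy-wait below stands in for the arch code's relax/yield path, and the lock value is simply passed in by the caller):

    #include <stdbool.h>

    struct toy_spinlock {
            unsigned int lock;              /* 0 = free, non-zero = holder's value */
    };

    static inline bool toy_cas(unsigned int *lock, unsigned int old, unsigned int new)
    {
            /* the same builtin the s390 header wraps in _raw_compare_and_swap() */
            return __sync_bool_compare_and_swap(lock, old, new);
    }

    static void toy_spin_lock(struct toy_spinlock *lp, unsigned int lockval)
    {
            while (!toy_cas(&lp->lock, 0, lockval))
                    ;                       /* the real code relaxes/yields instead of spinning hot */
    }

    static void toy_spin_unlock(struct toy_spinlock *lp)
    {
            __sync_lock_release(&lp->lock); /* store 0 with release semantics */
    }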
/linux-4.1.27/include/asm-generic/
H A Dqrwlock_types.h8 * The queue read/write lock data structure
13 arch_spinlock_t lock; member in struct:qrwlock
18 .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
H A Dqrwlock.h2 * Queue read/write lock
31 #define _QW_LOCKED 0xff /* A writer holds the lock */
39 extern void queue_read_lock_slowpath(struct qrwlock *lock);
40 extern void queue_write_lock_slowpath(struct qrwlock *lock);
44 * @lock: Pointer to queue rwlock structure
46 static inline int queue_read_can_lock(struct qrwlock *lock) queue_read_can_lock() argument
48 return !(atomic_read(&lock->cnts) & _QW_WMASK); queue_read_can_lock()
53 * @lock: Pointer to queue rwlock structure
55 static inline int queue_write_can_lock(struct qrwlock *lock) queue_write_can_lock() argument
57 return !atomic_read(&lock->cnts); queue_write_can_lock()
61 * queue_read_trylock - try to acquire read lock of a queue rwlock
62 * @lock : Pointer to queue rwlock structure
63 * Return: 1 if lock acquired, 0 if failed
65 static inline int queue_read_trylock(struct qrwlock *lock) queue_read_trylock() argument
69 cnts = atomic_read(&lock->cnts); queue_read_trylock()
71 cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts); queue_read_trylock()
74 atomic_sub(_QR_BIAS, &lock->cnts); queue_read_trylock()
80 * queue_write_trylock - try to acquire write lock of a queue rwlock
81 * @lock : Pointer to queue rwlock structure
82 * Return: 1 if lock acquired, 0 if failed
84 static inline int queue_write_trylock(struct qrwlock *lock) queue_write_trylock() argument
88 cnts = atomic_read(&lock->cnts); queue_write_trylock()
92 return likely(atomic_cmpxchg(&lock->cnts, queue_write_trylock()
96 * queue_read_lock - acquire read lock of a queue rwlock
97 * @lock: Pointer to queue rwlock structure
99 static inline void queue_read_lock(struct qrwlock *lock) queue_read_lock() argument
103 cnts = atomic_add_return(_QR_BIAS, &lock->cnts); queue_read_lock()
108 queue_read_lock_slowpath(lock); queue_read_lock()
112 * queue_write_lock - acquire write lock of a queue rwlock
113 * @lock : Pointer to queue rwlock structure
115 static inline void queue_write_lock(struct qrwlock *lock) queue_write_lock() argument
117 /* Optimize for the unfair lock case where the fair flag is 0. */ queue_write_lock()
118 if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) queue_write_lock()
121 queue_write_lock_slowpath(lock); queue_write_lock()
125 * queue_read_unlock - release read lock of a queue rwlock
126 * @lock : Pointer to queue rwlock structure
128 static inline void queue_read_unlock(struct qrwlock *lock) queue_read_unlock() argument
134 atomic_sub(_QR_BIAS, &lock->cnts); queue_read_unlock()
139 * queue_write_unlock - release write lock of a queue rwlock
140 * @lock : Pointer to queue rwlock structure
142 static inline void queue_write_unlock(struct qrwlock *lock) queue_write_unlock() argument
149 atomic_sub(_QW_LOCKED, &lock->cnts); queue_write_unlock()
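The queue rwlock fast paths above all operate on one atomic count: readers add _QR_BIAS and fall back to the slowpath if a writer bit under _QW_WMASK is set, while writers cmpxchg the whole word from 0 to _QW_LOCKED. A hedged usage sketch, assuming the lock is embedded in some containing object and already initialized to the unlocked state:

    #include <asm-generic/qrwlock.h>

    struct example_table {
            struct qrwlock lock;
            int value;
    };

    static int example_lookup(struct example_table *t)
    {
            int v;

            queue_read_lock(&t->lock);      /* shared: many readers may enter */
            v = t->value;
            queue_read_unlock(&t->lock);
            return v;
    }

    static void example_update(struct example_table *t, int v)
    {
            queue_write_lock(&t->lock);     /* exclusive: waits out readers and writers */
            t->value = v;
            queue_write_unlock(&t->lock);
    }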
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_lock.c54 * Add the current task to the lock wait queue, and attempt to take the lock.
H A Ddrm_lock.c54 * Add the current task to the lock wait queue, and attempt to take the lock. drm_legacy_lock()
60 struct drm_lock *lock = data; drm_legacy_lock() local
69 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_lock()
71 task_pid_nr(current), lock->context); drm_legacy_lock()
75 DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", drm_legacy_lock()
76 lock->context, task_pid_nr(current), drm_legacy_lock()
77 master->lock.hw_lock->lock, lock->flags); drm_legacy_lock()
79 add_wait_queue(&master->lock.lock_queue, &entry); drm_legacy_lock()
80 spin_lock_bh(&master->lock.spinlock); drm_legacy_lock()
81 master->lock.user_waiters++; drm_legacy_lock()
82 spin_unlock_bh(&master->lock.spinlock); drm_legacy_lock()
86 if (!master->lock.hw_lock) { drm_legacy_lock()
92 if (drm_lock_take(&master->lock, lock->context)) { drm_legacy_lock()
93 master->lock.file_priv = file_priv; drm_legacy_lock()
94 master->lock.lock_time = jiffies; drm_legacy_lock()
95 break; /* Got lock */ drm_legacy_lock()
107 spin_lock_bh(&master->lock.spinlock); drm_legacy_lock()
108 master->lock.user_waiters--; drm_legacy_lock()
109 spin_unlock_bh(&master->lock.spinlock); drm_legacy_lock()
111 remove_wait_queue(&master->lock.lock_queue, &entry); drm_legacy_lock()
113 DRM_DEBUG("%d %s\n", lock->context, drm_legacy_lock()
114 ret ? "interrupted" : "has lock"); drm_legacy_lock()
126 dev->sigdata.context = lock->context; drm_legacy_lock()
127 dev->sigdata.lock = master->lock.hw_lock; drm_legacy_lock()
131 if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) drm_legacy_lock()
135 lock->context); drm_legacy_lock()
152 * Transfer and free the lock.
156 struct drm_lock *lock = data; drm_legacy_unlock() local
162 if (lock->context == DRM_KERNEL_CONTEXT) { drm_legacy_unlock()
164 task_pid_nr(current), lock->context); drm_legacy_unlock()
168 if (drm_legacy_lock_free(&master->lock, lock->context)) { drm_legacy_unlock()
177 * Take the heavyweight lock.
179 * \param lock lock pointer.
181 * \return one if the lock is held, or zero otherwise.
183 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
190 volatile unsigned int *lock = &lock_data->hw_lock->lock; drm_lock_take() local
194 old = *lock; drm_lock_take()
202 prev = cmpxchg(lock, old, new); drm_lock_take()
209 DRM_ERROR("%d holds heavyweight lock\n", drm_lock_take()
217 /* Have lock */ drm_lock_take()
224 * This takes a lock forcibly and hands it to context. Should ONLY be used
225 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
228 * \param lock lock pointer.
232 * Resets the lock file pointer.
233 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
239 volatile unsigned int *lock = &lock_data->hw_lock->lock; drm_lock_transfer() local
243 old = *lock; drm_lock_transfer()
245 prev = cmpxchg(lock, old, new); drm_lock_transfer()
251 * Free lock.
254 * \param lock lock.
257 * Resets the lock file pointer.
258 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
259 * waiting on the lock queue.
264 volatile unsigned int *lock = &lock_data->hw_lock->lock; drm_legacy_lock_free() local
276 old = *lock; drm_legacy_lock_free()
278 prev = cmpxchg(lock, old, new); drm_legacy_lock_free()
282 DRM_ERROR("%d freed heavyweight lock held by %d\n", drm_legacy_lock_free()
294 * If the lock is not held, then let the signal proceed as usual. If the lock
304 struct drm_hw_lock *lock = dev->sigdata.lock; drm_notifier() local
307 /* Allow signal delivery if lock isn't held */ drm_notifier()
308 if (!lock || !_DRM_LOCK_IS_HELD(lock->lock) drm_notifier()
309 || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context) drm_notifier()
315 old = lock->lock; drm_notifier()
317 prev = cmpxchg(&lock->lock, old, new); drm_notifier()
323 * This function returns immediately and takes the hw lock
327 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
328 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
357 volatile unsigned int *lock = &lock_data->hw_lock->lock; drm_legacy_idlelock_release() local
363 old = *lock; drm_legacy_idlelock_release()
364 prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); drm_legacy_idlelock_release()
378 return (file_priv->lock_count && master->lock.hw_lock && drm_legacy_i_have_hw_lock()
379 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && drm_legacy_i_have_hw_lock()
380 master->lock.file_priv == file_priv); drm_legacy_i_have_hw_lock()
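drm_lock_take() above claims the legacy heavyweight lock by cmpxchg()ing the caller's context (plus a held bit) into the shared lock word, looping if another update races in. A stripped-down illustration of that claim loop in C11 atomics (the bit layout is simplified and is not the real _DRM_LOCK encoding):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TOY_LOCK_HELD 0x80000000u       /* illustrative "held" bit */

    static bool toy_lock_take(_Atomic unsigned int *lock, unsigned int context)
    {
            unsigned int old, new;

            do {
                    old = atomic_load(lock);
                    if (old & TOY_LOCK_HELD)
                            return false;   /* somebody else holds it: caller must wait */
                    new = context | TOY_LOCK_HELD;
            } while (!atomic_compare_exchange_weak(lock, &old, new));

            return true;                    /* our context is installed: lock taken */
    }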
H A Ddrm_modeset_lock.c44 * foreach (lock in random_ordered_set_of_locks) {
45 * ret = drm_modeset_lock(lock, &ctx)
160 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
167 * with the lock acquire context.
240 * drm_modeset_unlock_crtc - drop crtc lock
243 * This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
317 struct drm_modeset_lock *lock; drm_modeset_drop_locks() local
319 lock = list_first_entry(&ctx->locked, drm_modeset_drop_locks()
322 drm_modeset_unlock(lock); drm_modeset_drop_locks()
327 static inline int modeset_lock(struct drm_modeset_lock *lock, modeset_lock() argument
336 if (!ww_mutex_trylock(&lock->mutex)) modeset_lock()
341 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx); modeset_lock()
343 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx); modeset_lock()
345 ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx); modeset_lock()
348 ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx); modeset_lock()
351 WARN_ON(!list_empty(&lock->head)); modeset_lock()
352 list_add(&lock->head, &ctx->locked); modeset_lock()
354 /* we already hold the lock.. this is fine. For atomic modeset_lock()
361 ctx->contended = lock; modeset_lock()
388 * block until the contended lock becomes available.
409 * drm_modeset_lock - take modeset lock
410 * @lock: lock to take
414 * lock will be tracked by the context and can be released by calling
419 int drm_modeset_lock(struct drm_modeset_lock *lock, drm_modeset_lock() argument
423 return modeset_lock(lock, ctx, false, false); drm_modeset_lock()
425 ww_mutex_lock(&lock->mutex, NULL); drm_modeset_lock()
431 * drm_modeset_lock_interruptible - take modeset lock
432 * @lock: lock to take
437 int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, drm_modeset_lock_interruptible() argument
441 return modeset_lock(lock, ctx, true, false); drm_modeset_lock_interruptible()
443 return ww_mutex_lock_interruptible(&lock->mutex, NULL); drm_modeset_lock_interruptible()
448 * drm_modeset_unlock - drop modeset lock
449 * @lock: lock to release
451 void drm_modeset_unlock(struct drm_modeset_lock *lock) drm_modeset_unlock() argument
453 list_del_init(&lock->head); drm_modeset_unlock()
454 ww_mutex_unlock(&lock->mutex); drm_modeset_unlock()
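The header comment of this file (the foreach loop above) sketches the ww_mutex-based pattern: take any set of modeset locks in any order, and on -EDEADLK back off and retry so the acquire context can re-order the whole sequence. Reconstructed as a hedged caller sketch; drm_modeset_acquire_init/fini and drm_modeset_backoff are assumed to be the usual companions of the functions shown above:

    #include <linux/errno.h>
    #include <drm/drm_modeset_lock.h>

    static int example_lock_two(struct drm_modeset_lock *a, struct drm_modeset_lock *b)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(a, &ctx);
            if (ret == -EDEADLK)
                    goto backoff;
            if (ret)
                    goto out;

            ret = drm_modeset_lock(b, &ctx);
            if (ret == -EDEADLK)
                    goto backoff;
            if (ret)
                    goto out;

            /* ... both locks held: do the update ... */

    out:
            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;

    backoff:
            drm_modeset_backoff(&ctx);      /* drops what we hold, waits for the contended lock */
            goto retry;
    }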
/linux-4.1.27/arch/arm/include/asm/
H A Dspinlock.h48 * A memory barrier is required after we get a lock, and before we
53 #define arch_spin_unlock_wait(lock) \
54 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
56 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
58 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
64 prefetchw(&lock->slock); arch_spin_lock()
72 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) arch_spin_lock()
77 lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); arch_spin_lock()
83 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
88 prefetchw(&lock->slock); arch_spin_trylock()
97 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) arch_spin_trylock()
109 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
112 lock->tickets.owner++; arch_spin_unlock()
116 static inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
118 return lock.tickets.owner == lock.tickets.next; arch_spin_value_unlocked()
121 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
123 return !arch_spin_value_unlocked(READ_ONCE(*lock)); arch_spin_is_locked()
126 static inline int arch_spin_is_contended(arch_spinlock_t *lock) arch_spin_is_contended() argument
128 struct __raw_tickets tickets = READ_ONCE(lock->tickets); arch_spin_is_contended()
138 * just write zero since the lock is exclusively held.
145 prefetchw(&rw->lock); arch_write_lock()
154 : "r" (&rw->lock), "r" (0x80000000) arch_write_lock()
164 prefetchw(&rw->lock); arch_write_trylock()
172 : "r" (&rw->lock), "r" (0x80000000) arch_write_trylock()
191 : "r" (&rw->lock), "r" (0) arch_write_unlock()
198 #define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
202 * - Exclusively load the lock value.
204 * - Store new lock value if positive, and we still own this location.
216 prefetchw(&rw->lock); arch_read_lock()
225 : "r" (&rw->lock) arch_read_lock()
237 prefetchw(&rw->lock); arch_read_unlock()
245 : "r" (&rw->lock) arch_read_unlock()
256 prefetchw(&rw->lock); arch_read_trylock()
264 : "r" (&rw->lock) arch_read_trylock()
268 /* If the lock is negative, then it is already held for write. */ arch_read_trylock()
278 #define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
280 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
281 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
283 #define arch_spin_relax(lock) cpu_relax()
284 #define arch_read_relax(lock) cpu_relax()
285 #define arch_write_relax(lock) cpu_relax()
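The ARM spinlock above is a ticket lock: arch_spin_lock() takes the next ticket and spins until owner reaches it, arch_spin_unlock() just bumps owner, and the lock is free exactly when owner == next (arch_spin_value_unlocked()). A compact userspace model of the same idea in C11 atomics (field widths, and the wfe/sev-based waiting, are simplified away):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_ticket_lock {
            atomic_uint next;       /* ticket handed to the next arrival */
            atomic_uint owner;      /* ticket currently being served */
    };

    static void toy_ticket_lock(struct toy_ticket_lock *lp)
    {
            unsigned int ticket = atomic_fetch_add(&lp->next, 1);

            while (atomic_load(&lp->owner) != ticket)
                    ;               /* the arch code waits on an event instead of spinning hot */
    }

    static void toy_ticket_unlock(struct toy_ticket_lock *lp)
    {
            /* equivalent of lock->tickets.owner++ plus the release barrier */
            atomic_fetch_add(&lp->owner, 1);
    }

    static bool toy_ticket_is_locked(struct toy_ticket_lock *lp)
    {
            return atomic_load(&lp->owner) != atomic_load(&lp->next);
    }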
H A Dmcs_spinlock.h8 #define arch_mcs_spin_lock_contended(lock) \
12 while (!(smp_load_acquire(lock))) \
16 #define arch_mcs_spin_unlock_contended(lock) \
18 smp_store_release(lock, 1); \
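These two macros are the contended-path handoff of an MCS-style lock: each waiter spins on its own node's flag with acquire semantics, and the departing holder passes the lock with a single store-release of 1. The same handshake rendered in C11 atomics (node queueing, which the surrounding MCS code handles, is omitted):

    #include <stdatomic.h>

    /* Waiter side: spin until our per-waiter flag is set.
     * Mirrors arch_mcs_spin_lock_contended(): smp_load_acquire() in a loop. */
    static void toy_mcs_wait(_Atomic int *node_locked)
    {
            while (!atomic_load_explicit(node_locked, memory_order_acquire))
                    ;       /* the kernel relaxes the CPU here */
    }

    /* Previous holder: hand the lock to the next waiter.
     * Mirrors arch_mcs_spin_unlock_contended(): smp_store_release(lock, 1). */
    static void toy_mcs_handoff(_Atomic int *next_node_locked)
    {
            atomic_store_explicit(next_node_locked, 1, memory_order_release);
    }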
/linux-4.1.27/fs/ocfs2/dlm/
H A Ddlmast.c52 struct dlm_lock *lock);
53 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
56 * lock level will obsolete a pending bast.
57 * For example, if dlm_thread queued a bast for an EX lock that
59 * lock owner downconverted to NL, the bast is now obsolete.
61 * This is needed because the lock and convert paths can queue
64 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_should_cancel_bast() argument
67 assert_spin_locked(&lock->spinlock); dlm_should_cancel_bast()
69 if (lock->ml.highest_blocked == LKM_IVMODE) dlm_should_cancel_bast()
71 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); dlm_should_cancel_bast()
73 if (lock->bast_pending && dlm_should_cancel_bast()
74 list_empty(&lock->bast_list)) dlm_should_cancel_bast()
78 if (lock->ml.type == LKM_EXMODE) dlm_should_cancel_bast()
81 else if (lock->ml.type == LKM_NLMODE) dlm_should_cancel_bast()
84 else if (lock->ml.highest_blocked != LKM_EXMODE) dlm_should_cancel_bast()
91 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) __dlm_queue_ast() argument
96 BUG_ON(!lock); __dlm_queue_ast()
98 res = lock->lockres; __dlm_queue_ast()
102 if (!list_empty(&lock->ast_list)) { __dlm_queue_ast()
103 mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " __dlm_queue_ast()
106 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast()
107 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast()
108 lock->ast_pending, lock->ml.type); __dlm_queue_ast()
111 if (lock->ast_pending) __dlm_queue_ast()
112 mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", __dlm_queue_ast()
114 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast()
115 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_ast()
117 /* putting lock on list, add a ref */ __dlm_queue_ast()
118 dlm_lock_get(lock); __dlm_queue_ast()
119 spin_lock(&lock->spinlock); __dlm_queue_ast()
122 if (dlm_should_cancel_bast(dlm, lock)) { __dlm_queue_ast()
123 mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", __dlm_queue_ast()
125 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast()
126 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_ast()
127 lock->bast_pending = 0; __dlm_queue_ast()
128 list_del_init(&lock->bast_list); __dlm_queue_ast()
129 lock->ml.highest_blocked = LKM_IVMODE; __dlm_queue_ast()
130 /* removing lock from list, remove a ref. guaranteed __dlm_queue_ast()
133 dlm_lock_put(lock); __dlm_queue_ast()
141 list_add_tail(&lock->ast_list, &dlm->pending_asts); __dlm_queue_ast()
142 lock->ast_pending = 1; __dlm_queue_ast()
143 spin_unlock(&lock->spinlock); __dlm_queue_ast()
146 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_queue_ast() argument
149 BUG_ON(!lock); dlm_queue_ast()
152 __dlm_queue_ast(dlm, lock); dlm_queue_ast()
157 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) __dlm_queue_bast() argument
162 BUG_ON(!lock); __dlm_queue_bast()
166 res = lock->lockres; __dlm_queue_bast()
168 BUG_ON(!list_empty(&lock->bast_list)); __dlm_queue_bast()
169 if (lock->bast_pending) __dlm_queue_bast()
170 mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", __dlm_queue_bast()
172 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_bast()
173 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_bast()
175 /* putting lock on list, add a ref */ __dlm_queue_bast()
176 dlm_lock_get(lock); __dlm_queue_bast()
177 spin_lock(&lock->spinlock); __dlm_queue_bast()
178 list_add_tail(&lock->bast_list, &dlm->pending_basts); __dlm_queue_bast()
179 lock->bast_pending = 1; __dlm_queue_bast()
180 spin_unlock(&lock->spinlock); __dlm_queue_bast()
183 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) dlm_queue_bast() argument
186 BUG_ON(!lock); dlm_queue_bast()
189 __dlm_queue_bast(dlm, lock); dlm_queue_bast()
194 struct dlm_lock *lock) dlm_update_lvb()
196 struct dlm_lockstatus *lksb = lock->lksb; dlm_update_lvb()
205 lock->ml.node == dlm->node_num ? "master" : dlm_update_lvb()
210 * place when the lock is downconverted - otherwise we risk dlm_update_lvb()
224 struct dlm_lock *lock) dlm_do_local_ast()
229 mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, dlm_do_local_ast()
231 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_local_ast()
232 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); dlm_do_local_ast()
234 lksb = lock->lksb; dlm_do_local_ast()
235 fn = lock->ast; dlm_do_local_ast()
236 BUG_ON(lock->ml.node != dlm->node_num); dlm_do_local_ast()
238 dlm_update_lvb(dlm, res, lock); dlm_do_local_ast()
239 (*fn)(lock->astdata); dlm_do_local_ast()
244 struct dlm_lock *lock) dlm_do_remote_ast()
250 mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, dlm_do_remote_ast()
252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_remote_ast()
253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); dlm_do_remote_ast()
255 lksb = lock->lksb; dlm_do_remote_ast()
256 BUG_ON(lock->ml.node == dlm->node_num); dlm_do_remote_ast()
259 dlm_update_lvb(dlm, res, lock); dlm_do_remote_ast()
261 /* lock request came from another node dlm_do_remote_ast()
263 ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); dlm_do_remote_ast()
268 struct dlm_lock *lock, int blocked_type) dlm_do_local_bast()
270 dlm_bastlockfunc_t *fn = lock->bast; dlm_do_local_bast()
272 BUG_ON(lock->ml.node != dlm->node_num); dlm_do_local_bast()
274 mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n", dlm_do_local_bast()
276 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_local_bast()
277 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_do_local_bast()
280 (*fn)(lock->astdata, blocked_type); dlm_do_local_bast()
292 struct dlm_lock *lock = NULL; dlm_proxy_ast_handler() local
375 lock = NULL; list_for_each_entry()
376 list_for_each_entry(lock, head, list) { list_for_each_entry()
377 if (lock->ml.cookie == cookie) list_for_each_entry()
387 list_for_each_entry(lock, head, list) { list_for_each_entry()
388 /* if lock is found but unlock is pending ignore the bast */ list_for_each_entry()
389 if (lock->ml.cookie == cookie) { list_for_each_entry()
390 if (lock->unlock_pending) list_for_each_entry()
396 mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
410 /* do not alter lock refcount. switching lists. */
411 list_move_tail(&lock->list, &res->granted);
412 mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
416 lock->ml.type, lock->ml.convert_type);
418 if (lock->ml.convert_type != LKM_IVMODE) {
419 lock->ml.type = lock->ml.convert_type;
420 lock->ml.convert_type = LKM_IVMODE;
425 lock->lksb->status = DLM_NORMAL;
429 BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
430 memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
436 dlm_do_local_ast(dlm, res, lock);
438 dlm_do_local_bast(dlm, res, lock, past->blocked_type);
451 struct dlm_lock *lock, int msg_type, dlm_send_proxy_ast_msg()
461 res->lockname.len, res->lockname.name, lock->ml.node, msg_type, dlm_send_proxy_ast_msg()
470 past.cookie = lock->ml.cookie; dlm_send_proxy_ast_msg()
477 vec[1].iov_base = lock->lksb->lvb; dlm_send_proxy_ast_msg()
482 lock->ml.node, &status); dlm_send_proxy_ast_msg()
486 lock->ml.node); dlm_send_proxy_ast_msg()
490 "node is dead!\n", lock->ml.node); dlm_send_proxy_ast_msg()
494 "DLM_MIGRATING!\n", lock->ml.node); dlm_send_proxy_ast_msg()
498 lock->ml.node, status); dlm_send_proxy_ast_msg()
193 dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_update_lvb() argument
223 dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_do_local_ast() argument
243 dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_do_remote_ast() argument
267 dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int blocked_type) dlm_do_local_bast() argument
450 dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int msg_type, int blocked_type, int flags) dlm_send_proxy_ast_msg() argument
H A Ddlmconvert.c6 * underlying calls for lock conversion
55 * only one that holds a lock on exit (res->spinlock).
60 struct dlm_lock *lock, int flags,
65 struct dlm_lock *lock, int flags, int type);
78 struct dlm_lock *lock, int flags, int type) dlmconvert_master()
89 status = __dlmconvert_master(dlm, res, lock, flags, type, dlmconvert_master()
100 dlm_queue_ast(dlm, lock); dlmconvert_master()
110 /* performs lock conversion at the lockres master site
113 * taken: takes and drops lock->spinlock
116 * call_ast: whether ast should be called for this lock
121 struct dlm_lock *lock, int flags, __dlmconvert_master()
131 lock->ml.type, lock->ml.convert_type, type); __dlmconvert_master()
133 spin_lock(&lock->spinlock); __dlmconvert_master()
136 if (lock->ml.convert_type != LKM_IVMODE) { __dlmconvert_master()
137 mlog(ML_ERROR, "attempted to convert a lock with a lock " __dlmconvert_master()
144 if (!dlm_lock_on_list(&res->granted, lock)) { __dlmconvert_master()
145 mlog(ML_ERROR, "attempted to convert a lock not on grant " __dlmconvert_master()
152 switch (lock->ml.type) { __dlmconvert_master()
156 dlm_lock_mode_name(lock->ml.type), __dlmconvert_master()
158 lock->lksb->flags |= DLM_LKSB_PUT_LVB; __dlmconvert_master()
166 dlm_lock_mode_name(lock->ml.type), __dlmconvert_master()
168 lock->lksb->flags |= DLM_LKSB_GET_LVB; __dlmconvert_master()
172 dlm_lock_mode_name(lock->ml.type), __dlmconvert_master()
182 if (type <= lock->ml.type) __dlmconvert_master()
188 if (tmplock == lock) __dlmconvert_master()
205 mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, __dlmconvert_master()
207 /* immediately grant the new lock type */ __dlmconvert_master()
208 lock->lksb->status = DLM_NORMAL; __dlmconvert_master()
209 if (lock->ml.node == dlm->node_num) __dlmconvert_master()
210 mlog(0, "doing in-place convert for nonlocal lock\n"); __dlmconvert_master()
211 lock->ml.type = type; __dlmconvert_master()
212 if (lock->lksb->flags & DLM_LKSB_PUT_LVB) __dlmconvert_master()
213 memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); __dlmconvert_master()
221 mlog(0, "failed to convert NOQUEUE lock %.*s from " __dlmconvert_master()
223 lock->ml.type, type); __dlmconvert_master()
230 lock->ml.convert_type = type; __dlmconvert_master()
231 /* do not alter lock refcount. switching lists. */ __dlmconvert_master()
232 list_move_tail(&lock->list, &res->converting); __dlmconvert_master()
235 spin_unlock(&lock->spinlock); __dlmconvert_master()
245 struct dlm_lock *lock) dlm_revert_pending_convert()
247 /* do not alter lock refcount. switching lists. */ dlm_revert_pending_convert()
248 list_move_tail(&lock->list, &res->granted); dlm_revert_pending_convert()
249 lock->ml.convert_type = LKM_IVMODE; dlm_revert_pending_convert()
250 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); dlm_revert_pending_convert()
253 /* messages the master site to do lock conversion
262 struct dlm_lock *lock, int flags, int type) dlmconvert_remote()
267 mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, dlmconvert_remote()
268 lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); dlmconvert_remote()
281 if (lock->ml.convert_type != LKM_IVMODE) { dlmconvert_remote()
283 mlog(ML_ERROR, "converting a remote lock that is already " dlmconvert_remote()
285 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote()
286 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote()
287 lock->ml.convert_type); dlmconvert_remote()
292 if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) { dlmconvert_remote()
297 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote()
298 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote()
299 lock->ml.type, lock->ml.convert_type); dlmconvert_remote()
305 /* move lock to local convert queue */ dlmconvert_remote()
306 /* do not alter lock refcount. switching lists. */ dlmconvert_remote()
307 list_move_tail(&lock->list, &res->converting); dlmconvert_remote()
308 lock->convert_pending = 1; dlmconvert_remote()
309 lock->ml.convert_type = type; dlmconvert_remote()
312 if (lock->ml.type == LKM_EXMODE) { dlmconvert_remote()
314 lock->lksb->flags |= DLM_LKSB_PUT_LVB; dlmconvert_remote()
316 if (lock->ml.convert_type == LKM_NLMODE) dlmconvert_remote()
320 lock->lksb->flags |= DLM_LKSB_GET_LVB; dlmconvert_remote()
328 status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); dlmconvert_remote()
332 lock->convert_pending = 0; dlmconvert_remote()
340 dlm_revert_pending_convert(res, lock); dlmconvert_remote()
366 struct dlm_lock *lock, int flags, int type) dlm_send_remote_convert_request()
380 convert.cookie = lock->ml.cookie; dlm_send_remote_convert_request()
391 vec[1].iov_base = lock->lksb->lvb; dlm_send_remote_convert_request()
446 struct dlm_lock *lock = NULL; dlm_convert_lock_handler() local
496 lock = tmp_lock; dlm_convert_lock_handler()
497 dlm_lock_get(lock); dlm_convert_lock_handler()
502 if (!lock) { dlm_convert_lock_handler()
504 mlog(ML_ERROR, "did not find lock to convert on grant queue! " dlm_convert_lock_handler()
512 /* found the lock */ dlm_convert_lock_handler()
513 lksb = lock->lksb; dlm_convert_lock_handler()
531 status = __dlmconvert_master(dlm, res, lock, flags, dlm_convert_lock_handler()
548 if (lock) dlm_convert_lock_handler()
549 dlm_lock_put(lock); dlm_convert_lock_handler()
553 dlm_queue_ast(dlm, lock); dlm_convert_lock_handler()
76 dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) dlmconvert_master() argument
119 __dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type, int *call_ast, int *kick_thread) __dlmconvert_master() argument
244 dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_revert_pending_convert() argument
260 dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) dlmconvert_remote() argument
364 dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags, int type) dlm_send_remote_convert_request() argument
H A Ddlmlock.c6 * underlying calls for lock creation
62 struct dlm_lock *lock, int flags);
66 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
84 /* Tell us whether we can grant a new lock request.
89 * returns: 1 if the lock can be granted, 0 otherwise.
92 struct dlm_lock *lock) dlm_can_grant_new_lock()
97 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) dlm_can_grant_new_lock()
102 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) dlm_can_grant_new_lock()
105 lock->ml.type)) dlm_can_grant_new_lock()
112 /* performs lock creation at the lockres master site
121 struct dlm_lock *lock, int flags) dlmlock_master()
126 mlog(0, "type=%d\n", lock->ml.type); dlmlock_master()
133 lock->ml.node != dlm->node_num) { dlmlock_master()
134 /* erf. state changed after lock was dropped. */ dlmlock_master()
142 if (dlm_can_grant_new_lock(res, lock)) { dlmlock_master()
143 mlog(0, "I can grant this lock right away\n"); dlmlock_master()
145 lock->lksb->status = DLM_NORMAL; dlmlock_master()
147 dlm_lock_get(lock); dlmlock_master()
148 list_add_tail(&lock->list, &res->granted); dlmlock_master()
150 /* for the recovery lock, we can't allow the ast dlmlock_master()
152 * frozen. but the recovery lock is always locked dlmlock_master()
161 "node %u for reco lock\n", dlm->name, dlmlock_master()
162 lock->ml.node); dlmlock_master()
166 * lock right away, return DLM_NOTQUEUED */ dlmlock_master()
172 "node %u for reco lock\n", dlm->name, dlmlock_master()
173 lock->ml.node); dlmlock_master()
177 dlm_lock_get(lock); dlmlock_master()
178 list_add_tail(&lock->list, &res->blocked); dlmlock_master()
188 dlm_queue_ast(dlm, lock); dlmlock_master()
200 struct dlm_lock *lock) dlm_revert_pending_lock()
203 list_del_init(&lock->list); dlm_revert_pending_lock()
204 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; dlm_revert_pending_lock()
217 struct dlm_lock *lock, int flags) dlmlock_remote()
223 lock->ml.type, res->lockname.len, dlmlock_remote()
238 /* add lock to local (secondary) queue */ dlmlock_remote()
239 dlm_lock_get(lock); dlmlock_remote()
240 list_add_tail(&lock->list, &res->blocked); dlmlock_remote()
241 lock->lock_pending = 1; dlmlock_remote()
244 /* spec seems to say that you will get DLM_NORMAL when the lock dlmlock_remote()
246 status = dlm_send_remote_lock_request(dlm, res, lock, flags); dlmlock_remote()
250 lock->lock_pending = 0; dlmlock_remote()
255 /* recovery lock was mastered by dead node. dlmlock_remote()
258 mlog(0, "%s: recovery lock was owned by " dlmlock_remote()
271 dlm_revert_pending_lock(res, lock); dlmlock_remote()
272 dlm_lock_put(lock); dlmlock_remote()
275 /* special case for the $RECOVERY lock. dlmlock_remote()
277 * this lock on the proper secondary queue dlmlock_remote()
279 mlog(0, "%s: $RECOVERY lock for this node (%u) is " dlmlock_remote()
280 "mastered by %u; got lock, manually granting (no ast)\n", dlmlock_remote()
282 list_move_tail(&lock->list, &res->granted); dlmlock_remote()
294 /* for remote lock creation.
303 struct dlm_lock *lock, int flags) dlm_send_remote_lock_request()
311 create.requested_type = lock->ml.type; dlm_send_remote_lock_request()
312 create.cookie = lock->ml.cookie; dlm_send_remote_lock_request()
342 void dlm_lock_get(struct dlm_lock *lock) dlm_lock_get() argument
344 kref_get(&lock->lock_refs); dlm_lock_get()
347 void dlm_lock_put(struct dlm_lock *lock) dlm_lock_put() argument
349 kref_put(&lock->lock_refs, dlm_lock_release); dlm_lock_put()
354 struct dlm_lock *lock; dlm_lock_release() local
356 lock = container_of(kref, struct dlm_lock, lock_refs); dlm_lock_release()
358 BUG_ON(!list_empty(&lock->list)); dlm_lock_release()
359 BUG_ON(!list_empty(&lock->ast_list)); dlm_lock_release()
360 BUG_ON(!list_empty(&lock->bast_list)); dlm_lock_release()
361 BUG_ON(lock->ast_pending); dlm_lock_release()
362 BUG_ON(lock->bast_pending); dlm_lock_release()
364 dlm_lock_detach_lockres(lock); dlm_lock_release()
366 if (lock->lksb_kernel_allocated) { dlm_lock_release()
368 kfree(lock->lksb); dlm_lock_release()
370 kmem_cache_free(dlm_lock_cache, lock); dlm_lock_release()
373 /* associate a lock with its lockres, getting a ref on the lockres */ dlm_lock_attach_lockres()
374 void dlm_lock_attach_lockres(struct dlm_lock *lock, dlm_lock_attach_lockres() argument
378 lock->lockres = res; dlm_lock_attach_lockres()
381 /* drop ref on lockres, if there is still one associated with lock */ dlm_lock_detach_lockres()
382 static void dlm_lock_detach_lockres(struct dlm_lock *lock) dlm_lock_detach_lockres() argument
386 res = lock->lockres; dlm_lock_detach_lockres()
388 lock->lockres = NULL; dlm_lock_detach_lockres()
389 mlog(0, "removing lock's lockres reference\n"); dlm_lock_detach_lockres()
426 struct dlm_lock *lock; dlm_new_lock() local
429 lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS); dlm_new_lock()
430 if (!lock) dlm_new_lock()
437 kmem_cache_free(dlm_lock_cache, lock); dlm_new_lock()
443 dlm_init_lock(lock, type, node, cookie); dlm_new_lock()
445 lock->lksb_kernel_allocated = 1; dlm_new_lock()
446 lock->lksb = lksb; dlm_new_lock()
447 lksb->lockid = lock; dlm_new_lock()
448 return lock; dlm_new_lock()
451 /* handler for lock creation net message
480 "sending a create_lock message for lock %.*s!\n", dlm_create_lock_handler()
565 struct dlm_lock *lock = NULL; dlmlock() local
570 * lock and convert paths, especially in the retry blocks */ dlmlock()
604 lock = lksb->lockid; dlmlock()
605 if (!lock) { dlmlock()
606 mlog(ML_ERROR, "NULL lock pointer in convert " dlmlock()
611 res = lock->lockres; dlmlock()
620 * static after the original lock call. convert requests will dlmlock()
624 if (lock->lksb != lksb || lock->ast != ast || dlmlock()
625 lock->bast != bast || lock->astdata != data) { dlmlock()
630 "astdata=%p\n", lock->lksb, lock->ast, dlmlock()
631 lock->bast, lock->astdata); dlmlock()
638 status = dlmconvert_master(dlm, res, lock, flags, mode); dlmlock()
640 status = dlmconvert_remote(dlm, res, lock, flags, mode); dlmlock()
669 lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); dlmlock()
670 if (!lock) { dlmlock()
678 /* find or create the lock resource */ dlmlock()
687 mlog(0, "creating lock: lock=%p res=%p\n", lock, res); dlmlock()
689 dlm_lock_attach_lockres(lock, res); dlmlock()
690 lock->ast = ast; dlmlock()
691 lock->bast = bast; dlmlock()
692 lock->astdata = data; dlmlock()
704 lock->lksb->flags |= DLM_LKSB_GET_LVB; dlmlock()
709 status = dlmlock_master(dlm, res, lock, flags); dlmlock()
711 status = dlmlock_remote(dlm, res, lock, flags); dlmlock()
739 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; dlmlock()
748 if (lock && !convert) dlmlock()
749 dlm_lock_put(lock); dlmlock()
91 dlm_can_grant_new_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_can_grant_new_lock() argument
119 dlmlock_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) dlmlock_master() argument
199 dlm_revert_pending_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_revert_pending_lock() argument
215 dlmlock_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) dlmlock_remote() argument
301 dlm_send_remote_lock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) dlm_send_remote_lock_request() argument
H A Ddlmunlock.c60 struct dlm_lock *lock,
65 struct dlm_lock *lock,
71 struct dlm_lock *lock,
84 * So to unlock a converting lock, you must first cancel the
93 * taken: res->spinlock and lock->spinlock taken and dropped
96 * all callers should have taken an extra ref on lock coming in
100 struct dlm_lock *lock, dlmunlock_common()
119 /* We want to be sure that we're not freeing a lock dlmunlock_common()
121 in_use = !list_empty(&lock->ast_list); dlmunlock_common()
141 spin_lock(&lock->spinlock); dlmunlock_common()
154 * LKM_CANCEL and the lock queue state */ dlmunlock_common()
156 status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); dlmunlock_common()
158 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); dlmunlock_common()
177 lock->cancel_pending = 1; dlmunlock_common()
179 lock->unlock_pending = 1; dlmunlock_common()
180 spin_unlock(&lock->spinlock); dlmunlock_common()
182 status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, dlmunlock_common()
185 spin_lock(&lock->spinlock); dlmunlock_common()
186 /* if the master told us the lock was already granted, dlmunlock_common()
210 lock->cancel_pending = 0; dlmunlock_common()
212 lock->unlock_pending = 0; dlmunlock_common()
216 /* get an extra ref on lock. if we are just switching dlmunlock_common()
217 * lists here, we dont want the lock to go away. */ dlmunlock_common()
218 dlm_lock_get(lock); dlmunlock_common()
221 list_del_init(&lock->list); dlmunlock_common()
222 dlm_lock_put(lock); dlmunlock_common()
225 dlm_lock_get(lock); dlmunlock_common()
226 list_add_tail(&lock->list, &res->granted); dlmunlock_common()
231 lock->ml.convert_type = LKM_IVMODE; dlmunlock_common()
234 /* remove the extra ref on lock */ dlmunlock_common()
235 dlm_lock_put(lock); dlmunlock_common()
239 if (!dlm_lock_on_list(&res->converting, lock)) dlmunlock_common()
240 BUG_ON(lock->ml.convert_type != LKM_IVMODE); dlmunlock_common()
242 BUG_ON(lock->ml.convert_type == LKM_IVMODE); dlmunlock_common()
243 spin_unlock(&lock->spinlock); dlmunlock_common()
251 mlog(0, "lock %u:%llu should be gone now! refs=%d\n", dlmunlock_common()
252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmunlock_common()
253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmunlock_common()
254 atomic_read(&lock->lock_refs.refcount)-1); dlmunlock_common()
255 dlm_lock_put(lock); dlmunlock_common()
268 struct dlm_lock *lock) dlm_commit_pending_unlock()
272 list_del_init(&lock->list); dlm_commit_pending_unlock()
276 struct dlm_lock *lock) dlm_commit_pending_cancel()
278 list_move_tail(&lock->list, &res->granted); dlm_commit_pending_cancel()
279 lock->ml.convert_type = LKM_IVMODE; dlm_commit_pending_cancel()
285 struct dlm_lock *lock, dlmunlock_master()
290 return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); dlmunlock_master()
295 struct dlm_lock *lock, dlmunlock_remote()
299 return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); dlmunlock_remote()
311 struct dlm_lock *lock, dlm_send_remote_unlock_request()
338 unlock.cookie = lock->ml.cookie; dlm_send_remote_unlock_request()
348 vec[1].iov_base = lock->lksb->lvb; dlm_send_remote_unlock_request()
397 struct dlm_lock *lock = NULL; dlm_unlock_lock_handler() local
433 /* We assume here that a no lock resource simply means dlm_unlock_lock_handler()
466 list_for_each_entry(lock, queue, list) { list_for_each_entry()
467 if (lock->ml.cookie == unlock->cookie && list_for_each_entry()
468 lock->ml.node == unlock->node_idx) { list_for_each_entry()
469 dlm_lock_get(lock); list_for_each_entry()
485 /* lock was found on queue */
486 lksb = lock->lksb;
488 lock->ml.type != LKM_EXMODE)
499 status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
511 mlog(ML_ERROR, "failed to find lock to unlock! "
516 dlm_lock_put(lock);
530 struct dlm_lock *lock, dlm_get_cancel_actions()
536 if (dlm_lock_on_list(&res->blocked, lock)) { dlm_get_cancel_actions()
541 } else if (dlm_lock_on_list(&res->converting, lock)) { dlm_get_cancel_actions()
548 } else if (dlm_lock_on_list(&res->granted, lock)) { dlm_get_cancel_actions()
553 mlog(ML_ERROR, "lock to cancel is not on any list!\n"); dlm_get_cancel_actions()
562 struct dlm_lock *lock, dlm_get_unlock_actions()
569 if (!dlm_lock_on_list(&res->granted, lock)) { dlm_get_unlock_actions()
574 /* unlock granted lock */ dlm_get_unlock_actions()
592 struct dlm_lock *lock = NULL; dlmunlock() local
615 lock = lksb->lockid; dlmunlock()
616 BUG_ON(!lock); dlmunlock()
617 dlm_lock_get(lock); dlmunlock()
619 res = lock->lockres; dlmunlock()
625 mlog(0, "lock=%p res=%p\n", lock, res); dlmunlock()
629 if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) dlmunlock()
634 status = dlmunlock_master(dlm, res, lock, lksb, flags, dlmunlock()
639 status = dlmunlock_remote(dlm, res, lock, lksb, flags, dlmunlock()
672 * since this lock has been removed from the dlmunlock()
676 dlm_lock_basts_flushed(dlm, lock)); dlmunlock()
692 dlm_lock_put(lock); dlmunlock()
98 dlmunlock_common(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast, int master_node) dlmunlock_common() argument
267 dlm_commit_pending_unlock(struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_commit_pending_unlock() argument
275 dlm_commit_pending_cancel(struct dlm_lock_resource *res, struct dlm_lock *lock) dlm_commit_pending_cancel() argument
283 dlmunlock_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast) dlmunlock_master() argument
293 dlmunlock_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast) dlmunlock_remote() argument
309 dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, u8 owner) dlm_send_remote_unlock_request() argument
528 dlm_get_cancel_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions) dlm_get_cancel_actions() argument
560 dlm_get_unlock_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions) dlm_get_unlock_actions() argument
H A Ddlmthread.c57 #define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
91 * the first lock), and has no bits in its refmap.
291 struct dlm_lock *lock, *target; dlm_shuffle_lists() local
314 mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n", dlm_shuffle_lists()
318 list_for_each_entry(lock, &res->granted, list) { dlm_shuffle_lists()
319 if (lock==target) dlm_shuffle_lists()
321 if (!dlm_lock_compatible(lock->ml.type, dlm_shuffle_lists()
325 if (lock->ml.highest_blocked == LKM_IVMODE) { dlm_shuffle_lists()
327 __dlm_queue_bast(dlm, lock); dlm_shuffle_lists()
330 if (lock->ml.highest_blocked < target->ml.convert_type) dlm_shuffle_lists()
331 lock->ml.highest_blocked = dlm_shuffle_lists()
336 list_for_each_entry(lock, &res->converting, list) { dlm_shuffle_lists()
337 if (lock==target) dlm_shuffle_lists()
339 if (!dlm_lock_compatible(lock->ml.type, dlm_shuffle_lists()
342 if (lock->ml.highest_blocked == LKM_IVMODE) { dlm_shuffle_lists()
344 __dlm_queue_bast(dlm, lock); dlm_shuffle_lists()
346 if (lock->ml.highest_blocked < target->ml.convert_type) dlm_shuffle_lists()
347 lock->ml.highest_blocked = dlm_shuffle_lists()
352 /* we can convert the lock */ dlm_shuffle_lists()
357 mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type " dlm_shuffle_lists()
385 list_for_each_entry(lock, &res->granted, list) { dlm_shuffle_lists()
386 if (lock==target) dlm_shuffle_lists()
388 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { dlm_shuffle_lists()
390 if (lock->ml.highest_blocked == LKM_IVMODE) { dlm_shuffle_lists()
392 __dlm_queue_bast(dlm, lock); dlm_shuffle_lists()
394 if (lock->ml.highest_blocked < target->ml.type) dlm_shuffle_lists()
395 lock->ml.highest_blocked = target->ml.type; dlm_shuffle_lists()
399 list_for_each_entry(lock, &res->converting, list) { dlm_shuffle_lists()
400 if (lock==target) dlm_shuffle_lists()
402 if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { dlm_shuffle_lists()
404 if (lock->ml.highest_blocked == LKM_IVMODE) { dlm_shuffle_lists()
406 __dlm_queue_bast(dlm, lock); dlm_shuffle_lists()
408 if (lock->ml.highest_blocked < target->ml.type) dlm_shuffle_lists()
409 lock->ml.highest_blocked = target->ml.type; dlm_shuffle_lists()
413 /* we can grant the blocked lock (only dlm_shuffle_lists()
419 mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, " dlm_shuffle_lists()
519 struct dlm_lock *lock; dlm_flush_asts() local
525 lock = list_entry(dlm->pending_asts.next, dlm_flush_asts()
527 /* get an extra ref on lock */ dlm_flush_asts()
528 dlm_lock_get(lock); dlm_flush_asts()
529 res = lock->lockres; dlm_flush_asts()
530 mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, " dlm_flush_asts()
533 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts()
534 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts()
535 lock->ml.type, lock->ml.node); dlm_flush_asts()
537 BUG_ON(!lock->ast_pending); dlm_flush_asts()
540 list_del_init(&lock->ast_list); dlm_flush_asts()
541 dlm_lock_put(lock); dlm_flush_asts()
544 if (lock->ml.node != dlm->node_num) { dlm_flush_asts()
545 ret = dlm_do_remote_ast(dlm, res, lock); dlm_flush_asts()
549 dlm_do_local_ast(dlm, res, lock); dlm_flush_asts()
555 if (!list_empty(&lock->ast_list)) { dlm_flush_asts()
560 lock->ast_pending = 0; dlm_flush_asts()
564 dlm_lock_put(lock); dlm_flush_asts()
569 lock = list_entry(dlm->pending_basts.next, dlm_flush_asts()
571 /* get an extra ref on lock */ dlm_flush_asts()
572 dlm_lock_get(lock); dlm_flush_asts()
573 res = lock->lockres; dlm_flush_asts()
575 BUG_ON(!lock->bast_pending); dlm_flush_asts()
577 /* get the highest blocked lock, and reset */ dlm_flush_asts()
578 spin_lock(&lock->spinlock); dlm_flush_asts()
579 BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); dlm_flush_asts()
580 hi = lock->ml.highest_blocked; dlm_flush_asts()
581 lock->ml.highest_blocked = LKM_IVMODE; dlm_flush_asts()
582 spin_unlock(&lock->spinlock); dlm_flush_asts()
585 list_del_init(&lock->bast_list); dlm_flush_asts()
586 dlm_lock_put(lock); dlm_flush_asts()
589 mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, " dlm_flush_asts()
592 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts()
593 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts()
594 hi, lock->ml.node); dlm_flush_asts()
596 if (lock->ml.node != dlm->node_num) { dlm_flush_asts()
597 ret = dlm_send_proxy_bast(dlm, res, lock, hi); dlm_flush_asts()
601 dlm_do_local_bast(dlm, res, lock, hi); dlm_flush_asts()
607 if (!list_empty(&lock->bast_list)) { dlm_flush_asts()
612 lock->bast_pending = 0; dlm_flush_asts()
616 dlm_lock_put(lock); dlm_flush_asts()
659 * unset the dirty flag and drop the dlm lock */ dlm_thread()
707 * spinlock and do NOT have the dlm lock. dlm_thread()
710 /* called while holding lockres lock */ dlm_thread()
721 /* if the lock was in-progress, stick dlm_thread()
H A Ddlmapi.h40 DLM_BLOCKED, /* 5: lock request blocked */
41 DLM_BLOCKED_ORPHAN, /* 6: lock request blocked by an orphan lock */
52 DLM_NOLOCKMGR, /* 17: can't contact lock manager */
58 DLM_CVTUNGRANT, /* 23: attempted to convert ungranted lock */
59 DLM_BADPARAM, /* 24: invalid lock mode specified */
62 DLM_ABORT, /* 27: blocked lock request cancelled */
68 DLM_TIMEOUT, /* 33: timeout value for lock has expired */
75 DLM_RECOVERING, /* 39: extension, allows caller to fail a lock
77 DLM_MIGRATING, /* 40: extension, allows caller to fail a lock
118 /* Valid lock modes. */
120 #define LKM_NLMODE 0 /* null lock */
133 #define LKM_ORPHAN 0x00000010 /* this lock is orphanable (U) */
134 #define LKM_PARENTABLE 0x00000020 /* this lock was orphaned (U) */
135 #define LKM_BLOCK 0x00000040 /* blocking lock request (U) */
136 #define LKM_LOCAL 0x00000080 /* local lock request */
137 #define LKM_VALBLK 0x00000100 /* lock value block request */
140 #define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
141 #define LKM_UNLOCK 0x00001000 /* deallocate this lock */
144 #define LKM_INVVALBLK 0x00008000 /* invalidate lock value block */
146 #define LKM_TIMEOUT 0x00020000 /* lock request contains timeout (U) */
148 #define LKM_FINDLOCAL 0x00080000 /* find local lock request (U) */
151 #define LKM_XID_CONFLICT 0x00400000 /* do not allow lock inheritance (U) */
154 lock value block (U) */
171 from lockres when lock is granted */
172 #define LKM_RECOVERY 0x80000000 /* extension: flag for recovery lock
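A note on the status codes above: DLM_RECOVERING and DLM_MIGRATING are the extensions that let a caller fail a request and try again later, while codes such as DLM_BADPARAM or DLM_CVTUNGRANT are hard errors. A hypothetical classification helper (the helper and the grouping are assumptions; only the enum dlm_status constants come from dlmapi.h above):

static int dlm_status_needs_retry(enum dlm_status st)
{
	switch (st) {
	case DLM_RECOVERING:	/* lockres is being recovered: back off and retry */
	case DLM_MIGRATING:	/* lockres is being migrated: back off and retry */
		return 1;
	default:
		return 0;	/* DLM_BADPARAM, DLM_CVTUNGRANT, ... are final */
	}
}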
/linux-4.1.27/arch/sh/include/asm/
H A Dspinlock.h26 #define arch_spin_is_locked(x) ((x)->lock <= 0)
27 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
32 * Simple spin lock operations. There are two variants, one clears IRQ's
37 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
52 : "r" (&lock->lock) arch_spin_lock()
57 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
65 : "r" (&lock->lock) arch_spin_unlock()
70 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
83 : "r" (&lock->lock) arch_spin_trylock()
95 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
101 * @lock: the rwlock in question.
103 #define arch_read_can_lock(x) ((x)->lock > 0)
107 * @lock: the rwlock in question.
109 #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
124 : "r" (&rw->lock) arch_read_lock()
140 : "r" (&rw->lock) arch_read_unlock()
158 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) arch_write_lock()
168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) arch_write_unlock()
189 : "r" (&rw->lock) arch_read_trylock()
212 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) arch_write_trylock()
219 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
220 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
222 #define arch_spin_relax(lock) cpu_relax()
223 #define arch_read_relax(lock) cpu_relax()
224 #define arch_write_relax(lock) cpu_relax()
H A Dspinlock_types.h9 volatile unsigned int lock; member in struct:__anon2581
15 volatile unsigned int lock; member in struct:__anon2582
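Before the per-architecture variants that follow, here is what the arch_spin_lock()/arch_spin_trylock()/arch_spin_unlock() contract boils down to, written as a minimal C11 test-and-set sketch instead of the SH inline assembly above (the type and function names are illustrative only):

#include <stdatomic.h>

typedef struct { atomic_flag locked; } spin_sketch_t;	/* initialise with ATOMIC_FLAG_INIT */

static void spin_lock_sketch(spin_sketch_t *l)
{
	while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
		;				/* spin until the flag was observed clear */
}

static int spin_trylock_sketch(spin_sketch_t *l)
{
	/* 1 on success, 0 if another CPU/thread holds it */
	return !atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire);
}

static void spin_unlock_sketch(spin_sketch_t *l)
{
	atomic_flag_clear_explicit(&l->locked, memory_order_release);
}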
/linux-4.1.27/arch/hexagon/include/asm/
H A Dspinlock.h34 * - load the lock value
36 * - if the lock value is still negative, go back and try again.
38 * - successful store new lock value if positive -> lock acquired
40 static inline void arch_read_lock(arch_rwlock_t *lock) arch_read_lock() argument
49 : "r" (&lock->lock) arch_read_lock()
55 static inline void arch_read_unlock(arch_rwlock_t *lock) arch_read_unlock() argument
63 : "r" (&lock->lock) arch_read_unlock()
70 static inline int arch_read_trylock(arch_rwlock_t *lock) arch_read_trylock() argument
81 : "r" (&lock->lock) arch_read_trylock()
89 return rwlock->lock == 0; arch_read_can_lock()
94 return rwlock->lock == 0; arch_write_can_lock()
97 /* Stuffs a -1 in the lock value? */ arch_write_lock()
98 static inline void arch_write_lock(arch_rwlock_t *lock) arch_write_lock() argument
107 : "r" (&lock->lock) arch_write_lock()
113 static inline int arch_write_trylock(arch_rwlock_t *lock) arch_write_trylock() argument
124 : "r" (&lock->lock) arch_write_trylock()
131 static inline void arch_write_unlock(arch_rwlock_t *lock) arch_write_unlock() argument
134 lock->lock = 0; arch_write_unlock()
137 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
146 : "r" (&lock->lock) arch_spin_lock()
152 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
155 lock->lock = 0; arch_spin_unlock()
158 static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
169 : "r" (&lock->lock) arch_spin_trylock()
176 * SMP spinlocks are intended to allow only a single CPU at the lock
178 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
179 #define arch_spin_unlock_wait(lock) \
180 do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
181 #define arch_spin_is_locked(x) ((x)->lock != 0)
183 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
184 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
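The three-step recipe in the Hexagon comment (load the lock value, retry while it is negative, store the incremented value) is the generic counter-based read lock. A user-space sketch with C11 atomics, assuming -1 means write-locked and non-negative values count readers (names are illustrative):

#include <stdatomic.h>

typedef struct { atomic_int lock; } rw_sketch_t;

static void read_lock_sketch(rw_sketch_t *rw)
{
	int old;

	for (;;) {
		old = atomic_load(&rw->lock);
		if (old < 0)
			continue;		/* a writer holds it: spin */
		if (atomic_compare_exchange_weak(&rw->lock, &old, old + 1))
			return;			/* reader count bumped: acquired */
	}
}

static void read_unlock_sketch(rw_sketch_t *rw)
{
	atomic_fetch_sub(&rw->lock, 1);
}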
/linux-4.1.27/arch/arm64/include/asm/
H A Dspinlock.h29 #define arch_spin_unlock_wait(lock) \
30 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
32 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
34 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
46 /* Did we get the lock? */ arch_spin_lock()
58 /* We got the lock. Critical section starts here. */ arch_spin_lock()
60 : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) arch_spin_lock()
61 : "Q" (lock->owner), "I" (1 << TICKET_SHIFT) arch_spin_lock()
65 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
79 : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) arch_spin_trylock()
86 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
90 : "=Q" (lock->owner) arch_spin_unlock()
91 : "r" (lock->owner + 1) arch_spin_unlock()
95 static inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
97 return lock.owner == lock.next; arch_spin_value_unlocked()
100 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
102 return !arch_spin_value_unlocked(READ_ONCE(*lock)); arch_spin_is_locked()
105 static inline int arch_spin_is_contended(arch_spinlock_t *lock) arch_spin_is_contended() argument
107 arch_spinlock_t lockval = READ_ONCE(*lock); arch_spin_is_contended()
113 * Write lock implementation.
115 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
133 : "=&r" (tmp), "+Q" (rw->lock) arch_write_lock()
147 : "=&r" (tmp), "+Q" (rw->lock) arch_write_trylock()
158 : "=Q" (rw->lock) : "r" (0) : "memory"); arch_write_unlock()
162 #define arch_write_can_lock(x) ((x)->lock == 0)
165 * Read lock implementation.
167 * It exclusively loads the lock value, increments it and stores the new value
169 * value is negative, the lock is already held.
171 * During unlocking there may be multiple active read locks but no write lock.
188 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) arch_read_lock()
202 : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) arch_read_unlock()
217 : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) arch_read_trylock()
225 #define arch_read_can_lock(x) ((x)->lock < 0x80000000)
227 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
228 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
230 #define arch_spin_relax(lock) cpu_relax()
231 #define arch_read_relax(lock) cpu_relax()
232 #define arch_write_relax(lock) cpu_relax()
/linux-4.1.27/lib/
H A Datomic64.c29 * Ensure each lock is in a separate cacheline.
32 raw_spinlock_t lock; member in union:__anon14019
36 .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
46 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock; lock_addr()
52 raw_spinlock_t *lock = lock_addr(v); atomic64_read() local
55 raw_spin_lock_irqsave(lock, flags); atomic64_read()
57 raw_spin_unlock_irqrestore(lock, flags); atomic64_read()
65 raw_spinlock_t *lock = lock_addr(v); atomic64_set() local
67 raw_spin_lock_irqsave(lock, flags); atomic64_set()
69 raw_spin_unlock_irqrestore(lock, flags); atomic64_set()
77 raw_spinlock_t *lock = lock_addr(v); \
79 raw_spin_lock_irqsave(lock, flags); \
81 raw_spin_unlock_irqrestore(lock, flags); \
89 raw_spinlock_t *lock = lock_addr(v); \
92 raw_spin_lock_irqsave(lock, flags); \
94 raw_spin_unlock_irqrestore(lock, flags); \
113 raw_spinlock_t *lock = lock_addr(v); atomic64_dec_if_positive() local
116 raw_spin_lock_irqsave(lock, flags); atomic64_dec_if_positive()
120 raw_spin_unlock_irqrestore(lock, flags); atomic64_dec_if_positive()
128 raw_spinlock_t *lock = lock_addr(v); atomic64_cmpxchg() local
131 raw_spin_lock_irqsave(lock, flags); atomic64_cmpxchg()
135 raw_spin_unlock_irqrestore(lock, flags); atomic64_cmpxchg()
143 raw_spinlock_t *lock = lock_addr(v); atomic64_xchg() local
146 raw_spin_lock_irqsave(lock, flags); atomic64_xchg()
149 raw_spin_unlock_irqrestore(lock, flags); atomic64_xchg()
157 raw_spinlock_t *lock = lock_addr(v); atomic64_add_unless() local
160 raw_spin_lock_irqsave(lock, flags); atomic64_add_unless()
165 raw_spin_unlock_irqrestore(lock, flags); atomic64_add_unless()
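lib/atomic64.c above emulates 64-bit atomics with a small, cacheline-padded array of spinlocks hashed by the variable's address, so unrelated atomic64_t variables rarely contend on the same lock. A user-space sketch of just the hashing idea (array size, shift and pthread types are illustrative; pthread_spin_init() of each entry is omitted):

#include <pthread.h>
#include <stdint.h>

#define NR_LOCKS_SKETCH 16

static pthread_spinlock_t locks[NR_LOCKS_SKETCH];

static pthread_spinlock_t *lock_for(const void *addr)
{
	uintptr_t a = (uintptr_t)addr >> 8;	/* same cacheline -> same lock */
	return &locks[a & (NR_LOCKS_SKETCH - 1)];
}

static int64_t read64(const int64_t *v)
{
	pthread_spinlock_t *l = lock_for(v);
	int64_t val;

	pthread_spin_lock(l);
	val = *v;			/* plain load, made atomic by the lock */
	pthread_spin_unlock(l);
	return val;
}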
H A Ddec_and_lock.c12 * spin_lock(&lock);
17 * because the spin-lock and the decrement must be
20 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) _atomic_dec_and_lock() argument
27 spin_lock(lock); _atomic_dec_and_lock()
30 spin_unlock(lock); _atomic_dec_and_lock()
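The point of _atomic_dec_and_lock() is that the 1 -> 0 transition and the lock acquisition happen as one unit. The usual caller pattern looks like this (struct obj, obj_list_lock and the list membership are hypothetical; atomic_dec_and_lock(), list_del(), spin_unlock() and kfree() are the real kernel calls):

static void put_obj(struct obj *obj)
{
	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
		/* count hit zero and we now hold obj_list_lock */
		list_del(&obj->node);
		spin_unlock(&obj_list_lock);
		kfree(obj);
		return;
	}
	/* count was merely decremented; nothing else to do */
}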
H A Dlockref.c22 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
56 spin_lock(&lockref->lock); lockref_get()
58 spin_unlock(&lockref->lock); lockref_get()
79 spin_lock(&lockref->lock); lockref_get_not_zero()
85 spin_unlock(&lockref->lock); lockref_get_not_zero()
94 * and we got the lock instead.
106 spin_lock(&lockref->lock); lockref_get_or_lock()
110 spin_unlock(&lockref->lock); lockref_get_or_lock()
138 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
150 spin_lock(&lockref->lock); lockref_put_or_lock()
154 spin_unlock(&lockref->lock); lockref_put_or_lock()
165 assert_spin_locked(&lockref->lock); lockref_mark_dead()
187 spin_lock(&lockref->lock); lockref_get_not_dead()
193 spin_unlock(&lockref->lock); lockref_get_not_dead()
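The CMPXCHG_LOOP above is the lockref fast path: while the embedded spinlock is observed unlocked, the {lock, count} pair is updated with one wide cmpxchg, and only if that keeps failing does the slow path take the spinlock for real. A conceptual user-space sketch (the union layout and names are illustrative, not the kernel's struct lockref):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

union lockref_sketch {
	uint64_t both;
	struct { uint32_t lock; uint32_t count; } s;	/* lock == 0 means unlocked */
};

static bool lockref_get_fast(_Atomic uint64_t *lr)
{
	union lockref_sketch old, new;

	old.both = atomic_load(lr);
	while (old.s.lock == 0) {			/* spinlock half unlocked? */
		new = old;
		new.s.count++;
		if (atomic_compare_exchange_weak(lr, &old.both, new.both))
			return true;			/* count bumped without the lock */
	}
	return false;					/* caller falls back to spin_lock() */
}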
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dobd.h61 spinlock_t lock; member in struct:__anon10001
69 static inline void __client_obd_list_lock(client_obd_lock_t *lock, __client_obd_list_lock() argument
74 if (spin_trylock(&lock->lock)) { __client_obd_list_lock()
75 LASSERT(lock->task == NULL); __client_obd_list_lock()
76 lock->task = current; __client_obd_list_lock()
77 lock->func = func; __client_obd_list_lock()
78 lock->line = line; __client_obd_list_lock()
79 lock->time = jiffies; __client_obd_list_lock()
84 time_before(lock->time + 5 * HZ, jiffies)) { __client_obd_list_lock()
85 struct task_struct *task = lock->task; __client_obd_list_lock()
90 LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n", __client_obd_list_lock()
92 lock, task->comm, task->pid, __client_obd_list_lock()
93 lock->func, lock->line, __client_obd_list_lock()
94 (jiffies - lock->time) / HZ); __client_obd_list_lock()
105 #define client_obd_list_lock(lock) \
106 __client_obd_list_lock(lock, __func__, __LINE__)
108 static inline void client_obd_list_unlock(client_obd_lock_t *lock) client_obd_list_unlock() argument
110 LASSERT(lock->task != NULL); client_obd_list_unlock()
111 lock->task = NULL; client_obd_list_unlock()
112 lock->time = jiffies; client_obd_list_unlock()
113 spin_unlock(&lock->lock); client_obd_list_unlock()
117 static inline void client_obd_list_lock_init(client_obd_lock_t *lock) client_obd_list_lock_init() argument
119 spin_lock_init(&lock->lock); client_obd_list_lock_init()
122 static inline void client_obd_list_lock_done(client_obd_lock_t *lock) client_obd_list_lock_done() argument
/linux-4.1.27/arch/tile/include/asm/
H A Dspinlock_32.h36 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
40 * acquired, so lock->next_ticket is 1, it's still reasonable arch_spin_is_locked()
41 * to claim the lock is held, since it will be momentarily arch_spin_is_locked()
43 * lock->next_ticket to become available. arch_spin_is_locked()
45 return lock->next_ticket != lock->current_ticket; arch_spin_is_locked()
48 void arch_spin_lock(arch_spinlock_t *lock);
51 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
53 int arch_spin_trylock(arch_spinlock_t *lock);
55 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
58 int old_ticket = lock->current_ticket; arch_spin_unlock()
59 wmb(); /* guarantee anything modified under the lock is visible */ arch_spin_unlock()
60 lock->current_ticket = old_ticket + TICKET_QUANTUM; arch_spin_unlock()
63 void arch_spin_unlock_wait(arch_spinlock_t *lock);
70 * the lock state, looping around to retry if the tns returns 1.
85 return (rwlock->lock << _RD_COUNT_WIDTH) == 0; arch_read_can_lock()
93 return rwlock->lock == 0; arch_write_can_lock()
97 * arch_read_lock() - acquire a read lock.
102 * arch_write_lock() - acquire a write lock.
107 * arch_read_trylock() - try to acquire a read lock.
112 * arch_write_trylock() - try to acquire a write lock.
117 * arch_read_unlock() - release a read lock.
122 * arch_write_unlock() - release a write lock.
126 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
127 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
H A Dspinlock_64.h21 /* Shifts and masks for the various fields in "lock". */
27 * Return the "current" portion of a ticket lock value,
28 * i.e. the number that currently owns the lock.
36 * Return the "next" portion of a ticket lock value,
37 * i.e. the number that the next task to try to acquire the lock will get.
44 /* The lock is locked if a task would have to wait to get it. */ arch_spin_is_locked()
45 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
47 u32 val = lock->lock; arch_spin_is_locked()
51 /* Bump the current ticket so the next task owns the lock. */ arch_spin_unlock()
52 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
54 wmb(); /* guarantee anything modified under the lock is visible */ arch_spin_unlock()
55 __insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT); arch_spin_unlock()
58 void arch_spin_unlock_wait(arch_spinlock_t *lock);
60 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
66 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
68 u32 val = __insn_fetchadd4(&lock->lock, 1); arch_spin_lock()
71 arch_spin_lock_slow(lock, ticket); arch_spin_lock()
74 /* Try to get the lock, and return whether we succeeded. */
75 int arch_spin_trylock(arch_spinlock_t *lock);
78 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
97 * @lock: the rwlock in question.
101 return !arch_write_val_locked(rw->lock); arch_read_can_lock()
106 * @lock: the rwlock in question.
110 return rw->lock == 0; arch_write_can_lock()
117 u32 val = __insn_fetchaddgez4(&rw->lock, 1); arch_read_lock()
126 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT); arch_write_lock()
134 __insn_fetchadd4(&rw->lock, -1); arch_read_unlock()
140 __insn_exch4(&rw->lock, 0); /* Avoid waiting in the write buffer. */ arch_write_unlock()
145 return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1)); arch_read_trylock()
150 u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT); arch_write_trylock()
154 __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT); arch_write_trylock()
158 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
159 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
H A Dspinlock_types.h26 unsigned int lock; member in struct:arch_spinlock
33 unsigned int lock; member in struct:arch_rwlock
43 /* The ticket number that currently owns this lock. */
50 * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next",
51 * byte 2 for ticket-lock "current", byte 3 for reader count.
54 unsigned int lock; member in struct:arch_rwlock
/linux-4.1.27/arch/m32r/include/asm/
H A Dspinlock.h22 * Simple spin lock operations. There are two variants, one clears IRQ's
29 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
34 * arch_spin_trylock - Try spin lock and return a result
35 * @lock: Pointer to the lock variable
37 * arch_spin_trylock() tries to get the lock and returns a result.
40 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
46 * lock->slock : =1 : unlock arch_spin_trylock()
47 * : <=0 : lock arch_spin_trylock()
49 * oldval = lock->slock; <--+ need atomic operation arch_spin_trylock()
50 * lock->slock = 0; <--+ arch_spin_trylock()
59 "lock %0, @%3; \n\t" arch_spin_trylock()
63 : "r" (&lock->slock) arch_spin_trylock()
73 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
78 * lock->slock : =1 : unlock arch_spin_lock()
79 * : <=0 : lock arch_spin_lock()
82 * lock->slock -= 1; <-- need atomic operation arch_spin_lock()
83 * if (lock->slock == 0) break; arch_spin_lock()
84 * for ( ; lock->slock <= 0 ; ); arch_spin_lock()
94 "lock %0, @%2; \n\t" arch_spin_lock()
107 : "r" (&lock->slock) arch_spin_lock()
115 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
118 lock->slock = 1; arch_spin_unlock()
128 * irq-safe write-lock, but readers can get non-irqsafe
142 * @lock: the rwlock in question.
144 #define arch_read_can_lock(x) ((int)(x)->lock > 0)
148 * @lock: the rwlock in question.
150 #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
157 * rw->lock : >0 : unlock arch_read_lock()
158 * : <=0 : lock arch_read_lock()
161 * rw->lock -= 1; <-- need atomic operation arch_read_lock()
162 * if (rw->lock >= 0) break; arch_read_lock()
163 * rw->lock += 1; <-- need atomic operation arch_read_lock()
164 * for ( ; rw->lock <= 0 ; ); arch_read_lock()
174 "lock %0, @%2; \n\t" arch_read_lock()
184 "lock %0, @%2; \n\t" arch_read_lock()
195 : "r" (&rw->lock) arch_read_lock()
208 * rw->lock : =RW_LOCK_BIAS_STR : unlock arch_write_lock()
209 * : !=RW_LOCK_BIAS_STR : lock arch_write_lock()
212 * rw->lock -= RW_LOCK_BIAS_STR; <-- need atomic operation arch_write_lock()
213 * if (rw->lock == 0) break; arch_write_lock()
214 * rw->lock += RW_LOCK_BIAS_STR; <-- need atomic operation arch_write_lock()
215 * for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ; arch_write_lock()
227 "lock %0, @%3; \n\t" arch_write_lock()
237 "lock %0, @%3; \n\t" arch_write_lock()
248 : "r" (&rw->lock) arch_write_lock()
265 "lock %0, @%2; \n\t" arch_read_unlock()
270 : "r" (&rw->lock) arch_read_unlock()
289 "lock %0, @%3; \n\t" arch_write_unlock()
294 : "r" (&rw->lock) arch_write_unlock()
302 static inline int arch_read_trylock(arch_rwlock_t *lock) arch_read_trylock() argument
304 atomic_t *count = (atomic_t*)lock; arch_read_trylock()
311 static inline int arch_write_trylock(arch_rwlock_t *lock) arch_write_trylock() argument
313 atomic_t *count = (atomic_t *)lock; arch_write_trylock()
320 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
321 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
323 #define arch_spin_relax(lock) cpu_relax()
324 #define arch_read_relax(lock) cpu_relax()
325 #define arch_write_relax(lock) cpu_relax()
H A Dspinlock_types.h15 volatile int lock; member in struct:__anon1757
/linux-4.1.27/arch/sparc/include/asm/
H A Dspinlock_64.h24 #define arch_spin_is_locked(lp) ((lp)->lock != 0)
28 } while((lp)->lock)
30 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
45 : "r" (lock) arch_spin_lock()
49 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
56 : "r" (lock) arch_spin_trylock()
62 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
67 : "r" (lock) arch_spin_unlock()
71 static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) arch_spin_lock_flags() argument
89 : "r"(lock), "r"(flags) arch_spin_lock_flags()
95 static void inline arch_read_lock(arch_rwlock_t *lock) arch_read_lock() argument
114 : "r" (lock) arch_read_lock()
118 static int inline arch_read_trylock(arch_rwlock_t *lock) arch_read_trylock() argument
133 : "r" (lock) arch_read_trylock()
139 static void inline arch_read_unlock(arch_rwlock_t *lock) arch_read_unlock() argument
151 : "r" (lock) arch_read_unlock()
155 static void inline arch_write_lock(arch_rwlock_t *lock) arch_write_lock() argument
176 : "r" (lock), "r" (mask) arch_write_lock()
180 static void inline arch_write_unlock(arch_rwlock_t *lock) arch_write_unlock() argument
185 : "r" (lock) arch_write_unlock()
189 static int inline arch_write_trylock(arch_rwlock_t *lock) arch_write_trylock() argument
207 : "r" (lock), "r" (mask) arch_write_trylock()
216 #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
217 #define arch_write_can_lock(rw) (!(rw)->lock)
219 #define arch_spin_relax(lock) cpu_relax()
220 #define arch_read_relax(lock) cpu_relax()
221 #define arch_write_relax(lock) cpu_relax()
H A Dspinlock_32.h14 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
16 #define arch_spin_unlock_wait(lock) \
17 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
19 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
35 : "r" (lock) arch_spin_lock()
39 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
44 : "r" (lock) arch_spin_trylock()
49 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
51 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); arch_spin_unlock()
60 * irq-safe write-lock, but readers can get non-irqsafe
75 * but counter is non-zero, he has to release the lock and wait,
93 #define arch_read_lock(lock) \
96 __arch_read_lock(lock); \
113 #define arch_read_unlock(lock) \
116 __arch_read_unlock(lock); \
131 *(volatile __u32 *)&lp->lock = ~0U; arch_write_lock()
134 static void inline arch_write_unlock(arch_rwlock_t *lock) arch_write_unlock() argument
139 : "r" (lock) arch_write_unlock()
149 : "r" (&rw->lock) arch_write_trylock()
153 val = rw->lock & ~0xff; arch_write_trylock()
155 ((volatile u8*)&rw->lock)[3] = 0; arch_write_trylock()
157 *(volatile u32*)&rw->lock = ~0U; arch_write_trylock()
178 #define arch_read_trylock(lock) \
182 res = __arch_read_trylock(lock); \
187 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
191 #define arch_spin_relax(lock) cpu_relax()
192 #define arch_read_relax(lock) cpu_relax()
193 #define arch_write_relax(lock) cpu_relax()
195 #define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
196 #define arch_write_can_lock(rw) (!(rw)->lock)
H A Dspinlock_types.h9 volatile unsigned char lock; member in struct:__anon2717
15 volatile unsigned int lock; member in struct:__anon2718
/linux-4.1.27/drivers/acpi/acpica/
H A Dutlock.c3 * Module Name: utlock - Reader/Writer lock interfaces
55 * PARAMETERS: lock - Pointer to a valid RW lock
59 * DESCRIPTION: Reader/writer lock creation and deletion interfaces.
62 acpi_status acpi_ut_create_rw_lock(struct acpi_rw_lock *lock) acpi_ut_create_rw_lock() argument
66 lock->num_readers = 0; acpi_ut_create_rw_lock()
67 status = acpi_os_create_mutex(&lock->reader_mutex); acpi_ut_create_rw_lock()
72 status = acpi_os_create_mutex(&lock->writer_mutex); acpi_ut_create_rw_lock()
76 void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock) acpi_ut_delete_rw_lock() argument
79 acpi_os_delete_mutex(lock->reader_mutex); acpi_ut_delete_rw_lock()
80 acpi_os_delete_mutex(lock->writer_mutex); acpi_ut_delete_rw_lock()
82 lock->num_readers = 0; acpi_ut_delete_rw_lock()
83 lock->reader_mutex = NULL; acpi_ut_delete_rw_lock()
84 lock->writer_mutex = NULL; acpi_ut_delete_rw_lock()
92 * PARAMETERS: lock - Pointer to a valid RW lock
105 acpi_status acpi_ut_acquire_read_lock(struct acpi_rw_lock *lock) acpi_ut_acquire_read_lock() argument
109 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); acpi_ut_acquire_read_lock()
114 /* Acquire the write lock only for the first reader */ acpi_ut_acquire_read_lock()
116 lock->num_readers++; acpi_ut_acquire_read_lock()
117 if (lock->num_readers == 1) { acpi_ut_acquire_read_lock()
119 acpi_os_acquire_mutex(lock->writer_mutex, acpi_ut_acquire_read_lock()
123 acpi_os_release_mutex(lock->reader_mutex); acpi_ut_acquire_read_lock()
127 acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock) acpi_ut_release_read_lock() argument
131 status = acpi_os_acquire_mutex(lock->reader_mutex, ACPI_WAIT_FOREVER); acpi_ut_release_read_lock()
136 /* Release the write lock only for the very last reader */ acpi_ut_release_read_lock()
138 lock->num_readers--; acpi_ut_release_read_lock()
139 if (lock->num_readers == 0) { acpi_ut_release_read_lock()
140 acpi_os_release_mutex(lock->writer_mutex); acpi_ut_release_read_lock()
143 acpi_os_release_mutex(lock->reader_mutex); acpi_ut_release_read_lock()
152 * PARAMETERS: lock - Pointer to a valid RW lock
157 * release the writer mutex associated with the lock. Acquisition
158 * of the lock is fully exclusive and will block all readers and
163 acpi_status acpi_ut_acquire_write_lock(struct acpi_rw_lock *lock) acpi_ut_acquire_write_lock() argument
167 status = acpi_os_acquire_mutex(lock->writer_mutex, ACPI_WAIT_FOREVER); acpi_ut_acquire_write_lock()
171 void acpi_ut_release_write_lock(struct acpi_rw_lock *lock) acpi_ut_release_write_lock() argument
174 acpi_os_release_mutex(lock->writer_mutex); acpi_ut_release_write_lock()
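utlock.c builds a reader/writer lock out of two plain mutexes: reader_mutex protects num_readers, the first reader acquires writer_mutex on behalf of all readers and the last one releases it, while writers simply take writer_mutex. The same scheme in user space (pthread types, names and the omitted error handling are illustrative):

#include <pthread.h>

struct rw_from_mutexes {
	pthread_mutex_t reader_mutex;	/* protects num_readers */
	pthread_mutex_t writer_mutex;	/* held while any reader, or a writer, is active */
	int num_readers;
};

static void acquire_read(struct rw_from_mutexes *l)
{
	pthread_mutex_lock(&l->reader_mutex);
	if (++l->num_readers == 1)
		pthread_mutex_lock(&l->writer_mutex);	/* first reader blocks writers */
	pthread_mutex_unlock(&l->reader_mutex);
}

static void release_read(struct rw_from_mutexes *l)
{
	pthread_mutex_lock(&l->reader_mutex);
	if (--l->num_readers == 0)
		pthread_mutex_unlock(&l->writer_mutex);	/* last reader lets writers in */
	pthread_mutex_unlock(&l->reader_mutex);
}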
H A Devglock.c63 * DESCRIPTION: Install a handler for the global lock release event
73 /* If Hardware Reduced flag is set, there is no global lock */ acpi_ev_init_global_lock_handler()
79 /* Attempt installation of the global lock handler */ acpi_ev_init_global_lock_handler()
86 * If the global lock does not exist on this platform, the attempt to acpi_ev_init_global_lock_handler()
88 * Map to AE_OK, but mark global lock as not present. Any attempt to acpi_ev_init_global_lock_handler()
89 * actually use the global lock will be flagged with an error. acpi_ev_init_global_lock_handler()
94 "No response from Global Lock hardware, disabling lock")); acpi_ev_init_global_lock_handler()
143 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
145 * request for the lock, signal the waiting thread.
157 * If a request for the global lock is not actually pending, acpi_ev_global_lock_handler()
158 * we are done. This handles "spurious" global lock interrupts acpi_ev_global_lock_handler()
166 * Send a unit to the global lock semaphore. The actual acquisition acpi_ev_global_lock_handler()
167 * of the global lock will be performed by the waiting thread. acpi_ev_global_lock_handler()
186 * PARAMETERS: timeout - Max time to wait for the lock, in millisec.
195 * Global Lock, and the OS would hold the lock until the last thread had
197 * lock, especially in the case where there is a tight handshake between the
200 * the global lock appear as a standard mutex on the OS side.
224 * Update the global lock handle and check for wraparound. The handle is acpi_ev_acquire_global_lock()
225 * only used for the external global lock interfaces, but it is updated acpi_ev_acquire_global_lock()
227 * lock via both the AML and the acpi_acquire_global_lock interfaces. The acpi_ev_acquire_global_lock()
237 * Make sure that a global lock actually exists. If not, just acpi_ev_acquire_global_lock()
238 * treat the lock as a standard mutex. acpi_ev_acquire_global_lock()
249 /* Attempt to acquire the actual hardware lock */ acpi_ev_acquire_global_lock()
260 * Did not get the lock. The pending bit was set above, and acpi_ev_acquire_global_lock()
261 * we must now wait until we receive the global lock acpi_ev_acquire_global_lock()
271 * Wait for handshake with the global lock interrupt handler. acpi_ev_acquire_global_lock()
317 /* Allow any thread to release the lock */ acpi_ev_release_global_lock()
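The acquire/pending handshake described above operates on the FACS global-lock word. A sketch of the conventional bit protocol (the bit assignments, helper name and return convention are assumptions, not taken from evglock.c): try to take the owned bit, and if it is already owned, set the pending bit instead and wait for the release interrupt.

#define GL_PENDING	0x1
#define GL_OWNED	0x2

static int try_acquire_global_lock(volatile unsigned int *gl)
{
	unsigned int old, new;

	do {
		old = *gl;
		new = (old & ~GL_PENDING) | GL_OWNED;
		if (old & GL_OWNED)
			new |= GL_PENDING;	/* already owned: ask for a wake-up */
	} while (__sync_val_compare_and_swap(gl, old, new) != old);

	return !(new & GL_PENDING);		/* 1 = acquired, 0 = must wait for the handler */
}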
/linux-4.1.27/arch/s390/lib/
H A Dspinlock.c35 static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old) _raw_compare_and_delay() argument
37 asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock)); _raw_compare_and_delay()
47 owner = ACCESS_ONCE(lp->lock); arch_spin_lock_wait()
48 /* Try to get the lock if it is free. */ arch_spin_lock_wait()
50 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) arch_spin_lock_wait()
54 /* Check if the lock owner is running. */ arch_spin_lock_wait()
59 /* Loop for a while on the lock value. */ arch_spin_lock_wait()
63 _raw_compare_and_delay(&lp->lock, owner); arch_spin_lock_wait()
64 owner = ACCESS_ONCE(lp->lock); arch_spin_lock_wait()
70 * yield the CPU if the lock is still unavailable. arch_spin_lock_wait()
86 owner = ACCESS_ONCE(lp->lock); arch_spin_lock_wait_flags()
87 /* Try to get the lock if it is free. */ arch_spin_lock_wait_flags()
90 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) arch_spin_lock_wait_flags()
94 /* Check if the lock owner is running. */ arch_spin_lock_wait_flags()
99 /* Loop for a while on the lock value. */ arch_spin_lock_wait_flags()
103 _raw_compare_and_delay(&lp->lock, owner); arch_spin_lock_wait_flags()
104 owner = ACCESS_ONCE(lp->lock); arch_spin_lock_wait_flags()
110 * yield the CPU if the lock is still unavailable. arch_spin_lock_wait_flags()
125 owner = ACCESS_ONCE(lp->lock); arch_spin_trylock_retry()
126 /* Try to get the lock if it is free. */ arch_spin_trylock_retry()
128 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) arch_spin_trylock_retry()
131 _raw_compare_and_delay(&lp->lock, owner); arch_spin_trylock_retry()
143 __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD); _raw_read_lock_wait()
152 old = ACCESS_ONCE(rw->lock); _raw_read_lock_wait()
156 _raw_compare_and_delay(&rw->lock, old); _raw_read_lock_wait()
159 if (_raw_compare_and_swap(&rw->lock, old, old + 1)) _raw_read_lock_wait()
171 old = ACCESS_ONCE(rw->lock); _raw_read_trylock_retry()
174 _raw_compare_and_delay(&rw->lock, old); _raw_read_trylock_retry()
177 if (_raw_compare_and_swap(&rw->lock, old, old + 1)) _raw_read_trylock_retry()
198 old = ACCESS_ONCE(rw->lock); _raw_write_lock_wait()
202 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); _raw_write_lock_wait()
208 _raw_compare_and_delay(&rw->lock, old); _raw_write_lock_wait()
228 old = ACCESS_ONCE(rw->lock); _raw_write_lock_wait()
231 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000)) _raw_write_lock_wait()
238 _raw_compare_and_delay(&rw->lock, old); _raw_write_lock_wait()
251 old = ACCESS_ONCE(rw->lock); _raw_write_trylock_retry()
254 _raw_compare_and_delay(&rw->lock, old); _raw_write_trylock_retry()
257 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) _raw_write_trylock_retry()
/linux-4.1.27/arch/mips/include/asm/
H A Dspinlock.h21 * Simple spin lock operations. There are two variants, one clears IRQ's
32 * the queue, and the other indicating the current tail. The lock is acquired
38 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
40 u32 counters = ACCESS_ONCE(lock->lock); arch_spin_is_locked()
45 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
49 static inline int arch_spin_is_contended(arch_spinlock_t *lock) arch_spin_is_contended() argument
51 u32 counters = ACCESS_ONCE(lock->lock); arch_spin_is_contended()
57 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
92 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), arch_spin_lock()
93 [serving_now_ptr] "+m" (lock->h.serving_now), arch_spin_lock()
125 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), arch_spin_lock()
126 [serving_now_ptr] "+m" (lock->h.serving_now), arch_spin_lock()
135 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
137 unsigned int serving_now = lock->h.serving_now + 1; arch_spin_unlock()
139 lock->h.serving_now = (u16)serving_now; arch_spin_unlock()
143 static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
167 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), arch_spin_trylock()
191 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), arch_spin_trylock()
208 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
214 * @lock: the rwlock in question.
216 #define arch_read_can_lock(rw) ((rw)->lock >= 0)
220 * @lock: the rwlock in question.
222 #define arch_write_can_lock(rw) (!(rw)->lock)
238 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_read_lock()
239 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_lock()
248 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_read_lock()
249 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_lock()
269 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_read_unlock()
270 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_unlock()
278 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_read_unlock()
279 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_unlock()
299 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_write_lock()
300 : GCC_OFF_SMALL_ASM() (rw->lock) arch_write_lock()
309 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) arch_write_lock()
310 : GCC_OFF_SMALL_ASM() (rw->lock) arch_write_lock()
325 : "=m" (rw->lock) arch_write_unlock()
326 : "m" (rw->lock) arch_write_unlock()
349 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) arch_read_trylock()
350 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_trylock()
366 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) arch_read_trylock()
367 : GCC_OFF_SMALL_ASM() (rw->lock) arch_read_trylock()
393 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) arch_write_trylock()
394 : GCC_OFF_SMALL_ASM() (rw->lock) arch_write_trylock()
406 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), arch_write_trylock()
408 : GCC_OFF_SMALL_ASM() (rw->lock) arch_write_trylock()
418 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
419 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
421 #define arch_spin_relax(lock) cpu_relax()
422 #define arch_read_relax(lock) cpu_relax()
423 #define arch_write_relax(lock) cpu_relax()
H A Dspinlock_types.h17 u32 lock; member in union:__anon2091
29 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0 }
32 volatile unsigned int lock; member in struct:__anon2093
/linux-4.1.27/arch/xtensa/include/asm/
H A Dspinlock.h21 * waits, until the lock is obtained. When spinning, presumably some other
32 #define arch_spin_unlock_wait(lock) \
33 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
35 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
37 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
48 : "a" (&lock->slock) arch_spin_lock()
52 /* Returns 1 if the lock is obtained, 0 otherwise. */
54 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
64 : "a" (&lock->slock) arch_spin_trylock()
70 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
78 : "a" (&lock->slock) arch_spin_unlock()
99 #define arch_write_can_lock(x) ((x)->lock == 0)
113 : "a" (&rw->lock) arch_write_lock()
117 /* Returns 1 if the lock is obtained, 0 otherwise. */
130 : "a" (&rw->lock) arch_write_trylock()
144 : "a" (&rw->lock) arch_write_unlock()
161 : "a" (&rw->lock) arch_read_lock()
165 /* Returns 1 if the lock is obtained, 0 otherwise. */
181 : "a" (&rw->lock) arch_read_trylock()
198 : "a" (&rw->lock) arch_read_unlock()
202 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
203 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
H A Dspinlock_types.h15 volatile unsigned int lock; member in struct:__anon3219
/linux-4.1.27/arch/mn10300/include/asm/
H A Dspinlock.h19 * Simple spin lock operations. There are two variants, one clears IRQ's
28 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
33 : "a"(&lock->slock) arch_spin_unlock()
37 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
48 : "a"(&lock->slock) arch_spin_trylock()
54 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
60 : "a"(&lock->slock) arch_spin_lock()
64 static inline void arch_spin_lock_flags(arch_spinlock_t *lock, arch_spin_lock_flags() argument
83 : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL) arch_spin_lock_flags()
96 * irq-safe write-lock, but readers can get non-irqsafe
102 * @lock: the rwlock in question.
104 #define arch_read_can_lock(x) ((int)(x)->lock > 0)
108 * @lock: the rwlock in question.
110 #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
166 static inline int arch_read_trylock(arch_rwlock_t *lock) arch_read_trylock() argument
168 atomic_t *count = (atomic_t *)lock; arch_read_trylock()
176 static inline int arch_write_trylock(arch_rwlock_t *lock) arch_write_trylock() argument
178 atomic_t *count = (atomic_t *)lock; arch_write_trylock()
185 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
186 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
188 #define _raw_spin_relax(lock) cpu_relax()
189 #define _raw_read_relax(lock) cpu_relax()
190 #define _raw_write_relax(lock) cpu_relax()
H A Dspinlock_types.h15 unsigned int lock; member in struct:__anon2172
H A Drwlock.h13 * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
28 #define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
42 " .section .text.lock,\"ax\" \n" \
61 " .section .text.lock,\"ax\" \n" \
88 " .section .text.lock,\"ax\" \n" \
107 " .section .text.lock,\"ax\" \n" \
/linux-4.1.27/arch/ia64/include/asm/
H A Dspinlock.h19 #define arch_spin_lock_init(x) ((x)->lock = 0)
23 * the queue, and the other indicating the current tail. The lock is acquired
40 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) __ticket_spin_lock() argument
42 int *p = (int *)&lock->lock, ticket, serve; __ticket_spin_lock()
60 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) __ticket_spin_trylock() argument
62 int tmp = ACCESS_ONCE(lock->lock); __ticket_spin_trylock()
65 return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; __ticket_spin_trylock()
69 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) __ticket_spin_unlock() argument
71 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; __ticket_spin_unlock()
77 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) __ticket_spin_unlock_wait() argument
79 int *p = (int *)&lock->lock, ticket; __ticket_spin_unlock_wait()
91 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) __ticket_spin_is_locked() argument
93 long tmp = ACCESS_ONCE(lock->lock); __ticket_spin_is_locked()
98 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) __ticket_spin_is_contended() argument
100 long tmp = ACCESS_ONCE(lock->lock); __ticket_spin_is_contended()
105 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
107 return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK); arch_spin_value_unlocked()
110 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
112 return __ticket_spin_is_locked(lock); arch_spin_is_locked()
115 static inline int arch_spin_is_contended(arch_spinlock_t *lock) arch_spin_is_contended() argument
117 return __ticket_spin_is_contended(lock); arch_spin_is_contended()
121 static __always_inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
123 __ticket_spin_lock(lock); arch_spin_lock()
126 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
128 return __ticket_spin_trylock(lock); arch_spin_trylock()
131 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
133 __ticket_spin_unlock(lock); arch_spin_unlock()
136 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, arch_spin_lock_flags() argument
139 arch_spin_lock(lock); arch_spin_lock_flags()
142 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
144 __ticket_spin_unlock_wait(lock); arch_spin_unlock_wait()
153 arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) arch_read_lock_flags() argument
172 : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT) arch_read_lock_flags()
176 #define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
204 arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) arch_write_lock_flags() argument
224 : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT) arch_write_lock_flags()
283 arch_rwlock_t lock; arch_read_trylock() member in union:__anon1676
286 old.lock = new.lock = *x; arch_read_trylock()
287 old.lock.write_lock = new.lock.write_lock = 0; arch_read_trylock()
288 ++new.lock.read_counter; arch_read_trylock()
292 #define arch_spin_relax(lock) cpu_relax()
293 #define arch_read_relax(lock) cpu_relax()
294 #define arch_write_relax(lock) cpu_relax()
H A Dacenv.h23 ia64_acpi_acquire_global_lock(unsigned int *lock) ia64_acpi_acquire_global_lock() argument
27 old = *lock; ia64_acpi_acquire_global_lock()
29 val = ia64_cmpxchg4_acq(lock, new, old); ia64_acpi_acquire_global_lock()
35 ia64_acpi_release_global_lock(unsigned int *lock) ia64_acpi_release_global_lock() argument
39 old = *lock; ia64_acpi_release_global_lock()
41 val = ia64_cmpxchg4_acq(lock, new, old); ia64_acpi_release_global_lock()
H A Dspinlock_types.h9 volatile unsigned int lock; member in struct:__anon1677
/linux-4.1.27/arch/powerpc/include/asm/
H A Dspinlock.h6 * Simple spin lock operations.
31 #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
57 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
59 return lock.slock == 0; arch_spin_value_unlocked()
62 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
65 return !arch_spin_value_unlocked(*lock); arch_spin_is_locked()
69 * This returns the old value in the lock, so we succeeded
70 * in getting the lock if the return value is 0.
72 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) __arch_spin_trylock() argument
86 : "r" (token), "r" (&lock->slock) __arch_spin_trylock()
92 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
95 return __arch_spin_trylock(lock) == 0; arch_spin_trylock()
101 * there is no point spinning on a lock if the holder of the lock
104 * rest of our timeslice to the lock holder.
106 * So that we can tell which virtual processor is holding a lock,
107 * we put 0x80000000 | smp_processor_id() in the lock when it is
115 extern void __spin_yield(arch_spinlock_t *lock);
116 extern void __rw_yield(arch_rwlock_t *lock);
123 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
127 if (likely(__arch_spin_trylock(lock) == 0)) arch_spin_lock()
132 __spin_yield(lock); arch_spin_lock()
133 } while (unlikely(lock->slock != 0)); arch_spin_lock()
139 void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) arch_spin_lock_flags() argument
145 if (likely(__arch_spin_trylock(lock) == 0)) arch_spin_lock_flags()
152 __spin_yield(lock); arch_spin_lock_flags()
153 } while (unlikely(lock->slock != 0)); arch_spin_lock_flags()
159 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
164 lock->slock = 0; arch_spin_unlock()
168 extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
170 #define arch_spin_unlock_wait(lock) \
171 do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
181 * irq-safe write-lock, but readers can get non-irqsafe
185 #define arch_read_can_lock(rw) ((rw)->lock >= 0)
186 #define arch_write_can_lock(rw) (!(rw)->lock)
197 * This returns the old value in the lock + 1,
198 * so we got a read lock if the return value is > 0.
214 : "r" (&rw->lock) __arch_read_trylock()
221 * This returns the old value in the lock,
222 * so we got the write lock if the return value is 0.
238 : "r" (token), "r" (&rw->lock) __arch_write_trylock()
253 } while (unlikely(rw->lock < 0)); arch_read_lock()
267 } while (unlikely(rw->lock != 0)); arch_write_lock()
295 : "r"(&rw->lock) arch_read_unlock()
303 rw->lock = 0; arch_write_unlock()
306 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
307 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
309 #define arch_spin_relax(lock) __spin_yield(lock)
310 #define arch_read_relax(lock) __rw_yield(lock)
311 #define arch_write_relax(lock) __rw_yield(lock)
H A Dspinlock_types.h15 volatile signed int lock; member in struct:__anon2332
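The powerpc comments above describe spinning briefly and then yielding to the lock holder, which works because the lock word records who owns it (0x80000000 | cpu). A self-contained sketch of that shape (the token layout mirrors the comment; yield_to_holder() stands in for the hypervisor confer/yield call and is purely illustrative):

#include <stdatomic.h>
#include <sched.h>

#define HOLDER_FLAG 0x80000000u

static void yield_to_holder(unsigned int cpu)
{
	(void)cpu;
	sched_yield();			/* real code would direct the yield at 'cpu' */
}

static void spin_lock_yielding(_Atomic unsigned int *lock, unsigned int my_cpu)
{
	unsigned int token = HOLDER_FLAG | my_cpu;
	unsigned int expected = 0;

	while (!atomic_compare_exchange_weak(lock, &expected, token)) {
		if (expected & HOLDER_FLAG)
			yield_to_holder(expected & ~HOLDER_FLAG);
		expected = 0;		/* retry from "free" */
	}
}

static void spin_unlock_yielding(_Atomic unsigned int *lock)
{
	atomic_store(lock, 0);
}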
/linux-4.1.27/drivers/clk/mmp/
H A Dclk-apbc.c33 spinlock_t *lock; member in struct:clk_apbc
46 if (apbc->lock) clk_apbc_prepare()
47 spin_lock_irqsave(apbc->lock, flags); clk_apbc_prepare()
55 if (apbc->lock) clk_apbc_prepare()
56 spin_unlock_irqrestore(apbc->lock, flags); clk_apbc_prepare()
60 if (apbc->lock) clk_apbc_prepare()
61 spin_lock_irqsave(apbc->lock, flags); clk_apbc_prepare()
67 if (apbc->lock) clk_apbc_prepare()
68 spin_unlock_irqrestore(apbc->lock, flags); clk_apbc_prepare()
73 if (apbc->lock) clk_apbc_prepare()
74 spin_lock_irqsave(apbc->lock, flags); clk_apbc_prepare()
80 if (apbc->lock) clk_apbc_prepare()
81 spin_unlock_irqrestore(apbc->lock, flags); clk_apbc_prepare()
93 if (apbc->lock) clk_apbc_unprepare()
94 spin_lock_irqsave(apbc->lock, flags); clk_apbc_unprepare()
102 if (apbc->lock) clk_apbc_unprepare()
103 spin_unlock_irqrestore(apbc->lock, flags); clk_apbc_unprepare()
107 if (apbc->lock) clk_apbc_unprepare()
108 spin_lock_irqsave(apbc->lock, flags); clk_apbc_unprepare()
114 if (apbc->lock) clk_apbc_unprepare()
115 spin_unlock_irqrestore(apbc->lock, flags); clk_apbc_unprepare()
125 unsigned int apbc_flags, spinlock_t *lock) mmp_clk_register_apbc()
144 apbc->lock = lock; mmp_clk_register_apbc()
123 mmp_clk_register_apbc(const char *name, const char *parent_name, void __iomem *base, unsigned int delay, unsigned int apbc_flags, spinlock_t *lock) mmp_clk_register_apbc() argument
H A Dclk-apmu.c27 spinlock_t *lock; member in struct:clk_apmu
36 if (apmu->lock) clk_apmu_enable()
37 spin_lock_irqsave(apmu->lock, flags); clk_apmu_enable()
42 if (apmu->lock) clk_apmu_enable()
43 spin_unlock_irqrestore(apmu->lock, flags); clk_apmu_enable()
54 if (apmu->lock) clk_apmu_disable()
55 spin_lock_irqsave(apmu->lock, flags); clk_apmu_disable()
60 if (apmu->lock) clk_apmu_disable()
61 spin_unlock_irqrestore(apmu->lock, flags); clk_apmu_disable()
70 void __iomem *base, u32 enable_mask, spinlock_t *lock) mmp_clk_register_apmu()
88 apmu->lock = lock; mmp_clk_register_apmu()
69 mmp_clk_register_apmu(const char *name, const char *parent_name, void __iomem *base, u32 enable_mask, spinlock_t *lock) mmp_clk_register_apmu() argument
H A Dclk-gate.c35 if (gate->lock) mmp_clk_gate_enable()
36 spin_lock_irqsave(gate->lock, flags); mmp_clk_gate_enable()
43 if (gate->lock) mmp_clk_gate_enable()
44 spin_unlock_irqrestore(gate->lock, flags); mmp_clk_gate_enable()
61 if (gate->lock) mmp_clk_gate_disable()
62 spin_lock_irqsave(gate->lock, flags); mmp_clk_gate_disable()
69 if (gate->lock) mmp_clk_gate_disable()
70 spin_unlock_irqrestore(gate->lock, flags); mmp_clk_gate_disable()
79 if (gate->lock) mmp_clk_gate_is_enabled()
80 spin_lock_irqsave(gate->lock, flags); mmp_clk_gate_is_enabled()
84 if (gate->lock) mmp_clk_gate_is_enabled()
85 spin_unlock_irqrestore(gate->lock, flags); mmp_clk_gate_is_enabled()
99 unsigned int gate_flags, spinlock_t *lock) mmp_clk_register_gate()
124 gate->lock = lock; mmp_clk_register_gate()
96 mmp_clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable, unsigned int gate_flags, spinlock_t *lock) mmp_clk_register_gate() argument
H A Dreset.c42 if (cell->lock) mmp_clk_reset_assert()
43 spin_lock_irqsave(cell->lock, flags); mmp_clk_reset_assert()
49 if (cell->lock) mmp_clk_reset_assert()
50 spin_unlock_irqrestore(cell->lock, flags); mmp_clk_reset_assert()
64 if (cell->lock) mmp_clk_reset_deassert()
65 spin_lock_irqsave(cell->lock, flags); mmp_clk_reset_deassert()
71 if (cell->lock) mmp_clk_reset_deassert()
72 spin_unlock_irqrestore(cell->lock, flags); mmp_clk_reset_deassert()
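All of the mmp clk/reset callbacks above repeat the same shape: an optional, possibly shared spinlock around a register read-modify-write. Collapsed into one hypothetical helper (the helper itself is not in the driver; readl(), writel() and spin_lock_irqsave() are the real kernel primitives):

static void clk_reg_update(void __iomem *reg, u32 clear, u32 set,
			   spinlock_t *lock)
{
	unsigned long flags = 0;
	u32 val;

	if (lock)
		spin_lock_irqsave(lock, flags);

	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);

	if (lock)
		spin_unlock_irqrestore(lock, flags);
}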
/linux-4.1.27/arch/x86/include/asm/
H A Dqrwlock.h8 static inline void queue_write_unlock(struct qrwlock *lock) queue_write_unlock() argument
11 ACCESS_ONCE(*(u8 *)&lock->cnts) = 0; queue_write_unlock()
H A Dspinlock.h15 * Simple spin lock operations. There are two variants, one clears IRQ's
39 /* How long a lock should spin before we consider blocking */
47 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock) __ticket_enter_slowpath() argument
49 set_bit(0, (volatile unsigned long *)&lock->tickets.head); __ticket_enter_slowpath()
53 static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock, __ticket_lock_spinning() argument
57 static inline void __ticket_unlock_kick(arch_spinlock_t *lock, __ticket_unlock_kick() argument
68 static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock, __ticket_check_and_clear_slowpath() argument
80 cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); __ticket_check_and_clear_slowpath()
84 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) arch_spin_value_unlocked() argument
86 return __tickets_equal(lock.tickets.head, lock.tickets.tail); arch_spin_value_unlocked()
91 * the queue, and the other indicating the current tail. The lock is acquired
96 * We use an xadd covering *both* parts of the lock, to increment the tail and
102 static __always_inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
106 inc = xadd(&lock->tickets, inc); arch_spin_lock()
114 inc.head = READ_ONCE(lock->tickets.head); arch_spin_lock()
119 __ticket_lock_spinning(lock, inc.tail); arch_spin_lock()
122 __ticket_check_and_clear_slowpath(lock, inc.head); arch_spin_lock()
124 barrier(); /* make sure nothing creeps before the lock is taken */ arch_spin_lock()
127 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
131 old.tickets = READ_ONCE(lock->tickets); arch_spin_trylock()
139 return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; arch_spin_trylock()
142 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
150 head = xadd(&lock->tickets.head, TICKET_LOCK_INC); arch_spin_unlock()
154 __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC)); arch_spin_unlock()
157 __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); arch_spin_unlock()
160 static inline int arch_spin_is_locked(arch_spinlock_t *lock) arch_spin_is_locked() argument
162 struct __raw_tickets tmp = READ_ONCE(lock->tickets); arch_spin_is_locked()
167 static inline int arch_spin_is_contended(arch_spinlock_t *lock) arch_spin_is_contended() argument
169 struct __raw_tickets tmp = READ_ONCE(lock->tickets); arch_spin_is_contended()
176 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, arch_spin_lock_flags() argument
179 arch_spin_lock(lock); arch_spin_lock_flags()
182 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
184 __ticket_t head = READ_ONCE(lock->tickets.head); arch_spin_unlock_wait()
187 struct __raw_tickets tmp = READ_ONCE(lock->tickets); arch_spin_unlock_wait()
207 * irq-safe write-lock, but readers can get non-irqsafe
216 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
217 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
219 #define arch_spin_relax(lock) cpu_relax()
220 #define arch_read_relax(lock) cpu_relax()
221 #define arch_write_relax(lock) cpu_relax()
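The head/tail scheme described above is a ticket lock: lock() takes the next tail ticket and spins until head catches up, and unlock() advances head. A minimal C11 sketch (the kernel's xadd covers both halves in a single operation and adds paravirt hooks; this version keeps them as two words for clarity):

#include <stdatomic.h>

struct ticket_lock { _Atomic unsigned int head, tail; };

static void ticket_lock(struct ticket_lock *l)
{
	unsigned int me = atomic_fetch_add(&l->tail, 1);	/* take a ticket */

	while (atomic_load(&l->head) != me)
		;						/* wait for our turn */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->head, 1);				/* serve the next ticket */
}

static int ticket_is_locked(struct ticket_lock *l)
{
	return atomic_load(&l->head) != atomic_load(&l->tail);
}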
H A Dcmpxchg.h42 #define __xchg_op(ptr, arg, op, lock) \
47 asm volatile (lock #op "b %b0, %1\n" \
52 asm volatile (lock #op "w %w0, %1\n" \
57 asm volatile (lock #op "l %0, %1\n" \
62 asm volatile (lock #op "q %q0, %1\n" \
73 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
85 #define __raw_cmpxchg(ptr, old, new, size, lock) \
94 asm volatile(lock "cmpxchgb %2,%1" \
103 asm volatile(lock "cmpxchgw %2,%1" \
112 asm volatile(lock "cmpxchgl %2,%1" \
121 asm volatile(lock "cmpxchgq %2,%1" \
137 __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
165 #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
167 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
170 #define __add(ptr, inc, lock) \
175 asm volatile (lock "addb %b1, %0\n" \
180 asm volatile (lock "addw %w1, %0\n" \
185 asm volatile (lock "addl %1, %0\n" \
190 asm volatile (lock "addq %1, %0\n" \
203 * __add() takes a lock prefix
208 #define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
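A typical consumer of the cmpxchg() macro above is a read-compute-compare-exchange retry loop, for example a sketch that raises a counter to at least val and returns the previously observed value (the helper is illustrative, not from this header):

static inline unsigned int atomic_max(unsigned int *p, unsigned int val)
{
	unsigned int old, seen;

	for (old = *p; old < val; old = seen) {
		seen = cmpxchg(p, old, val);	/* returns the prior value */
		if (seen == old)
			break;			/* we installed val */
	}
	return old;
}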
/linux-4.1.27/arch/parisc/include/asm/
H A Dspinlock_types.h9 volatile unsigned int lock[4];
15 arch_spinlock_t lock; member in struct:__anon2209
H A Dspinlock.h15 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
65 * Readers use the lock to serialise their access to the counter (which
66 * records how many readers currently hold the lock).
72 * interrupted by some other code that wants to grab the same read lock */ arch_read_lock()
77 arch_spin_lock_flags(&rw->lock, flags); arch_read_lock()
79 arch_spin_unlock(&rw->lock); arch_read_lock()
84 * interrupted by some other code that wants to grab the same read lock */ arch_read_unlock()
89 arch_spin_lock_flags(&rw->lock, flags); arch_read_unlock()
91 arch_spin_unlock(&rw->lock); arch_read_unlock()
96 * interrupted by some other code that wants to grab the same read lock */ arch_read_trylock()
102 if (arch_spin_trylock(&rw->lock)) { arch_read_trylock()
104 arch_spin_unlock(&rw->lock); arch_read_trylock()
110 /* If write-locked, we fail to acquire the lock */ arch_read_trylock()
114 /* Wait until we have a realistic chance at the lock */ arch_read_trylock()
115 while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) arch_read_trylock()
122 * interrupted by some other code that wants to read_trylock() this lock */ arch_write_lock()
128 arch_spin_lock_flags(&rw->lock, flags); arch_write_lock()
131 arch_spin_unlock(&rw->lock); arch_write_lock()
148 arch_spin_unlock(&rw->lock); arch_write_unlock()
152 * interrupted by some other code that wants to read_trylock() this lock */ arch_write_trylock()
159 if (arch_spin_trylock(&rw->lock)) { arch_write_trylock()
165 arch_spin_unlock(&rw->lock); arch_write_trylock()
175 * @lock: the rwlock in question.
184 * @lock: the rwlock in question.
191 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
192 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
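The parisc rwlock above is a counter guarded by a spinlock: readers take the spinlock only long enough to bump the counter, while a writer keeps the spinlock (shutting out new readers) and, if readers are still active, backs off and retries until the counter drains. A user-space sketch of the same structure (pthread types are illustrative; pthread_spin_init() is omitted):

#include <pthread.h>

struct counted_rwlock {
	pthread_spinlock_t lock;	/* serialises access to 'readers' */
	volatile int readers;
};

static void counted_read_lock(struct counted_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);	/* blocks while a writer holds it */
	rw->readers++;
	pthread_spin_unlock(&rw->lock);
}

static void counted_read_unlock(struct counted_rwlock *rw)
{
	pthread_spin_lock(&rw->lock);
	rw->readers--;
	pthread_spin_unlock(&rw->lock);
}

static void counted_write_lock(struct counted_rwlock *rw)
{
	for (;;) {
		pthread_spin_lock(&rw->lock);		/* stop new readers */
		if (rw->readers == 0)
			return;				/* hold the spinlock across the write */
		pthread_spin_unlock(&rw->lock);
		while (rw->readers != 0)
			;				/* let existing readers drain */
	}
}

static void counted_write_unlock(struct counted_rwlock *rw)
{
	pthread_spin_unlock(&rw->lock);
}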
/linux-4.1.27/sound/core/seq/
H A Dseq_lock.h10 /* initialize lock */
13 /* increment lock */
16 /* release lock */
20 void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
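seq_lock.h describes a "use lock": a counter that clients bump while they are using an object, plus a sync operation that waits for the count to drain before the object is torn down (the real helper also takes file/line for diagnostics, as the prototype above shows). A minimal illustration with a C11 atomic; names are illustrative:

#include <stdatomic.h>
#include <sched.h>

typedef atomic_int use_lock_t;		/* initialise to 0 */

static void use_lock_use(use_lock_t *l)  { atomic_fetch_add(l, 1); }
static void use_lock_free(use_lock_t *l) { atomic_fetch_sub(l, 1); }

static void use_lock_sync(use_lock_t *l)
{
	while (atomic_load(l) > 0)
		sched_yield();		/* wait until every user has called free */
}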
H A Dseq_timer.c61 spin_lock_init(&tmr->lock); snd_seq_timer_new()
95 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_defaults()
111 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_defaults()
128 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_reset()
130 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_reset()
148 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_interrupt()
150 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_interrupt()
170 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_interrupt()
185 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_set_tempo()
190 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_tempo()
203 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_set_ppq()
207 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_ppq()
214 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_ppq()
227 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_set_position_tick()
230 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_position_tick()
244 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_set_position_time()
246 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_position_time()
264 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_set_skew()
266 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_set_skew()
307 spin_lock_irq(&tmr->lock); snd_seq_timer_open()
309 spin_unlock_irq(&tmr->lock); snd_seq_timer_open()
321 spin_lock_irq(&tmr->lock); snd_seq_timer_close()
324 spin_unlock_irq(&tmr->lock); snd_seq_timer_close()
346 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_stop()
348 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_stop()
404 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_start()
406 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_start()
432 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_continue()
434 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_continue()
444 spin_lock_irqsave(&tmr->lock, flags); snd_seq_timer_get_cur_time()
460 spin_unlock_irqrestore(&tmr->lock, flags); snd_seq_timer_get_cur_time()
H A Dseq_fifo.c50 spin_lock_init(&f->lock); snd_seq_fifo_new()
102 spin_lock_irqsave(&f->lock, flags); snd_seq_fifo_clear()
107 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_clear()
132 spin_lock_irqsave(&f->lock, flags); snd_seq_fifo_event_in()
139 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_event_in()
183 spin_lock_irqsave(&f->lock, flags); snd_seq_fifo_cell_out()
187 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_cell_out()
192 spin_unlock_irq(&f->lock); snd_seq_fifo_cell_out()
194 spin_lock_irq(&f->lock); snd_seq_fifo_cell_out()
197 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_cell_out()
201 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_cell_out()
214 spin_lock_irqsave(&f->lock, flags); snd_seq_fifo_cell_putback()
218 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_cell_putback()
250 spin_lock_irqsave(&f->lock, flags); snd_seq_fifo_resize()
260 spin_unlock_irqrestore(&f->lock, flags); snd_seq_fifo_resize()
/linux-4.1.27/arch/alpha/lib/
H A Ddec_and_lock.c33 static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock) atomic_dec_and_lock_1() argument
36 spin_lock(lock); atomic_dec_and_lock_1()
39 spin_unlock(lock); atomic_dec_and_lock_1()
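The Alpha helper above backs atomic_dec_and_lock(), which atomically drops a reference count and, only when it reaches zero, returns with the given spinlock held. A typical (hypothetical) use is dropping the last reference to an object that lives on a locked list:

```c
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_obj {
	atomic_t refcount;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Returns non-zero only if the count hit zero; in that case the
	 * lock is held on return, so the list removal is race-free. */
	if (atomic_dec_and_lock(&obj->refcount, &my_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_list_lock);
		kfree(obj);
	}
}
```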
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_lock.c73 * Returns a weak pointer to the ldlm lock identified by a handle. Returned
74 * pointer cannot be dereferenced, as lock is not protected from concurrent
79 struct ldlm_lock *lock; osc_handle_ptr() local
81 lock = ldlm_handle2lock(handle); osc_handle_ptr()
82 if (lock != NULL) osc_handle_ptr()
83 LDLM_LOCK_PUT(lock); osc_handle_ptr()
84 return lock; osc_handle_ptr()
92 struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle); osc_lock_invariant() local
111 ergo(lock != NULL && olock != NULL, lock == olock) && osc_lock_invariant()
112 ergo(lock == NULL, olock == NULL))) osc_lock_invariant()
122 * DLM lock is destroyed only after we have seen cancellation osc_lock_invariant()
171 /* Must get the value under the lock to avoid possible races. */ osc_lock_detach()
224 * Move lock into OLS_RELEASED state before calling osc_lock_unuse()
226 * (that always happens e.g., for liblustre) sees that lock is osc_lock_unuse()
245 * thread that requested a lock was killed (and released a reference osc_lock_fini()
246 * to the lock), before reply from a server was received. In this case osc_lock_fini()
247 * lock is destroyed immediately after upcall. osc_lock_fini()
258 const struct cl_lock *lock, osc_lock_build_policy()
261 const struct cl_lock_descr *d = &lock->cll_descr; osc_lock_build_policy()
283 * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
296 struct cl_lock *lock = olck->ols_cl.cls_lock; osc_ast_data_get() local
298 * If osc_lock holds a reference on ldlm lock, return it even osc_ast_data_get()
306 if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) { osc_ast_data_get()
307 cl_lock_get_trust(lock); osc_ast_data_get()
308 lu_ref_add_atomic(&lock->cll_reference, osc_ast_data_get()
320 struct cl_lock *lock; osc_ast_data_put() local
322 lock = olck->ols_cl.cls_lock; osc_ast_data_put()
323 lu_ref_del(&lock->cll_reference, "ast", current); osc_ast_data_put()
324 cl_lock_put(env, lock); osc_ast_data_put()
328 * Updates object attributes from a lock value block (lvb) received together
329 * with the DLM lock reply from the server. Copy of osc_update_enqueue()
332 * This can be optimized to not update attributes when lock is a result of a
335 * Called under lock and resource spin-locks.
364 /* re-grab LVB from a dlm lock under DLM spin-locks. */ osc_lock_lvb_update()
367 /* Extend KMS up to the end of this lock and no further osc_lock_lvb_update()
368 * A lock on [x,y] means a KMS of up to y + 1 bytes! */ osc_lock_lvb_update()
372 LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu, kms=%llu", osc_lock_lvb_update()
377 LDLM_DEBUG(dlmlock, "lock acquired, setting rss=%llu; leaving kms=%llu, end=%llu", osc_lock_lvb_update()
395 * Called when a lock is granted, from an upcall (when server returned a
396 * granted lock), or from completion AST, when server returned a blocked lock.
398 * Called under lock and resource spin-locks, that are released temporarily
405 struct cl_lock *lock; osc_lock_granted() local
411 lock = olck->ols_cl.cls_lock; osc_lock_granted()
414 descr->cld_obj = lock->cll_descr.cld_obj; osc_lock_granted()
422 * tell upper layers the extent of the lock that was actually osc_lock_granted()
429 * to take a semaphore on a parent lock. This is safe, because osc_lock_granted()
434 cl_lock_modify(env, lock, descr); osc_lock_granted()
435 cl_lock_signal(env, lock); osc_lock_granted()
458 * (osc_ldlm_completion_ast()) comes later and finishes lock osc_lock_upcall0()
472 /* lock reference taken by ldlm_handle2lock_long() is owned by osc_lock_upcall0()
481 * lock.
487 struct cl_lock *lock = slice->cls_lock; osc_lock_upcall() local
495 cl_lock_mutex_get(env, lock); osc_lock_upcall()
497 LASSERT(lock->cll_state >= CLS_QUEUING); osc_lock_upcall()
530 /* This is a tolerable error, turn this lock into osc_lock_upcall()
531 * lockless lock. osc_lock_upcall()
536 /* Change this lock to ldlmlock-less lock. */ osc_lock_upcall()
542 cl_lock_delete(env, lock); osc_lock_upcall()
550 * lock upcall(). So update the lock status according osc_lock_upcall()
553 lock->cll_flags |= CLF_FROM_UPCALL; osc_lock_upcall()
554 cl_wait_try(env, lock); osc_lock_upcall()
555 lock->cll_flags &= ~CLF_FROM_UPCALL; osc_lock_upcall()
559 cl_lock_signal(env, lock); osc_lock_upcall()
560 /* del user for lock upcall cookie */ osc_lock_upcall()
561 cl_unuse_try(env, lock); osc_lock_upcall()
563 /* del user for lock upcall cookie */ osc_lock_upcall()
564 cl_lock_user_del(env, lock); osc_lock_upcall()
565 cl_lock_error(env, lock, rc); osc_lock_upcall()
569 cl_lock_hold_release(env, lock, "upcall", lock); osc_lock_upcall()
570 cl_lock_mutex_put(env, lock); osc_lock_upcall()
572 lu_ref_del(&lock->cll_reference, "upcall", lock); osc_lock_upcall()
575 cl_lock_put(env, lock); osc_lock_upcall()
592 struct cl_lock *lock = olck->ols_cl.cls_lock; osc_lock_blocking() local
600 * is sent for a failed lock. osc_lock_blocking()
606 * Move osc_lock into OLS_BLOCKED before canceling the lock, osc_lock_blocking()
612 * cancel and destroy lock at least once no matter how blocking ast is osc_lock_blocking()
616 cl_lock_cancel(env, lock); osc_lock_blocking()
617 cl_lock_delete(env, lock); osc_lock_blocking()
629 struct cl_lock *lock; osc_dlm_blocking_ast0() local
638 lock = olck->ols_cl.cls_lock; osc_dlm_blocking_ast0()
639 cl_lock_mutex_get(env, lock); osc_dlm_blocking_ast0()
643 cl_lock_signal(env, lock); osc_dlm_blocking_ast0()
648 * sleeping for lock mutex, but olck is pinned in memory. osc_dlm_blocking_ast0()
655 * DLM lock is never destroyed and stuck in osc_dlm_blocking_ast0()
669 cl_lock_mutex_put(env, lock); osc_dlm_blocking_ast0()
673 * DLM lock exists, but there is no cl_lock attached to it. osc_dlm_blocking_ast0()
691 * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
692 * some other lock, or is canceled. This function is installed as a
698 * \param dlmlock lock for which ast occurred.
700 * \param new description of a conflicting lock in case of blocking ast.
710 * lock due to lock lru pressure, or explicit user request to purge
714 * us that dlmlock conflicts with another lock that some client is
724 * - client cancels lock voluntary (e.g., as a part of early cancellation):
779 struct cl_lock *lock; osc_ldlm_completion_ast() local
790 lock = olck->ols_cl.cls_lock; osc_ldlm_completion_ast()
791 cl_lock_mutex_get(env, lock); osc_ldlm_completion_ast()
794 * to lock->l_lvb_data, store it in osc_lock. osc_ldlm_completion_ast()
816 CL_LOCK_DEBUG(D_ERROR, env, lock, osc_ldlm_completion_ast()
818 cl_lock_error(env, lock, dlmrc); osc_ldlm_completion_ast()
820 cl_lock_mutex_put(env, lock); osc_ldlm_completion_ast()
835 struct cl_lock *lock; osc_ldlm_glimpse_ast() local
849 * reference to a lock, and it can only be released in osc_ldlm_glimpse_ast()
854 lock = olck->ols_cl.cls_lock; osc_ldlm_glimpse_ast()
860 * cl_lock_mutex_get(env, lock); */ osc_ldlm_glimpse_ast()
868 obj = lock->cll_descr.cld_obj; osc_ldlm_glimpse_ast()
905 struct osc_lock *lock, osc_lock_build_einfo()
914 * future, client might choose to enqueue LCK_PW lock for osc_lock_build_einfo()
924 einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */ osc_lock_build_einfo()
928 * Determine if the lock should be converted into a lockless lock.
931 * - if the lock has an explicit requirement for a non-lockless lock;
932 * - if the io lock request type ci_lockreq;
934 * - special treatment for truncate lockless lock
1003 * covered by locks other than lockless IO lock, and, hence, are not
1009 struct cl_lock *lock = olck->ols_cl.cls_lock; osc_lock_enqueue_wait() local
1010 struct cl_lock_descr *descr = &lock->cll_descr; osc_lock_enqueue_wait()
1017 LASSERT(cl_lock_is_mutexed(lock)); osc_lock_enqueue_wait()
1019 /* make it enqueue anyway for glimpse lock, because we actually osc_lock_enqueue_wait()
1029 if (scan == lock) osc_lock_enqueue_wait()
1040 /* We're not supposed to give up group lock. */ osc_lock_enqueue_wait()
1050 * a lockless lock, for example: osc_lock_enqueue_wait()
1051 * imagine that client has PR lock on [0, 1000], and thread T0 osc_lock_enqueue_wait()
1065 if (lock->cll_descr.cld_mode == CLM_GROUP) { osc_lock_enqueue_wait()
1066 /* we want a group lock but a previous lock request osc_lock_enqueue_wait()
1070 CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n", osc_lock_enqueue_wait()
1071 lock, conflict); osc_lock_enqueue_wait()
1075 CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n", osc_lock_enqueue_wait()
1076 lock, conflict); osc_lock_enqueue_wait()
1077 LASSERT(lock->cll_conflict == NULL); osc_lock_enqueue_wait()
1079 lock); osc_lock_enqueue_wait()
1080 lock->cll_conflict = conflict; osc_lock_enqueue_wait()
1096 * when lock is received either after a local cached ldlm lock is matched, or
1106 struct cl_lock *lock = ols->ols_cl.cls_lock; osc_lock_enqueue() local
1109 LASSERT(cl_lock_is_mutexed(lock)); osc_lock_enqueue()
1113 LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), osc_lock_enqueue()
1114 "lock = %p, ols = %p\n", lock, ols); osc_lock_enqueue()
1125 /* lock will be passed as upcall cookie, osc_lock_enqueue()
1127 cl_lock_hold_add(env, lock, "upcall", lock); osc_lock_enqueue()
1128 /* a user for lock also */ osc_lock_enqueue()
1129 cl_lock_user_add(env, lock); osc_lock_enqueue()
1138 osc_lock_build_policy(env, lock, policy); osc_lock_enqueue()
1147 cl_lock_user_del(env, lock); osc_lock_enqueue()
1148 cl_lock_unhold(env, lock, "upcall", lock); osc_lock_enqueue()
1167 struct cl_lock *lock = olck->ols_cl.cls_lock; osc_lock_wait() local
1175 if (lock->cll_flags & CLF_FROM_UPCALL) osc_lock_wait()
1182 LASSERT(lock->cll_error); osc_lock_wait()
1183 return lock->cll_error; osc_lock_wait()
1201 lock->cll_error == 0, olck->ols_lock != NULL)); osc_lock_wait()
1203 return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; osc_lock_wait()
1208 * lock.
1219 * Atomically check for LDLM_FL_CBPENDING and addref a lock if this osc_lock_use()
1227 struct cl_lock *lock; osc_lock_use() local
1235 lock = slice->cls_lock; osc_lock_use()
1236 LASSERT(lock->cll_state == CLS_INTRANSIT); osc_lock_use()
1237 LASSERT(lock->cll_users > 0); osc_lock_use()
1239 * lock.*/ osc_lock_use()
1248 struct cl_lock *lock = ols->ols_cl.cls_lock; osc_lock_flush() local
1256 struct cl_lock_descr *descr = &lock->cll_descr; osc_lock_flush()
1264 "lock %p: %d pages were %s.\n", lock, result, osc_lock_flush()
1270 rc = cl_lock_discard_pages(env, lock); osc_lock_flush()
1286 * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
1288 * with some other lock some where in the cluster. This function does the
1291 * - invalidates all pages protected by this lock (after sending dirty
1294 * - decref's underlying ldlm lock;
1296 * - cancels ldlm lock (ldlm_cli_cancel()).
1301 struct cl_lock *lock = slice->cls_lock; osc_lock_cancel() local
1307 LASSERT(cl_lock_is_mutexed(lock)); osc_lock_cancel()
1330 CL_LOCK_DEBUG(D_ERROR, env, lock, osc_lock_cancel()
1331 "lock %p cancel failure with error(%d)\n", osc_lock_cancel()
1332 lock, result); osc_lock_cancel()
1368 * This assumes that lock always enters CLS_HELD (from some other state) in
1369 * the same IO context as one that requested the lock. This should not be a
1377 struct osc_lock *lock = cl2osc_lock(slice); osc_lock_state() local
1380 * XXX multiple io contexts can use the lock at the same time. osc_lock_state()
1382 LINVRNT(osc_lock_invariant(lock)); osc_lock_state()
1386 LASSERT(lock->ols_owner == NULL); osc_lock_state()
1387 lock->ols_owner = oio; osc_lock_state()
1389 lock->ols_owner = NULL; osc_lock_state()
1395 struct osc_lock *lock = cl2osc_lock(slice); osc_lock_print() local
1398 * XXX print ldlm lock and einfo properly. osc_lock_print()
1401 lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie, osc_lock_print()
1402 lock->ols_state, lock->ols_owner); osc_lock_print()
1403 osc_lvb_print(env, cookie, p, &lock->ols_lvb); osc_lock_print()
1425 * Note: the QUEUED lock can't be matched here, otherwise osc_lock_fits_into()
1428 * P1: enqueued read lock, create sublock1 osc_lock_fits_into()
1429 * P2: enqueued write lock, create sublock2(conflicted osc_lock_fits_into()
1431 * P1: Grant read lock. osc_lock_fits_into()
1432 * P1: enqueued glimpse lock(with holding sublock1_read), osc_lock_fits_into()
1442 * If the lock hasn't ever enqueued, it can't be matched osc_lock_fits_into()
1472 struct cl_lock *lock = slice->cls_lock; osc_lock_lockless_unuse() local
1477 cl_lock_cancel(env, lock); osc_lock_lockless_unuse()
1478 cl_lock_delete(env, lock); osc_lock_lockless_unuse()
1490 CERROR("Pages for lockless lock %p were not purged(%d)\n", osc_lock_lockless_cancel()
1499 struct cl_lock *lock = olck->ols_cl.cls_lock; osc_lock_lockless_wait() local
1504 return lock->cll_error; osc_lock_lockless_wait()
1511 struct osc_lock *lock = cl2osc_lock(slice); osc_lock_lockless_state() local
1513 LINVRNT(osc_lock_invariant(lock)); osc_lock_lockless_state()
1517 LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio)); osc_lock_lockless_state()
1518 lock->ols_owner = oio; osc_lock_lockless_state()
1520 /* set the io to be lockless if this lock is for io's osc_lock_lockless_state()
1532 struct osc_lock *lock = cl2osc_lock(slice); osc_lock_lockless_fits_into() local
1537 /* lockless lock should only be used by its owning io. b22147 */ osc_lock_lockless_fits_into()
1538 return (lock->ols_owner == osc_env_io(env)); osc_lock_lockless_fits_into()
1553 struct cl_object *obj, struct cl_lock *lock, osc_lock_init()
1561 __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_init()
1563 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); osc_lock_init()
1574 cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops); osc_lock_init()
1577 /* try to convert this lock to a lockless lock */ osc_lock_init()
1582 LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", osc_lock_init()
1583 lock, clk, clk->ols_flags); osc_lock_init()
1600 * doesn't matter because in the worst case we don't cancel a lock osc_dlm_lock_pageref()
257 osc_lock_build_policy(const struct lu_env *env, const struct cl_lock *lock, ldlm_policy_data_t *policy) osc_lock_build_policy() argument
903 osc_lock_build_einfo(const struct lu_env *env, const struct cl_lock *clock, struct osc_lock *lock, struct ldlm_enqueue_info *einfo) osc_lock_build_einfo() argument
1552 osc_lock_init(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *unused) osc_lock_init() argument
/linux-4.1.27/fs/btrfs/
H A Dlocking.c30 * if we currently have a spinning reader or writer lock
37 * no lock is required. The lock owner may change if btrfs_set_lock_blocking_rw()
38 * we have a read lock, but it won't change to or away btrfs_set_lock_blocking_rw()
39 * from us. If we have the write lock, we are the owner btrfs_set_lock_blocking_rw()
50 write_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
57 read_unlock(&eb->lock); btrfs_set_lock_blocking_rw()
63 * if we currently have a blocking lock, take the spinlock
69 * no lock is required. The lock owner may change if btrfs_clear_lock_blocking_rw()
70 * we have a read lock, but it won't change to or away btrfs_clear_lock_blocking_rw()
71 * from us. If we have the write lock, we are the owner btrfs_clear_lock_blocking_rw()
79 write_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
87 read_lock(&eb->lock); btrfs_clear_lock_blocking_rw()
97 * take a spinning read lock. This will wait for any blocking
106 read_lock(&eb->lock); btrfs_tree_read_lock()
111 * an additional read lock to be added because it's for the same btrfs_tree_read_lock()
117 read_unlock(&eb->lock); btrfs_tree_read_lock()
121 read_unlock(&eb->lock); btrfs_tree_read_lock()
131 * take a spinning read lock.
132 * returns 1 if we get the read lock and 0 if we don't
140 read_lock(&eb->lock); btrfs_tree_read_lock_atomic()
142 read_unlock(&eb->lock); btrfs_tree_read_lock_atomic()
151 * returns 1 if we get the read lock and 0 if we don't
159 if (!read_trylock(&eb->lock)) btrfs_try_tree_read_lock()
163 read_unlock(&eb->lock); btrfs_try_tree_read_lock()
172 * returns 1 if we get the read lock and 0 if we don't
181 write_lock(&eb->lock); btrfs_try_tree_write_lock()
184 write_unlock(&eb->lock); btrfs_try_tree_write_lock()
194 * drop a spinning read lock
199 * if we're nested, we have the write lock. No new locking btrfs_tree_read_unlock()
200 * is needed as long as we are the lock owner. btrfs_tree_read_unlock()
202 * field only matters to the lock owner. btrfs_tree_read_unlock()
212 read_unlock(&eb->lock); btrfs_tree_read_unlock()
216 * drop a blocking read lock
221 * if we're nested, we have the write lock. No new locking btrfs_tree_read_unlock_blocking()
222 * is needed as long as we are the lock owner. btrfs_tree_read_unlock_blocking()
224 * field only matters to the lock owner. btrfs_tree_read_unlock_blocking()
239 * take a spinning write lock. This will wait for both
247 write_lock(&eb->lock); btrfs_tree_lock()
249 write_unlock(&eb->lock); btrfs_tree_lock()
255 write_unlock(&eb->lock); btrfs_tree_lock()
267 * drop a spinning or a blocking write lock.
288 write_unlock(&eb->lock); btrfs_tree_unlock()
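The btrfs locking.c excerpts describe extent-buffer locks that start out "spinning" and can be converted to a "blocking" form before the holder does something that may sleep. Below is a rough sketch of the read-side pattern as I read these comments; treat the helper names and constants as assumptions based on this 4.1-era code rather than a reference.

```c
/* Sketch only: would live inside fs/btrfs and include its private
 * headers ("extent_io.h", "locking.h"). BTRFS_READ_LOCK and
 * BTRFS_READ_LOCK_BLOCKING are assumed from the 4.1-era API. */
static void example_read_side(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);		/* spinning read lock */

	/* Switch to the blocking form before a sleeping operation. */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	/* ... code that may sleep while the buffer stays read-locked ... */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);

	btrfs_tree_read_unlock(eb);		/* drop the spinning read lock */
}
```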
/linux-4.1.27/drivers/gpu/drm/via/
H A Dvia_video.c40 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0; via_init_futex()
51 volatile int *lock; via_release_futex() local
57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); via_release_futex()
58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) { via_release_futex()
59 if (_DRM_LOCK_IS_HELD(*lock) via_release_futex()
60 && (*lock & _DRM_LOCK_CONT)) { via_release_futex()
63 *lock = 0; via_release_futex()
71 volatile int *lock; via_decoder_futex() local
78 if (fx->lock >= VIA_NR_XVMC_LOCKS) via_decoder_futex()
81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); via_decoder_futex()
85 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], via_decoder_futex()
86 (fx->ms / 10) * (HZ / 100), *lock != fx->val); via_decoder_futex()
89 wake_up(&(dev_priv->decoder_queue[fx->lock])); via_decoder_futex()
/linux-4.1.27/arch/tile/lib/
H A Dspinlock_64.c24 * This is important when we are spinning waiting for the lock.
26 static inline u32 arch_spin_read_noalloc(void *lock) arch_spin_read_noalloc() argument
28 return atomic_cmpxchg((atomic_t *)lock, -1, -1); arch_spin_read_noalloc()
35 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket) arch_spin_lock_slow() argument
38 __insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW); arch_spin_lock_slow()
43 u32 val = arch_spin_read_noalloc(lock); arch_spin_lock_slow()
53 * Check the lock to see if it is plausible, and try to get it with cmpxchg().
55 int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
57 u32 val = arch_spin_read_noalloc(lock); arch_spin_trylock()
60 return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW) arch_spin_trylock()
65 void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
68 while (arch_spin_is_locked(lock)) arch_spin_unlock_wait()
74 * If the read lock fails due to a writer, we retry periodically
83 val = __insn_fetchaddgez4(&rw->lock, 1); __read_lock_failed()
92 * issuing periodic fetchor instructions, till we get the lock.
99 val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT); __write_lock_failed()
101 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT); __write_lock_failed()
H A Dspinlock_32.c22 void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
28 while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1) arch_spin_lock()
31 /* Increment the next ticket number, implicitly releasing tns lock. */ arch_spin_lock()
32 lock->next_ticket = my_ticket + TICKET_QUANTUM; arch_spin_lock()
35 while ((delta = my_ticket - lock->current_ticket) != 0) arch_spin_lock()
40 int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
45 * will lock it momentarily anyway. arch_spin_trylock()
47 int my_ticket = __insn_tns((void *)&lock->next_ticket); arch_spin_trylock()
49 if (my_ticket == lock->current_ticket) { arch_spin_trylock()
50 /* Not currently locked, so lock it by keeping this ticket. */ arch_spin_trylock()
51 lock->next_ticket = my_ticket + TICKET_QUANTUM; arch_spin_trylock()
58 lock->next_ticket = my_ticket; arch_spin_trylock()
65 void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
68 while (arch_spin_is_locked(lock)) arch_spin_unlock_wait()
96 * We can get the read lock if everything but the reader bits (which
101 * preserve the semantic that the same read lock can be acquired in an
108 val = __insn_tns((int *)&rwlock->lock); arch_read_trylock()
111 rwlock->lock = val; arch_read_trylock()
117 rwlock->lock = val; arch_read_trylock()
124 * Spin doing arch_read_trylock() until we acquire the lock.
126 * a writer could instead take a ticket lock (just like a writer would),
143 mb(); /* guarantee anything modified under the lock is visible */ arch_read_unlock()
146 val = __insn_tns((int *)&rwlock->lock); arch_read_unlock()
148 rwlock->lock = val - (1 << _RD_COUNT_SHIFT); arch_read_unlock()
160 * arch_read_lock) since we should never use a bare write lock where
172 u32 val = __insn_tns((int *)&rwlock->lock); arch_write_lock()
175 rwlock->lock = 1 << _WR_NEXT_SHIFT; arch_write_lock()
187 rwlock->lock = val; arch_write_lock()
190 val = __insn_tns((int *)&rwlock->lock); arch_write_lock()
194 rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); arch_write_lock()
204 /* Delay based on how many lock-holders are still out there. */ arch_write_lock()
212 while ((val = rwlock->lock) & 1) arch_write_lock()
220 u32 val = __insn_tns((int *)&rwlock->lock); arch_write_trylock()
224 * or active readers, we can't take the lock, so give up. arch_write_trylock()
228 rwlock->lock = val; arch_write_trylock()
233 rwlock->lock = 1 << _WR_NEXT_SHIFT; arch_write_trylock()
242 mb(); /* guarantee anything modified under the lock is visible */ arch_write_unlock()
243 val = __insn_tns((int *)&rwlock->lock); arch_write_unlock()
245 rwlock->lock = 0; arch_write_unlock()
251 val = __insn_tns((int *)&rwlock->lock); arch_write_unlock()
257 rwlock->lock = val; arch_write_unlock()
/linux-4.1.27/include/uapi/linux/
H A Ddlmconstants.h43 * Do not queue the lock request on the wait queue if it cannot be granted
44 * immediately. If the lock cannot be granted because of this flag, DLM will
46 * dlm_lock and -EAGAIN in the lock status block when the AST is executed.
50 * Used to cancel a pending lock request or conversion. A converting lock is
55 * Indicates a lock conversion request. For conversions the name and namelen
56 * are ignored and the lock ID in the LKSB is used to identify the lock.
60 * Requests DLM to return the current contents of the lock value block in the
61 * lock status block. When this flag is set in a lock conversion from PW or EX
62 * modes, DLM assigns the value specified in the lock status block to the lock
63 * value block of the lock resource. The LVB is a DLM_LVB_LEN size array
73 * Invalidate the lock value block.
78 * granted mode of a converting lock to NL. The DLM_SBF_DEMOTED flag is
83 * Only relevant to locks originating in userspace. A persistent lock will not
84 * be removed if the process holding the lock exits.
88 * Do not cancel the lock if it gets into conversion deadlock.
89 * Exclude this lock from being monitored due to DLM_LSFL_TIMEWARN.
97 * Used only with new requests for NL mode locks. Tells the lock manager
98 * to grant the lock, ignoring other locks in convert and wait queues.
108 * Add a lock to the head of the convert or wait queue rather than the tail.
112 * Disregard the standard grant order rules and grant a lock as soon as it
117 * Acquire an orphan lock.
121 * If the requested mode cannot be granted immediately, try to grant the lock
131 * Unlock the lock even if it is converting or waiting or has sublocks.
H A Ddlm.h31 * Use this structure to specify the contents of the lock value block. For a
32 * conversion request, this structure is used to specify the lock ID of the
33 * lock. DLM writes the status of the lock request and the lock ID assigned
34 * to the request in the lock status block.
36 * sb_lkid: the returned lock ID. It is set on new (non-conversion) requests.
39 * sb_lvbptr: saves or returns the contents of the lock's LVB according to rules
42 * sb_flags: DLM_SBF_DEMOTED is returned if in the process of promoting a lock,
46 * sb_status: the returned status of the lock request set prior to AST
49 * 0 if lock request was successful
54 * -ETIMEDOUT if the lock request was canceled due to a timeout
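The two uapi headers above define the DLM request flags (DLM_LKF_*) and the lock status block shared by the kernel and userspace interfaces. As a rough illustration of how they fit together, here is a sketch of an in-kernel request for an exclusive lock that refuses to queue; the lockspace is assumed to exist already, and the dlm_lock() prototype is taken from include/linux/dlm.h of this era, so verify it before relying on it.

```c
#include <linux/dlm.h>
#include <linux/kernel.h>
#include <linux/string.h>

static struct dlm_lksb my_lksb;

static void my_ast(void *astarg)
{
	/* sb_status is 0 on success, or e.g. -EAGAIN when DLM_LKF_NOQUEUE
	 * prevented the lock from being granted immediately. */
	pr_info("lock %x completed, status %d\n",
		my_lksb.sb_lkid, my_lksb.sb_status);
}

static int request_ex_lock(dlm_lockspace_t *ls)
{
	/* DLM_LKF_NOQUEUE: fail with -EAGAIN rather than wait. */
	return dlm_lock(ls, DLM_LOCK_EX, &my_lksb, DLM_LKF_NOQUEUE,
			"my-resource", strlen("my-resource"),
			0 /* no parent */, my_ast, &my_lksb,
			NULL /* no blocking AST */);
}
```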
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-spinlock.h61 * @lock: Lock to initialize
63 static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock) cvmx_spinlock_init() argument
65 lock->value = CVMX_SPINLOCK_UNLOCKED_VAL; cvmx_spinlock_init()
71 * @lock: Lock to check
74 static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock) cvmx_spinlock_locked() argument
76 return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL; cvmx_spinlock_locked()
80 * Releases lock
82 * @lock: pointer to lock structure
84 static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock) cvmx_spinlock_unlock() argument
87 lock->value = 0; cvmx_spinlock_unlock()
92 * Attempts to take the lock, but does not spin if lock is not available.
93 * May take some time to acquire the lock even if it is available
96 * @lock: pointer to lock structure
98 * Returns 0: lock successfully taken
99 * 1: lock not taken, held by someone else
103 static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock) cvmx_spinlock_trylock() argument
109 /* if lock held, fail immediately */ cvmx_spinlock_trylock()
117 [val] "+m"(lock->value), [tmp] "=&r"(tmp) cvmx_spinlock_trylock()
124 * Gets lock, spins until lock is taken
126 * @lock: pointer to lock structure
128 static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock) cvmx_spinlock_lock() argument
140 [val] "+m"(lock->value), [tmp] "=&r"(tmp) cvmx_spinlock_lock()
149 * compact data structures as only 1 bit is consumed for the lock.
154 * Gets lock, spins until lock is taken
156 * word used for the lock.
159 * @word: word to lock bit 31 of
183 * Attempts to get lock, returns immediately with success/failure
185 * word used for the lock.
188 * @word: word to lock bit 31 of
189 * Returns 0: lock successfully taken
190 * 1: lock not taken, held by someone else
200 /* if lock held, fail immediately */ cvmx_spinlock_bit_trylock()
217 * Releases bit lock
219 * Unconditionally clears bit 31 of the lock word. Note that this is
221 * of the bits in the word are protected by the lock.
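The cvmx-spinlock.h excerpt documents the whole Octeon spinlock API inline, so a usage sketch follows directly from it. Note the documented return convention: cvmx_spinlock_trylock() returns 0 when the lock was taken, the opposite of the usual kernel trylock convention. The shared counter below is illustrative.

```c
#include <asm/octeon/cvmx-spinlock.h>

static cvmx_spinlock_t hw_lock;
static int shared_counter;

static void init_once(void)
{
	cvmx_spinlock_init(&hw_lock);	/* starts out unlocked */
}

static void update_counter(void)
{
	/* Non-blocking attempt first: 0 means the lock was taken. */
	if (cvmx_spinlock_trylock(&hw_lock) == 0) {
		shared_counter++;
		cvmx_spinlock_unlock(&hw_lock);
		return;
	}

	/* Otherwise spin until it is ours. */
	cvmx_spinlock_lock(&hw_lock);
	shared_counter++;
	cvmx_spinlock_unlock(&hw_lock);
}
```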
/linux-4.1.27/drivers/hwspinlock/
H A Dhwspinlock_internal.h29 * @trylock: make a single attempt to take the lock. returns 0 on
31 * @unlock: release the lock. always succeed. may _not_ sleep.
33 * core while spinning on a lock, between two successive
37 int (*trylock)(struct hwspinlock *lock);
38 void (*unlock)(struct hwspinlock *lock);
39 void (*relax)(struct hwspinlock *lock);
44 * @bank: the hwspinlock_device structure which owns this lock
45 * @lock: initialized and used by hwspinlock core
50 spinlock_t lock; member in struct:hwspinlock
58 * @base_id: id index of the first lock in this device
60 * @lock: dynamically allocated array of 'struct hwspinlock'
67 struct hwspinlock lock[0]; member in struct:hwspinlock_device
72 int local_id = hwlock - &hwlock->bank->lock[0]; hwlock_to_id()
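hwspinlock_internal.h defines the three callbacks a hardware-spinlock driver must provide. Below is a hypothetical driver-side sketch: the register layout, the readl/writel semantics, and the assumption that lock->priv holds the mapped register address are invented for illustration; only the shape of struct hwspinlock_ops comes from the header above.

```c
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include "hwspinlock_internal.h"

/* Hypothetical device: reading the lock register returns non-zero when
 * the lock was acquired by that read, 0 when someone else holds it;
 * writing 0 releases it. */
static int my_hwspinlock_trylock(struct hwspinlock *lock)
{
	void __iomem *reg = lock->priv;	/* assumed mapping, set at probe time */

	return readl(reg);		/* 0 = failed, non-zero = acquired */
}

static void my_hwspinlock_unlock(struct hwspinlock *lock)
{
	void __iomem *reg = lock->priv;

	writel(0, reg);
}

static void my_hwspinlock_relax(struct hwspinlock *lock)
{
	ndelay(50);			/* back off between retries */
}

static const struct hwspinlock_ops my_hwspinlock_ops = {
	.trylock	= my_hwspinlock_trylock,
	.unlock		= my_hwspinlock_unlock,
	.relax		= my_hwspinlock_relax,
};
```

A real driver would then embed an array of these locks in a struct hwspinlock_device and hand it to hwspin_lock_register() together with the ops, base_id, and lock count.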
/linux-4.1.27/drivers/base/
H A Dmap.c26 int (*lock)(dev_t, void *); member in struct:kobj_map::probe
29 struct mutex *lock; member in struct:kobj_map
34 int (*lock)(dev_t, void *), void *data) kobj_map()
51 p->lock = lock; kobj_map()
56 mutex_lock(domain->lock); kobj_map()
64 mutex_unlock(domain->lock); kobj_map()
78 mutex_lock(domain->lock); kobj_unmap()
91 mutex_unlock(domain->lock); kobj_unmap()
102 mutex_lock(domain->lock); kobj_lookup()
119 if (p->lock && p->lock(dev, data) < 0) { kobj_lookup()
123 mutex_unlock(domain->lock); kobj_lookup()
131 mutex_unlock(domain->lock); kobj_lookup()
135 struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock) kobj_map_init() argument
152 p->lock = lock; kobj_map_init()
32 kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, struct module *module, kobj_probe_t *probe, int (*lock)(dev_t, void *), void *data) kobj_map() argument
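The map.c excerpt shows that each probe registered with kobj_map() may carry an optional lock callback with the signature int (*lock)(dev_t, void *), which kobj_lookup() calls before using the probe (line 119 above) and which must return a negative value to have that probe skipped. A hypothetical registration sketch, with an invented object type, major number, and map handle:

```c
#include <linux/kobj_map.h>
#include <linux/kobject.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/module.h>

#define MY_MAJOR 240	/* illustrative, locally chosen major */

struct my_dev {
	struct kobject kobj;
	struct kref ref;
};

/* Called by kobj_lookup(); returning < 0 makes the lookup skip this
 * probe, so this is the place to pin the object against teardown. */
static int my_lock(dev_t devt, void *data)
{
	struct my_dev *d = data;

	kref_get(&d->ref);
	return 0;
}

static struct kobject *my_probe(dev_t devt, int *part, void *data)
{
	struct my_dev *d = data;

	return &d->kobj;
}

/* 'my_map' would come from kobj_map_init() at subsystem init time. */
static int my_register(struct kobj_map *my_map, struct my_dev *d)
{
	return kobj_map(my_map, MKDEV(MY_MAJOR, 0), 1, THIS_MODULE,
			my_probe, my_lock, d);
}
```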
/linux-4.1.27/fs/lockd/
H A Dxdr4.c4 * XDR support for lockd and the lock client.
106 nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) nlm4_decode_lock() argument
108 struct file_lock *fl = &lock->fl; nlm4_decode_lock()
112 if (!(p = xdr_decode_string_inplace(p, &lock->caller, nlm4_decode_lock()
113 &lock->len, NLM_MAXSTRLEN)) nlm4_decode_lock()
114 || !(p = nlm4_decode_fh(p, &lock->fh)) nlm4_decode_lock()
115 || !(p = nlm4_decode_oh(p, &lock->oh))) nlm4_decode_lock()
117 lock->svid = ntohl(*p++); nlm4_decode_lock()
121 fl->fl_pid = (pid_t)lock->svid; nlm4_decode_lock()
151 struct file_lock *fl = &resp->lock.fl; nlm4_encode_testres()
154 *p++ = htonl(resp->lock.svid); nlm4_encode_testres()
157 if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) nlm4_encode_testres()
169 resp->status, (int)resp->lock.svid, fl->fl_type, nlm4_encode_testres()
190 if (!(p = nlm4_decode_lock(p, &argp->lock))) nlm4svc_decode_testargs()
193 argp->lock.fl.fl_type = F_WRLCK; nlm4svc_decode_testargs()
215 if (!(p = nlm4_decode_lock(p, &argp->lock))) nlm4svc_decode_lockargs()
218 argp->lock.fl.fl_type = F_WRLCK; nlm4svc_decode_lockargs()
235 if (!(p = nlm4_decode_lock(p, &argp->lock))) nlm4svc_decode_cancargs()
238 argp->lock.fl.fl_type = F_WRLCK; nlm4svc_decode_cancargs()
246 || !(p = nlm4_decode_lock(p, &argp->lock))) nlm4svc_decode_unlockargs()
248 argp->lock.fl.fl_type = F_UNLCK; nlm4svc_decode_unlockargs()
255 struct nlm_lock *lock = &argp->lock; nlm4svc_decode_shareargs() local
257 memset(lock, 0, sizeof(*lock)); nlm4svc_decode_shareargs()
258 locks_init_lock(&lock->fl); nlm4svc_decode_shareargs()
259 lock->svid = ~(u32) 0; nlm4svc_decode_shareargs()
260 lock->fl.fl_pid = (pid_t)lock->svid; nlm4svc_decode_shareargs()
263 || !(p = xdr_decode_string_inplace(p, &lock->caller, nlm4svc_decode_shareargs()
264 &lock->len, NLM_MAXSTRLEN)) nlm4svc_decode_shareargs()
265 || !(p = nlm4_decode_fh(p, &lock->fh)) nlm4svc_decode_shareargs()
266 || !(p = nlm4_decode_oh(p, &lock->oh))) nlm4svc_decode_shareargs()
295 struct nlm_lock *lock = &argp->lock; nlm4svc_decode_notify() local
297 if (!(p = xdr_decode_string_inplace(p, &lock->caller, nlm4svc_decode_notify()
298 &lock->len, NLM_MAXSTRLEN))) nlm4svc_decode_notify()
H A Dxdr.c4 * XDR support for lockd and the lock client.
114 nlm_decode_lock(__be32 *p, struct nlm_lock *lock) nlm_decode_lock() argument
116 struct file_lock *fl = &lock->fl; nlm_decode_lock()
119 if (!(p = xdr_decode_string_inplace(p, &lock->caller, nlm_decode_lock()
120 &lock->len, nlm_decode_lock()
122 || !(p = nlm_decode_fh(p, &lock->fh)) nlm_decode_lock()
123 || !(p = nlm_decode_oh(p, &lock->oh))) nlm_decode_lock()
125 lock->svid = ntohl(*p++); nlm_decode_lock()
129 fl->fl_pid = (pid_t)lock->svid; nlm_decode_lock()
158 struct file_lock *fl = &resp->lock.fl; nlm_encode_testres()
161 *p++ = htonl(resp->lock.svid); nlm_encode_testres()
164 if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) nlm_encode_testres()
193 if (!(p = nlm_decode_lock(p, &argp->lock))) nlmsvc_decode_testargs()
196 argp->lock.fl.fl_type = F_WRLCK; nlmsvc_decode_testargs()
218 if (!(p = nlm_decode_lock(p, &argp->lock))) nlmsvc_decode_lockargs()
221 argp->lock.fl.fl_type = F_WRLCK; nlmsvc_decode_lockargs()
238 if (!(p = nlm_decode_lock(p, &argp->lock))) nlmsvc_decode_cancargs()
241 argp->lock.fl.fl_type = F_WRLCK; nlmsvc_decode_cancargs()
249 || !(p = nlm_decode_lock(p, &argp->lock))) nlmsvc_decode_unlockargs()
251 argp->lock.fl.fl_type = F_UNLCK; nlmsvc_decode_unlockargs()
258 struct nlm_lock *lock = &argp->lock; nlmsvc_decode_shareargs() local
260 memset(lock, 0, sizeof(*lock)); nlmsvc_decode_shareargs()
261 locks_init_lock(&lock->fl); nlmsvc_decode_shareargs()
262 lock->svid = ~(u32) 0; nlmsvc_decode_shareargs()
263 lock->fl.fl_pid = (pid_t)lock->svid; nlmsvc_decode_shareargs()
266 || !(p = xdr_decode_string_inplace(p, &lock->caller, nlmsvc_decode_shareargs()
267 &lock->len, NLM_MAXSTRLEN)) nlmsvc_decode_shareargs()
268 || !(p = nlm_decode_fh(p, &lock->fh)) nlmsvc_decode_shareargs()
269 || !(p = nlm_decode_oh(p, &lock->oh))) nlmsvc_decode_shareargs()
298 struct nlm_lock *lock = &argp->lock; nlmsvc_decode_notify() local
300 if (!(p = xdr_decode_string_inplace(p, &lock->caller, nlmsvc_decode_notify()
301 &lock->len, NLM_MAXSTRLEN))) nlmsvc_decode_notify()
H A Dsvclock.c46 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
86 * Insert a blocked lock into the global list
141 * Find a block for a given lock
144 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) nlmsvc_lookup_block() argument
150 file, lock->fl.fl_pid, nlmsvc_lookup_block()
151 (long long)lock->fl.fl_start, nlmsvc_lookup_block()
152 (long long)lock->fl.fl_end, lock->fl.fl_type); nlmsvc_lookup_block()
154 fl = &block->b_call->a_args.lock.fl; nlmsvc_lookup_block()
160 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { nlmsvc_lookup_block()
203 * the blocked lock request. The spec explicitly mentions that the client
216 struct nlm_file *file, struct nlm_lock *lock, nlmsvc_create_block()
234 if (!nlmsvc_setgrantargs(call, lock)) nlmsvc_create_block()
238 call->a_args.lock.fl.fl_flags |= FL_SLEEP; nlmsvc_create_block()
239 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; nlmsvc_create_block()
278 status = posix_unblock_lock(&block->b_call->a_args.lock.fl); nlmsvc_unlink_block()
338 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) nlmsvc_setgrantargs() argument
340 locks_copy_lock(&call->a_args.lock.fl, &lock->fl); nlmsvc_setgrantargs()
341 memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); nlmsvc_setgrantargs()
342 call->a_args.lock.caller = utsname()->nodename; nlmsvc_setgrantargs()
343 call->a_args.lock.oh.len = lock->oh.len; nlmsvc_setgrantargs()
346 call->a_args.lock.oh.data = call->a_owner; nlmsvc_setgrantargs()
347 call->a_args.lock.svid = lock->fl.fl_pid; nlmsvc_setgrantargs()
349 if (lock->oh.len > NLMCLNT_OHSIZE) { nlmsvc_setgrantargs()
350 void *data = kmalloc(lock->oh.len, GFP_KERNEL); nlmsvc_setgrantargs()
353 call->a_args.lock.oh.data = (u8 *) data; nlmsvc_setgrantargs()
356 memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); nlmsvc_setgrantargs()
362 if (call->a_args.lock.oh.data != call->a_owner) nlmsvc_freegrantargs()
363 kfree(call->a_args.lock.oh.data); nlmsvc_freegrantargs()
365 locks_release_private(&call->a_args.lock.fl); nlmsvc_freegrantargs()
369 * Deferred lock request handling for non-blocking lock
394 * Attempt to establish a lock, and if it can't be granted, block it
399 struct nlm_host *host, struct nlm_lock *lock, int wait, nlmsvc_lock()
409 lock->fl.fl_type, lock->fl.fl_pid, nlmsvc_lock()
410 (long long)lock->fl.fl_start, nlmsvc_lock()
411 (long long)lock->fl.fl_end, nlmsvc_lock()
419 block = nlmsvc_lookup_block(file, lock); nlmsvc_lock()
421 block = nlmsvc_create_block(rqstp, host, file, lock, cookie); nlmsvc_lock()
425 lock = &block->b_call->a_args.lock; nlmsvc_lock()
427 lock->fl.fl_flags &= ~FL_SLEEP; nlmsvc_lock()
456 lock->fl.fl_flags &= ~FL_SLEEP; nlmsvc_lock()
457 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); nlmsvc_lock()
458 lock->fl.fl_flags &= ~FL_SLEEP; nlmsvc_lock()
468 * already pending lock request then we need nlmsvc_lock()
478 /* Filesystem lock operation is in progress nlmsvc_lock()
502 * Test for presence of a conflicting lock.
506 struct nlm_host *host, struct nlm_lock *lock, nlmsvc_testlock()
515 lock->fl.fl_type, nlmsvc_testlock()
516 (long long)lock->fl.fl_start, nlmsvc_testlock()
517 (long long)lock->fl.fl_end); nlmsvc_testlock()
524 error = vfs_test_lock(file->f_file, &lock->fl); nlmsvc_testlock()
534 if (lock->fl.fl_type == F_UNLCK) { nlmsvc_testlock()
539 dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", nlmsvc_testlock()
540 lock->fl.fl_type, (long long)lock->fl.fl_start, nlmsvc_testlock()
541 (long long)lock->fl.fl_end); nlmsvc_testlock()
545 conflock->svid = lock->fl.fl_pid; nlmsvc_testlock()
546 conflock->fl.fl_type = lock->fl.fl_type; nlmsvc_testlock()
547 conflock->fl.fl_start = lock->fl.fl_start; nlmsvc_testlock()
548 conflock->fl.fl_end = lock->fl.fl_end; nlmsvc_testlock()
549 locks_release_private(&lock->fl); nlmsvc_testlock()
556 * Remove a lock.
563 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) nlmsvc_unlock() argument
570 lock->fl.fl_pid, nlmsvc_unlock()
571 (long long)lock->fl.fl_start, nlmsvc_unlock()
572 (long long)lock->fl.fl_end); nlmsvc_unlock()
574 /* First, cancel any lock that might be there */ nlmsvc_unlock()
575 nlmsvc_cancel_blocked(net, file, lock); nlmsvc_unlock()
577 lock->fl.fl_type = F_UNLCK; nlmsvc_unlock()
578 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); nlmsvc_unlock()
591 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) nlmsvc_cancel_blocked() argument
599 lock->fl.fl_pid, nlmsvc_cancel_blocked()
600 (long long)lock->fl.fl_start, nlmsvc_cancel_blocked()
601 (long long)lock->fl.fl_end); nlmsvc_cancel_blocked()
607 block = nlmsvc_lookup_block(file, lock); nlmsvc_cancel_blocked()
611 &block->b_call->a_args.lock.fl); nlmsvc_cancel_blocked()
619 * This is a callback from the filesystem for VFS file lock requests.
622 * For SETLK or SETLKW request it will get the local posix lock.
644 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_grant_deferred()
669 * Unblock a blocked lock request. This is a callback invoked from the
670 * VFS layer when a lock on which we blocked is removed.
672 * This function doesn't grant the blocked lock instantly, but rather moves
683 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_notify_blocked()
718 * Try to claim a lock that was previously blocked.
732 struct nlm_lock *lock = &block->b_call->a_args.lock; nlmsvc_grant_blocked() local
736 dprintk("lockd: grant blocked lock %p\n", block); nlmsvc_grant_blocked()
751 /* Try the lock operation again */ nlmsvc_grant_blocked()
755 lock->fl.fl_flags |= FL_SLEEP; nlmsvc_grant_blocked()
756 fl_start = lock->fl.fl_start; nlmsvc_grant_blocked()
757 fl_end = lock->fl.fl_end; nlmsvc_grant_blocked()
758 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); nlmsvc_grant_blocked()
759 lock->fl.fl_flags &= ~FL_SLEEP; nlmsvc_grant_blocked()
760 lock->fl.fl_start = fl_start; nlmsvc_grant_blocked()
761 lock->fl.fl_end = fl_end; nlmsvc_grant_blocked()
767 dprintk("lockd: lock still blocked error %d\n", error); nlmsvc_grant_blocked()
781 dprintk("lockd: GRANTing blocked lock.\n"); nlmsvc_grant_blocked()
804 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
887 * If it is a blocking lock, call grant_blocked.
888 * For a non-blocking lock or test lock, revisit the request.
215 nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_file *file, struct nlm_lock *lock, struct nlm_cookie *cookie) nlmsvc_create_block() argument
398 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie, int reclaim) nlmsvc_lock() argument
505 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, struct nlm_lock *conflock, struct nlm_cookie *cookie) nlmsvc_testlock() argument
H A DMakefile2 # Makefile for the linux lock manager stuff
H A Dclnt4xdr.c64 static void nlm4_compute_offsets(const struct nlm_lock *lock, nlm4_compute_offsets() argument
67 const struct file_lock *fl = &lock->fl; nlm4_compute_offsets()
264 const struct nlm_lock *lock = &result->lock; encode_nlm4_holder() local
268 encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_nlm4_holder()
269 encode_int32(xdr, lock->svid); encode_nlm4_holder()
270 encode_netobj(xdr, lock->oh.data, lock->oh.len); encode_nlm4_holder()
273 nlm4_compute_offsets(lock, &l_offset, &l_len); encode_nlm4_holder()
280 struct nlm_lock *lock = &result->lock; decode_nlm4_holder() local
281 struct file_lock *fl = &lock->fl; decode_nlm4_holder()
288 memset(lock, 0, sizeof(*lock)); decode_nlm4_holder()
295 lock->svid = be32_to_cpup(p); decode_nlm4_holder()
296 fl->fl_pid = (pid_t)lock->svid; decode_nlm4_holder()
298 error = decode_netobj(xdr, &lock->oh); decode_nlm4_holder()
330 /* NB: client-side does not set lock->len */ encode_caller_name()
349 const struct nlm_lock *lock) encode_nlm4_lock()
354 encode_caller_name(xdr, lock->caller); encode_nlm4_lock()
355 encode_fh(xdr, &lock->fh); encode_nlm4_lock()
356 encode_netobj(xdr, lock->oh.data, lock->oh.len); encode_nlm4_lock()
359 *p++ = cpu_to_be32(lock->svid); encode_nlm4_lock()
361 nlm4_compute_offsets(lock, &l_offset, &l_len); encode_nlm4_lock()
386 const struct nlm_lock *lock = &args->lock; nlm4_xdr_enc_testargs() local
389 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm4_xdr_enc_testargs()
390 encode_nlm4_lock(xdr, lock); nlm4_xdr_enc_testargs()
407 const struct nlm_lock *lock = &args->lock; nlm4_xdr_enc_lockargs() local
411 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm4_xdr_enc_lockargs()
412 encode_nlm4_lock(xdr, lock); nlm4_xdr_enc_lockargs()
429 const struct nlm_lock *lock = &args->lock; nlm4_xdr_enc_cancargs() local
433 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm4_xdr_enc_cancargs()
434 encode_nlm4_lock(xdr, lock); nlm4_xdr_enc_cancargs()
447 const struct nlm_lock *lock = &args->lock; nlm4_xdr_enc_unlockargs() local
450 encode_nlm4_lock(xdr, lock); nlm4_xdr_enc_unlockargs()
348 encode_nlm4_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) encode_nlm4_lock() argument
H A Dclntxdr.c60 static void nlm_compute_offsets(const struct nlm_lock *lock, nlm_compute_offsets() argument
63 const struct file_lock *fl = &lock->fl; nlm_compute_offsets()
259 const struct nlm_lock *lock = &result->lock; encode_nlm_holder() local
263 encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_nlm_holder()
264 encode_int32(xdr, lock->svid); encode_nlm_holder()
265 encode_netobj(xdr, lock->oh.data, lock->oh.len); encode_nlm_holder()
268 nlm_compute_offsets(lock, &l_offset, &l_len); encode_nlm_holder()
275 struct nlm_lock *lock = &result->lock; decode_nlm_holder() local
276 struct file_lock *fl = &lock->fl; decode_nlm_holder()
282 memset(lock, 0, sizeof(*lock)); decode_nlm_holder()
289 lock->svid = be32_to_cpup(p); decode_nlm_holder()
290 fl->fl_pid = (pid_t)lock->svid; decode_nlm_holder()
292 error = decode_netobj(xdr, &lock->oh); decode_nlm_holder()
324 /* NB: client-side does not set lock->len */ encode_caller_name()
343 const struct nlm_lock *lock) encode_nlm_lock()
348 encode_caller_name(xdr, lock->caller); encode_nlm_lock()
349 encode_fh(xdr, &lock->fh); encode_nlm_lock()
350 encode_netobj(xdr, lock->oh.data, lock->oh.len); encode_nlm_lock()
353 *p++ = cpu_to_be32(lock->svid); encode_nlm_lock()
355 nlm_compute_offsets(lock, &l_offset, &l_len); encode_nlm_lock()
379 const struct nlm_lock *lock = &args->lock; nlm_xdr_enc_testargs() local
382 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm_xdr_enc_testargs()
383 encode_nlm_lock(xdr, lock); nlm_xdr_enc_testargs()
400 const struct nlm_lock *lock = &args->lock; nlm_xdr_enc_lockargs() local
404 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm_xdr_enc_lockargs()
405 encode_nlm_lock(xdr, lock); nlm_xdr_enc_lockargs()
422 const struct nlm_lock *lock = &args->lock; nlm_xdr_enc_cancargs() local
426 encode_bool(xdr, lock->fl.fl_type == F_WRLCK); nlm_xdr_enc_cancargs()
427 encode_nlm_lock(xdr, lock); nlm_xdr_enc_cancargs()
440 const struct nlm_lock *lock = &args->lock; nlm_xdr_enc_unlockargs() local
443 encode_nlm_lock(xdr, lock); nlm_xdr_enc_unlockargs()
342 encode_nlm_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) encode_nlm_lock() argument
/linux-4.1.27/include/net/netfilter/
H A Dxt_rateest.h5 /* keep lock and bstats on same cache line to speedup xt_rateest_tg() */
7 spinlock_t lock; member in struct:xt_rateest
8 /* keep rstats and lock on same cache line to speedup xt_rateest_mt() */
/linux-4.1.27/arch/powerpc/lib/
H A Dlocks.c2 * Spin and read/write lock operations.
26 void __spin_yield(arch_spinlock_t *lock) __spin_yield() argument
30 lock_value = lock->slock; __spin_yield()
39 if (lock->slock != lock_value) __spin_yield()
47 * Waiting for a read lock or a write lock on a rwlock...
56 lock_value = rw->lock; __rw_yield()
58 return; /* no write lock at present */ __rw_yield()
65 if (rw->lock != lock_value) __rw_yield()
72 void arch_spin_unlock_wait(arch_spinlock_t *lock) arch_spin_unlock_wait() argument
76 while (lock->slock) { arch_spin_unlock_wait()
79 __spin_yield(lock); arch_spin_unlock_wait()
/linux-4.1.27/drivers/usb/gadget/function/
H A Du_ether_configfs.h42 mutex_lock(&opts->lock); \
44 mutex_unlock(&opts->lock); \
54 mutex_lock(&opts->lock); \
56 mutex_unlock(&opts->lock); \
61 mutex_unlock(&opts->lock); \
78 mutex_lock(&opts->lock); \
80 mutex_unlock(&opts->lock); \
90 mutex_lock(&opts->lock); \
92 mutex_unlock(&opts->lock); \
97 mutex_unlock(&opts->lock); \
114 mutex_lock(&opts->lock); \
116 mutex_unlock(&opts->lock); \
126 mutex_lock(&opts->lock); \
139 mutex_unlock(&opts->lock); \
154 mutex_lock(&opts->lock); \
156 mutex_unlock(&opts->lock); \
/linux-4.1.27/fs/dlm/
H A DMakefile5 lock.o \
/linux-4.1.27/fs/
H A Dfs_struct.c18 spin_lock(&fs->lock); set_fs_root()
23 spin_unlock(&fs->lock); set_fs_root()
37 spin_lock(&fs->lock); set_fs_pwd()
42 spin_unlock(&fs->lock); set_fs_pwd()
68 spin_lock(&fs->lock); do_each_thread()
77 spin_unlock(&fs->lock); do_each_thread()
100 spin_lock(&fs->lock); exit_fs()
103 spin_unlock(&fs->lock); exit_fs()
113 /* We don't need to lock fs - think why ;-) */ copy_fs_struct()
117 spin_lock_init(&fs->lock); copy_fs_struct()
121 spin_lock(&old->lock); copy_fs_struct()
126 spin_unlock(&old->lock); copy_fs_struct()
141 spin_lock(&fs->lock); unshare_fs_struct()
144 spin_unlock(&fs->lock); unshare_fs_struct()
163 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
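set_fs_root() and set_fs_pwd() in the fs_struct.c hits above share one shape: publish the new struct path under fs->lock, but take and drop path references outside the spinlock, since path_put() may end up sleeping. A simplified sketch of that shape is below; the seqcount used by the real code is omitted, and the function name is invented.

```c
#include <linux/fs_struct.h>
#include <linux/path.h>
#include <linux/spinlock.h>

static void example_set_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);			/* reference for the new fs->pwd */

	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);	/* drop the old reference unlocked */
}
```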
H A Dfs_pin.c15 spin_lock_irq(&pin->wait.lock); pin_remove()
18 spin_unlock_irq(&pin->wait.lock); pin_remove()
44 spin_lock_irq(&p->wait.lock); pin_kill()
47 spin_unlock_irq(&p->wait.lock); pin_kill()
53 spin_unlock_irq(&p->wait.lock); pin_kill()
60 spin_unlock_irq(&p->wait.lock); pin_kill()
67 spin_lock_irq(&p->wait.lock); pin_kill()
69 spin_unlock_irq(&p->wait.lock); pin_kill()
/linux-4.1.27/arch/arc/include/asm/
H A Dspinlock.h17 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
21 static inline void arch_spin_lock(arch_spinlock_t *lock) arch_spin_lock() argument
27 * after the lock for providing the ACQUIRE semantics. arch_spin_lock()
37 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) arch_spin_lock()
41 * ACQUIRE barrier to ensure load/store after taking the lock arch_spin_lock()
51 static inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spin_trylock() argument
60 : "r"(&(lock->slock)) arch_spin_trylock()
68 static inline void arch_spin_unlock(arch_spinlock_t *lock) arch_spin_unlock() argument
81 : "r"(&(lock->slock)) arch_spin_unlock()
106 /* 1 - lock taken successfully */ arch_read_trylock()
114 * zero means writer holds the lock exclusively, deny Reader. arch_read_trylock()
115 * Otherwise grant lock to first/subseq reader arch_read_trylock()
128 /* 1 - lock taken successfully */ arch_write_trylock()
136 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), arch_write_trylock()
176 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
177 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
179 #define arch_spin_relax(lock) cpu_relax()
180 #define arch_read_relax(lock) cpu_relax()
181 #define arch_write_relax(lock) cpu_relax()
/linux-4.1.27/include/drm/
H A Ddrm_modeset_lock.h39 * ctx. And if any lock fxn returns -EDEADLK, it must backoff and
47 * Contended lock: if a lock is contended you should only call
49 * contended lock.
74 * modeset lock
95 * drm_modeset_lock_init - initialize lock
96 * @lock: lock to init
98 static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock) drm_modeset_lock_init() argument
100 ww_mutex_init(&lock->mutex, &crtc_ww_class); drm_modeset_lock_init()
101 INIT_LIST_HEAD(&lock->head); drm_modeset_lock_init()
105 * drm_modeset_lock_fini - cleanup lock
106 * @lock: lock to cleanup
108 static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock) drm_modeset_lock_fini() argument
110 WARN_ON(!list_empty(&lock->head)); drm_modeset_lock_fini()
115 * @lock: lock to check
117 static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock) drm_modeset_is_locked() argument
119 return ww_mutex_is_locked(&lock->mutex); drm_modeset_is_locked()
122 int drm_modeset_lock(struct drm_modeset_lock *lock,
124 int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
126 void drm_modeset_unlock(struct drm_modeset_lock *lock);
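The drm_modeset_lock.h comments describe the ww_mutex-based acquire context: every lock taken through a context must handle -EDEADLK by backing off and retrying. A sketch of the canonical retry loop, using the public drm_modeset_acquire_* helpers declared alongside this header:

```c
#include <drm/drm_modeset_lock.h>

/* Canonical acquire/backoff loop for a single modeset lock; with more
 * locks, all of them are (re)taken with the same ctx on every pass. */
static int grab_modeset_lock(struct drm_modeset_lock *lock)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(lock, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop and wait for the contended lock */
		goto retry;
	}

	if (!ret) {
		/* ... touch the state protected by 'lock' ... */
		drm_modeset_drop_locks(&ctx);
	}

	drm_modeset_acquire_fini(&ctx);
	return ret;
}
```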
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_dlm.h42 * - To allow clients to cache state protected by a lock by holding the
43 * lock until a conflicting lock is requested or it is expired by the LRU.
90 * decisions about lack of conflicts or do any autonomous lock granting without
108 * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
112 * There are six lock modes along with a compatibility matrix to indicate if
115 * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
118 * lock from an OST, a lock with PW mode will be issued.
120 * an OST, a lock with PR mode will be issued. Also, if the client opens a
121 * file for execution, it is granted a lock with PR mode.
122 * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
123 * requests a write lock during a file open operation.
125 * an inodebit lock with the CR mode on the intermediate path component.
249 /** Server lock volume (SLV). Protected by pl_lock. */
251 /** Current biggest client lock volume. Protected by pl_lock. */
272 typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
277 * be associated with an LDLM lock and transferred from client to server and
282 * - layout lock code to return the layout when the layout lock is granted
291 int (*lvbo_size)(struct ldlm_lock *lock);
293 int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
297 * LDLM pools related, type of lock pool in the namespace.
317 * Estimated lock callback time. Used by adaptive timeout code to
325 /** LDLM namespace lock stats */
354 * exist during new lock enqueue.
361 * Every lock obtained by client in that namespace is actually represented by
363 * linked by a special cookie by which one node can tell to the other which lock
404 * LRU lock list.
406 * This list is only used on clients for lock caching purposes.
431 * MDT will return an UPDATE lock along with a LOOKUP lock.
444 /** "policy" function that does actual lock conflict determination */
484 * If extended lock is requested for more then this many bytes and
496 /** LDLM lock stats */
533 * Returns 1 if namespace \a ns supports early lock cancel (ELC).
559 /** Type for blocking callback function of a lock. */
560 typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
563 /** Type for completion callback function of a lock. */
564 typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
566 /** Type for glimpse callback function of a lock. */
567 typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
571 struct ldlm_lock *gl_lock; /* lock to glimpse */
581 /** Interval node data for each LDLM_EXTENT lock. */
591 * The interval tree must be accessed under the resource lock.
598 ldlm_mode_t lit_mode; /* lock mode */
619 /* Protected by the hash lock */
645 * LDLM lock structure
647 * Represents a single LDLM lock and its state in memory. Each lock is
650 * depending on the lock type and whether the locks are conflicting or
655 * Local lock handle.
656 * When remote side wants to tell us about a lock, they address
659 * other threads or nodes. When the lock needs to be accessed
660 * from the handle, it is looked up again in the lock table, and
669 * we do not accidentally free lock structure that is in use.
673 * Internal spinlock protects l_resource. We should hold this lock
678 * Pointer to actual resource this lock is in.
688 * Linkage to resource's lock queues according to current lock state.
716 /** Lock completion handler pointer. Called when lock is granted. */
721 * - as a notification of an attempt to queue a conflicting lock (once)
722 * - as a notification when the lock is being cancelled.
725 * and then once more when the last user went away and the lock is
749 * Remote lock handle.
750 * If the lock is remote, this is the handle of the other side lock
756 * Representation of private data specific for a lock type.
757 * Examples are: extent range for extent lock or bitmask for ibits locks
774 * If the lock is granted, a process sleeps on this waitq to learn when
775 * it's no longer in use. If the lock is not granted, a process sleeps
782 * the lock, e.g. enqueue the lock or send blocking AST.
787 * Time last used by e.g. being matched by lock match.
792 /** Originally requested extent for the extent lock. */
807 /** Private storage for lock user. Opaque to LDLM. */
825 * then if the lock timed out, it is moved to
832 * Set when lock is sent a blocking AST. Time in seconds when timeout
833 * is reached and client holding this lock could be evicted.
835 * under this lock.
840 /** Local PID of process which created this lock. */
844 * Number of times blocking AST was sent for this lock.
858 * Pointer to a conflicting lock that caused blocking AST to be sent
859 * for this lock
882 * export blocking dlm lock list, protected by
884 * Lock order of waiting_lists_spinlock, exp_bl_list_lock and res lock
885 * is: res lock -> exp_bl_list_lock -> waiting_lists_spinlock.
897 * A resource can only hold locks of a single lock type, though there may be
898 * multiple ldlm_locks on a single resource, depending on the lock type and
943 * Server-side-only lock value block elements.
959 static inline bool ldlm_has_layout(struct ldlm_lock *lock) ldlm_has_layout() argument
961 return lock->l_resource->lr_type == LDLM_IBITS && ldlm_has_layout()
962 lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT; ldlm_has_layout()
978 ldlm_lock_to_ns(struct ldlm_lock *lock) ldlm_lock_to_ns() argument
980 return ldlm_res_to_ns(lock->l_resource); ldlm_lock_to_ns()
984 ldlm_lock_to_ns_name(struct ldlm_lock *lock) ldlm_lock_to_ns_name() argument
986 return ldlm_ns_name(ldlm_lock_to_ns(lock)); ldlm_lock_to_ns_name()
990 ldlm_lock_to_ns_at(struct ldlm_lock *lock) ldlm_lock_to_ns_at() argument
992 return &lock->l_resource->lr_ns_bucket->nsb_at_estimate; ldlm_lock_to_ns_at()
1005 static inline int ldlm_lvbo_size(struct ldlm_lock *lock) ldlm_lvbo_size() argument
1007 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lvbo_size()
1010 return ns->ns_lvbo->lvbo_size(lock); ldlm_lvbo_size()
1015 static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) ldlm_lvbo_fill() argument
1017 struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); ldlm_lvbo_fill()
1021 return ns->ns_lvbo->lvbo_fill(lock, buf, len); ldlm_lvbo_fill()
1040 __u32 ei_type; /** Type of the lock being enqueued. */
1041 __u32 ei_mode; /** Mode of the lock being enqueued. */
1042 void *ei_cb_bl; /** blocking lock callback */
1043 void *ei_cb_cp; /** lock completion callback */
1044 void *ei_cb_gl; /** lock glimpse callback */
1056 * For the cases where we do not have an actual lock to print along
1063 * Support function for lock information printing into debug logs.
1066 #define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \
1072 _ldlm_lock_debug(lock, msgdata, fmt, ##a); \
1075 void _ldlm_lock_debug(struct ldlm_lock *lock,
1081 * Rate-limited version of lock printing function.
1083 #define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
1086 ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
1089 #define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
1090 #define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
1092 /** Non-rate-limited lock printing function for debugging purposes. */
1093 #define LDLM_DEBUG(lock, fmt, a...) do { \
1094 if (likely(lock != NULL)) { \
1096 ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \
1099 LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a); \
1103 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
1108 * Return values for lock iterators.
1109 * Also used during deciding of lock grants and cancellations.
1119 * LDLM provides for a way to iterate through every lock on a resource or
1133 int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
1136 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
1145 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
1146 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
1155 void ldlm_lock2handle(const struct ldlm_lock *lock,
1163 * Obtain a lock reference by its handle.
1170 #define LDLM_LOCK_REF_DEL(lock) \
1171 lu_ref_del(&lock->l_reference, "handle", current)
1176 struct ldlm_lock *lock; ldlm_handle2lock_long() local
1178 lock = __ldlm_handle2lock(h, flags); ldlm_handle2lock_long()
1179 if (lock != NULL) ldlm_handle2lock_long()
1180 LDLM_LOCK_REF_DEL(lock); ldlm_handle2lock_long()
1181 return lock; ldlm_handle2lock_long()
1207 * Release a temporary lock reference obtained by ldlm_handle2lock() or
1210 #define LDLM_LOCK_PUT(lock) \
1212 LDLM_LOCK_REF_DEL(lock); \
1213 /*LDLM_DEBUG((lock), "put");*/ \
1214 ldlm_lock_put(lock); \
1218 * Release a lock reference obtained by some other means (see
1221 #define LDLM_LOCK_RELEASE(lock) \
1223 /*LDLM_DEBUG((lock), "put");*/ \
1224 ldlm_lock_put(lock); \
1227 #define LDLM_LOCK_GET(lock) \
1229 ldlm_lock_get(lock); \
1230 /*LDLM_DEBUG((lock), "get");*/ \
1231 lock; \
1247 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
1248 void ldlm_lock_put(struct ldlm_lock *lock);
1249 void ldlm_lock_destroy(struct ldlm_lock *lock);
1250 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
1255 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
1256 void ldlm_lock_fail_match(struct ldlm_lock *lock);
1257 void ldlm_lock_allow_match(struct ldlm_lock *lock);
1258 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
1265 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
1267 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
1268 void ldlm_lock_cancel(struct ldlm_lock *lock);
1303 struct ldlm_lock *lock); ldlm_proc_cleanup()
1304 void ldlm_resource_unlink_lock(struct ldlm_lock *lock); ldlm_proc_cleanup()
1324 * also used by client-side lock handlers to perform minimum level base
1327 int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
1328 int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
1330 int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
1331 int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
1332 int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
1417 * than one lock_res is dead-lock safe.
1449 struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
1450 void unlock_res_and_lock(struct ldlm_lock *lock);
1475 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
1476 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
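The LDLM_LOCK_GET()/LDLM_LOCK_PUT() macros and the __ldlm_handle2lock() helper above follow a plain get/put reference-count discipline: a handle lookup returns the lock with a temporary reference that the caller must later drop. The sketch below is a hedged, standalone userspace illustration of that discipline only; struct demo_lock, demo_handle2lock() and the other demo_* names are invented and are not the Lustre API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_lock {
    atomic_int refcount;        /* object is freed when this drops to 0 */
    int id;
};

static struct demo_lock *demo_lock_get(struct demo_lock *lock)
{
    atomic_fetch_add(&lock->refcount, 1);   /* pin the object */
    return lock;
}

static void demo_lock_put(struct demo_lock *lock)
{
    /* dropping the last reference frees the lock */
    if (atomic_fetch_sub(&lock->refcount, 1) == 1)
        free(lock);
}

/* a handle lookup hands back the lock with one temporary reference held */
static struct demo_lock *demo_handle2lock(struct demo_lock **table, int h)
{
    struct demo_lock *lock = table[h];

    return lock ? demo_lock_get(lock) : NULL;
}

int main(void)
{
    struct demo_lock *l = calloc(1, sizeof(*l));
    struct demo_lock *table[1];
    struct demo_lock *ref;

    if (!l)
        return 1;
    l->id = 42;
    atomic_init(&l->refcount, 1);   /* the table's own reference */
    table[0] = l;

    ref = demo_handle2lock(table, 0);
    if (ref) {
        printf("looked up lock %d\n", ref->id);
        demo_lock_put(ref);         /* drop the temporary reference */
    }
    demo_lock_put(l);               /* drop the table reference, frees it */
    return 0;
}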
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
H A Dlovsub_lock.c51 * Lovsub lock operations.
87 * method is called whenever sub-lock state changes. Propagates state change
112 * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
113 * asking parent lock.
118 struct lovsub_lock *lock = cl2lovsub_lock(slice); lovsub_lock_weigh() local
124 if (!list_empty(&lock->lss_parents)) { lovsub_lock_weigh()
130 lov = container_of(lock->lss_parents.next, lovsub_lock_weigh()
179 * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
185 * - when top-lock finds existing sub-lock in the cache.
187 * Note, that lock mode is not propagated to the parent: i.e., if CLM_READ
188 * top-lock matches CLM_WRITE sub-lock, top-lock is still CLM_READ.
213 * Notify top-lock about modification, if lock description changes lov_sublock_modify()
227 struct lovsub_lock *lock = cl2lovsub_lock(s); lovsub_lock_modify() local
234 list_for_each_entry(scan, &lock->lss_parents, lll_list) { lovsub_lock_modify()
239 rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx); lovsub_lock_modify()
271 * top-lock.
286 /* See LU-1355 for the case that a glimpse lock is lovsub_lock_delete_one()
296 * Here lies a problem: a sub-lock is canceled while top-lock lovsub_lock_delete_one()
297 * is being unlocked. Top-lock cannot be moved into CLS_NEW lovsub_lock_delete_one()
299 * placing lock into CLS_CACHED (or failing it), see lovsub_lock_delete_one()
300 * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED lovsub_lock_delete_one()
302 * sub-locks exist in CLS_CACHED (this allows cached top-lock lovsub_lock_delete_one()
303 * to be reused immediately). Nor can we wait for top-lock lovsub_lock_delete_one()
309 * the top-lock, that will be seen by the next call to lovsub_lock_delete_one()
317 * if a sub-lock is canceled move its top-lock into CLS_NEW lovsub_lock_delete_one()
318 * state to preserve an invariant that a top-lock in lovsub_lock_delete_one()
320 * sub-locks), and so that next attempt to re-use the top-lock lovsub_lock_delete_one()
321 * enqueues missing sub-lock. lovsub_lock_delete_one()
327 * if last sub-lock is canceled, destroy the top-lock (which lovsub_lock_delete_one()
332 * as cancellation of a top-lock might acquire mutices lovsub_lock_delete_one()
333 * of its other sub-locks, violating lock ordering, lovsub_lock_delete_one()
336 * To work around this, the mutex of this sub-lock is lovsub_lock_delete_one()
337 * released, top-lock is destroyed, and sub-lock mutex lovsub_lock_delete_one()
344 * TODO: The lock model here is too complex, because lovsub_lock_delete_one()
345 * the lock may be canceled and deleted voluntarily: lovsub_lock_delete_one()
366 CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n"); lovsub_lock_delete_one()
378 * invoked in "bottom-to-top" delete, when lock destruction starts from the
379 * sub-lock (e.g, as a result of ldlm lock LRU policy).
391 * Destruction of a sub-lock might take multiple iterations, because lovsub_lock_delete()
392 * when the last sub-lock of a given top-lock is deleted, top-lock is lovsub_lock_delete()
393 * canceled proactively, and this requires to release sub-lock lovsub_lock_delete()
394 * mutex. Once sub-lock mutex has been released, list of its parents lovsub_lock_delete()
451 struct cl_lock *lock, const struct cl_io *io) lovsub_lock_init()
459 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); lovsub_lock_init()
450 lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io) lovsub_lock_init() argument
/linux-4.1.27/fs/reiserfs/
H A Dlock.c17 * Also this lock is often released before a call that could block because
26 mutex_lock(&sb_i->lock); reiserfs_write_lock()
39 * Are we unlocking without even holding the lock? reiserfs_write_unlock()
47 mutex_unlock(&sb_i->lock); reiserfs_write_unlock()
56 /* this can happen when the lock isn't always held */ reiserfs_write_unlock_nested()
64 mutex_unlock(&sb_i->lock); reiserfs_write_unlock_nested()
73 /* this can happen when the lock isn't always held */ reiserfs_write_lock_nested()
77 mutex_lock(&sb_i->lock); reiserfs_write_lock_nested()
84 * write lock held. caller is the string printed just before calling BUG()
98 WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n"); reiserfs_lock_check_recursive()
/linux-4.1.27/sound/firewire/bebob/
H A Dbebob_hwdep.c14 * 3. lock/unlock stream
27 spin_lock_irq(&bebob->lock); hwdep_read()
31 spin_unlock_irq(&bebob->lock); hwdep_read()
36 spin_lock_irq(&bebob->lock); hwdep_read()
48 spin_unlock_irq(&bebob->lock); hwdep_read()
64 spin_lock_irq(&bebob->lock); hwdep_poll()
69 spin_unlock_irq(&bebob->lock); hwdep_poll()
99 spin_lock_irq(&bebob->lock); hwdep_lock()
108 spin_unlock_irq(&bebob->lock); hwdep_lock()
118 spin_lock_irq(&bebob->lock); hwdep_unlock()
127 spin_unlock_irq(&bebob->lock); hwdep_unlock()
137 spin_lock_irq(&bebob->lock); hwdep_release()
140 spin_unlock_irq(&bebob->lock); hwdep_release()
/linux-4.1.27/sound/firewire/oxfw/
H A Doxfw-hwdep.c14 * 3. lock/unlock stream
26 spin_lock_irq(&oxfw->lock); hwdep_read()
30 spin_unlock_irq(&oxfw->lock); hwdep_read()
35 spin_lock_irq(&oxfw->lock); hwdep_read()
47 spin_unlock_irq(&oxfw->lock); hwdep_read()
63 spin_lock_irq(&oxfw->lock); hwdep_poll()
68 spin_unlock_irq(&oxfw->lock); hwdep_poll()
96 spin_lock_irq(&oxfw->lock); hwdep_lock()
105 spin_unlock_irq(&oxfw->lock); hwdep_lock()
114 spin_lock_irq(&oxfw->lock); hwdep_unlock()
123 spin_unlock_irq(&oxfw->lock); hwdep_unlock()
132 spin_lock_irq(&oxfw->lock); hwdep_release()
135 spin_unlock_irq(&oxfw->lock); hwdep_release()
/linux-4.1.27/tools/lib/lockdep/
H A Dpreload.c11 * struct lock_lookup - liblockdep's view of a single unique lock
12 * @orig: pointer to the original pthread lock, used for lookups
15 * @node: rb-tree node used to store the lock in a global tree
16 * @name: a unique name for the lock
19 void *orig; /* Original pthread lock, used for lookups */
21 * a dep_map and a key for each lock */
99 static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent) __get_lock_node() argument
110 if (lock < l->orig) __get_lock_node()
112 else if (lock > l->orig) __get_lock_node()
130 static inline bool is_static_lock(struct lock_lookup *lock) is_static_lock() argument
132 return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks); is_static_lock()
162 static inline void free_lock(struct lock_lookup *lock) free_lock() argument
164 if (likely(!is_static_lock(lock))) free_lock()
165 free(lock); free_lock()
169 * __get_lock - find or create a lock instance
170 * @lock: pointer to a pthread lock function
172 * Try to find an existing lock in the rbtree using the provided pointer. If
175 static struct lock_lookup *__get_lock(void *lock) __get_lock() argument
181 node = __get_lock_node(lock, &parent); __get_lock()
187 /* We didn't find the lock, let's create it */ __get_lock()
192 l->orig = lock; __get_lock()
194 * Currently the name of the lock is the ptr value of the pthread lock, __get_lock()
197 * TODO: Get the real name of the lock using libdwarf __get_lock()
199 sprintf(l->name, "%p", lock); __get_lock()
204 node = __get_lock_node(lock, &parent); __get_lock()
212 static void __del_lock(struct lock_lookup *lock) __del_lock() argument
215 rb_erase(&lock->node, &locks); __del_lock()
217 free_lock(lock); __del_lock()
242 * initializing a held lock. pthread_mutex_init()
263 * lock before actually taking it, but here we must deal with the case pthread_mutex_lock()
266 * To do that we'll "release" the lock if locking failed - this way pthread_mutex_lock()
268 * the lock, and if that fails - we'll be back to the correct pthread_mutex_lock()
300 * Just like taking a lock, only in reverse! pthread_mutex_unlock()
302 * If we fail releasing the lock, tell lockdep we're holding it again. pthread_mutex_unlock()
316 * Let's see if we're releasing a lock that's held. pthread_mutex_destroy()
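__get_lock() above implements a find-or-create lookup keyed by the address of the caller's pthread lock, backed by an rb-tree. Below is a hedged userspace sketch of the same find-or-create idea, using a singly linked list instead of an rb-tree for brevity; struct lock_entry and get_entry() are invented names used only for illustration.

#include <stdio.h>
#include <stdlib.h>

struct lock_entry {
    void *orig;                 /* address of the caller's pthread lock */
    char name[32];              /* printable name, here just the pointer */
    struct lock_entry *next;
};

static struct lock_entry *entries;

static struct lock_entry *get_entry(void *orig)
{
    struct lock_entry *e;

    for (e = entries; e; e = e->next)   /* lookup by pointer value */
        if (e->orig == orig)
            return e;

    /* not found: create and prepend a new tracking record */
    e = calloc(1, sizeof(*e));
    if (!e)
        return NULL;
    e->orig = orig;
    snprintf(e->name, sizeof(e->name), "%p", orig);
    e->next = entries;
    entries = e;
    return e;
}

int main(void)
{
    int lock_a, lock_b;         /* stand-ins for pthread mutex objects */
    struct lock_entry *a = get_entry(&lock_a);
    struct lock_entry *b = get_entry(&lock_b);

    if (!a || !b)
        return 1;
    printf("%s %s\n", a->name, b->name);
    /* a second lookup of the same lock returns the same record */
    printf("same record: %d\n", get_entry(&lock_a) == a);
    return 0;
}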
/linux-4.1.27/drivers/clk/berlin/
H A Dberlin2-div.c68 spinlock_t *lock; member in struct:berlin2_div
81 if (div->lock) berlin2_div_is_enabled()
82 spin_lock(div->lock); berlin2_div_is_enabled()
87 if (div->lock) berlin2_div_is_enabled()
88 spin_unlock(div->lock); berlin2_div_is_enabled()
99 if (div->lock) berlin2_div_enable()
100 spin_lock(div->lock); berlin2_div_enable()
106 if (div->lock) berlin2_div_enable()
107 spin_unlock(div->lock); berlin2_div_enable()
118 if (div->lock) berlin2_div_disable()
119 spin_lock(div->lock); berlin2_div_disable()
125 if (div->lock) berlin2_div_disable()
126 spin_unlock(div->lock); berlin2_div_disable()
135 if (div->lock) berlin2_div_set_parent()
136 spin_lock(div->lock); berlin2_div_set_parent()
154 if (div->lock) berlin2_div_set_parent()
155 spin_unlock(div->lock); berlin2_div_set_parent()
167 if (div->lock) berlin2_div_get_parent()
168 spin_lock(div->lock); berlin2_div_get_parent()
180 if (div->lock) berlin2_div_get_parent()
181 spin_unlock(div->lock); berlin2_div_get_parent()
193 if (div->lock) berlin2_div_recalc_rate()
194 spin_lock(div->lock); berlin2_div_recalc_rate()
216 if (div->lock) berlin2_div_recalc_rate()
217 spin_unlock(div->lock); berlin2_div_recalc_rate()
241 unsigned long flags, spinlock_t *lock) berlin2_div_register()
255 div->lock = lock; berlin2_div_register()
238 berlin2_div_register(const struct berlin2_div_map *map, void __iomem *base, const char *name, u8 div_flags, const char **parent_names, int num_parents, unsigned long flags, spinlock_t *lock) berlin2_div_register() argument
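The berlin2_div_* helpers above (and the spear, tegra, hisilicon and imx clock code appearing further down in these results) use an optional lock: the spinlock pointer may be NULL when the divider has no sharers, so every register update is bracketed by "if (div->lock)" checks. Below is a hedged userspace analogue with a pthread mutex; struct demo_div and its fields are invented for illustration.

#include <pthread.h>
#include <stdio.h>

struct demo_div {
    pthread_mutex_t *lock;      /* may be NULL: lockless instance */
    unsigned int reg;           /* stand-in for the shared divider register */
};

static void demo_div_set(struct demo_div *div, unsigned int val)
{
    if (div->lock)
        pthread_mutex_lock(div->lock);  /* only if a lock was supplied */

    div->reg = val;                     /* read-modify-write region */

    if (div->lock)
        pthread_mutex_unlock(div->lock);
}

int main(void)
{
    static pthread_mutex_t shared = PTHREAD_MUTEX_INITIALIZER;
    struct demo_div with_lock = { .lock = &shared };
    struct demo_div without_lock = { .lock = NULL };

    demo_div_set(&with_lock, 3);    /* serialized against other users */
    demo_div_set(&without_lock, 7); /* caller guarantees exclusivity */
    printf("%u %u\n", with_lock.reg, without_lock.reg);
    return 0;
}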
/linux-4.1.27/arch/s390/hypfs/
H A Dhypfs_dbfs.c41 mutex_lock(&df->lock); dbfs_read()
44 mutex_unlock(&df->lock); dbfs_read()
49 mutex_unlock(&df->lock); dbfs_read()
53 mutex_unlock(&df->lock); dbfs_read()
65 mutex_lock(&df->lock); dbfs_ioctl()
70 mutex_unlock(&df->lock); dbfs_ioctl()
86 mutex_init(&df->lock); hypfs_dbfs_create_file()
/linux-4.1.27/net/atm/
H A Daddr.c53 spin_lock_irqsave(&dev->lock, flags); atm_reset_addr()
62 spin_unlock_irqrestore(&dev->lock, flags);
78 spin_lock_irqsave(&dev->lock, flags); atm_add_addr()
85 spin_unlock_irqrestore(&dev->lock, flags); list_for_each_entry()
91 spin_unlock_irqrestore(&dev->lock, flags);
96 spin_unlock_irqrestore(&dev->lock, flags);
113 spin_lock_irqsave(&dev->lock, flags); atm_del_addr()
121 spin_unlock_irqrestore(&dev->lock, flags); list_for_each_entry()
128 spin_unlock_irqrestore(&dev->lock, flags);
141 spin_lock_irqsave(&dev->lock, flags); atm_get_addr()
150 spin_unlock_irqrestore(&dev->lock, flags); atm_get_addr()
155 spin_unlock_irqrestore(&dev->lock, flags); atm_get_addr()
/linux-4.1.27/sound/firewire/dice/
H A Ddice-hwdep.c19 spin_lock_irq(&dice->lock); hwdep_read()
23 spin_unlock_irq(&dice->lock); hwdep_read()
28 spin_lock_irq(&dice->lock); hwdep_read()
47 spin_unlock_irq(&dice->lock); hwdep_read()
63 spin_lock_irq(&dice->lock); hwdep_poll()
68 spin_unlock_irq(&dice->lock); hwdep_poll()
96 spin_lock_irq(&dice->lock); hwdep_lock()
105 spin_unlock_irq(&dice->lock); hwdep_lock()
114 spin_lock_irq(&dice->lock); hwdep_unlock()
123 spin_unlock_irq(&dice->lock); hwdep_unlock()
132 spin_lock_irq(&dice->lock); hwdep_release()
135 spin_unlock_irq(&dice->lock); hwdep_release()
/linux-4.1.27/tools/perf/arch/x86/util/
H A Dtsc.c20 seq = pc->lock; perf_read_tsc_conversion()
27 if (pc->lock == seq && !(seq & 1)) perf_read_tsc_conversion()
30 pr_debug("failed to get perf_event_mmap_page lock\n"); perf_read_tsc_conversion()
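perf_read_tsc_conversion() above is a lockless seqcount reader: it samples pc->lock, copies the fields, and retries if the sequence word was odd (writer in progress) or changed during the copy. The sketch below shows that retry shape for a single writer in C11; it is an illustration only, the writer_update()/reader_snapshot() names are invented, the memory-barrier subtleties of the real kernel/userspace protocol are glossed over, and the perf_event_mmap_page layout is not reproduced.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;            /* odd = writer in progress */
static unsigned int time_mult, time_shift;  /* payload protected by seq */

static void writer_update(unsigned int mult, unsigned int shift)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> odd */
    time_mult = mult;
    time_shift = shift;
    atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* -> even */
}

static void reader_snapshot(unsigned int *mult, unsigned int *shift)
{
    unsigned int s;

    do {
        s = atomic_load_explicit(&seq, memory_order_acquire);
        *mult = time_mult;
        *shift = time_shift;
        /* retry if a write was in progress or completed meanwhile */
    } while ((s & 1) ||
             s != atomic_load_explicit(&seq, memory_order_acquire));
}

int main(void)
{
    unsigned int m, s;

    writer_update(123, 10);
    reader_snapshot(&m, &s);
    printf("mult=%u shift=%u\n", m, s);
    return 0;
}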
/linux-4.1.27/arch/mips/vr41xx/common/
H A Dicu.c162 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_piuint()
164 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_piuint()
177 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_piuint()
179 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_piuint()
192 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_aiuint()
194 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_aiuint()
207 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_aiuint()
209 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_aiuint()
222 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_kiuint()
224 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_kiuint()
237 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_kiuint()
239 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_kiuint()
250 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_macint()
252 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_macint()
262 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_macint()
264 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_macint()
274 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_dsiuint()
276 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_dsiuint()
286 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_dsiuint()
288 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_dsiuint()
298 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_firint()
300 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_firint()
310 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_firint()
312 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_firint()
325 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_pciint()
327 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_pciint()
341 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_pciint()
343 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_pciint()
357 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_scuint()
359 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_scuint()
373 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_scuint()
375 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_scuint()
389 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_csiint()
391 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_csiint()
405 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_csiint()
407 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_csiint()
421 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_bcuint()
423 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_bcuint()
437 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_bcuint()
439 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_bcuint()
485 raw_spin_lock_irq(&desc->lock); set_sysint1_assign()
524 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
532 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
545 raw_spin_lock_irq(&desc->lock); set_sysint2_assign()
592 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
600 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnv10_fence.h16 spinlock_t lock; member in struct:nv10_fence_priv
/linux-4.1.27/drivers/gpu/ipu-v3/
H A Dipu-smfc.c31 spinlock_t lock; member in struct:ipu_smfc_priv
48 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_set_burstsize()
56 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_set_burstsize()
68 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_map_channel()
76 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_map_channel()
88 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_set_watermark()
96 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_set_watermark()
107 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_enable()
114 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_enable()
125 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_disable()
135 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_disable()
153 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_get()
162 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_get()
172 spin_lock_irqsave(&priv->lock, flags); ipu_smfc_put()
174 spin_unlock_irqrestore(&priv->lock, flags); ipu_smfc_put()
189 spin_lock_init(&priv->lock); ipu_smfc_init()
/linux-4.1.27/fs/afs/
H A Dflock.c29 * initialise the lock manager thread if it isn't already running
50 * destroy the lock manager thread if it's running
59 * if the callback is broken on this vnode, then the lock may now be available
69 * the lock will time out in 5 minutes unless we extend it, so schedule
80 * first lock in the queue is itself a readlock)
81 * - the caller must hold the vnode lock
102 * do work for a lock, including:
103 * - probing for a lock we're waiting on but didn't get immediately
104 * - extending a lock that's close to timing out
117 spin_lock(&vnode->lock); afs_lock_work()
121 spin_unlock(&vnode->lock); afs_lock_work()
123 /* attempt to release the server lock; if it fails, we just afs_lock_work()
128 " Failed to release lock on {%x:%x} error %d\n", afs_lock_work()
131 spin_lock(&vnode->lock); afs_lock_work()
137 /* if we've got a lock, then it must be time to extend that lock as AFS afs_lock_work()
147 spin_unlock(&vnode->lock); afs_lock_work()
157 /* ummm... we failed to extend the lock - retry afs_lock_work()
160 " Failed to extend lock on {%x:%x} error %d\n", afs_lock_work()
170 /* if we don't have a granted lock, then we must've been called back by afs_lock_work()
171 * the server, and so it might be possible to get a lock we're afs_lock_work()
183 spin_unlock(&vnode->lock); afs_lock_work()
199 spin_lock(&vnode->lock); afs_lock_work()
200 /* the pending lock may have been withdrawn due to a afs_lock_work()
210 spin_unlock(&vnode->lock); afs_lock_work()
215 spin_unlock(&vnode->lock); afs_lock_work()
227 /* looks like the lock request was withdrawn on a signal */ afs_lock_work()
228 spin_unlock(&vnode->lock); afs_lock_work()
236 * - the caller must hold the vnode lock
251 * request a lock on a file on the server
290 spin_lock(&vnode->lock); afs_do_setlk()
305 /* if there's no-one else with a lock on this vnode, then we need to afs_do_setlk()
306 * ask the server for a lock */ afs_do_setlk()
316 spin_unlock(&vnode->lock); afs_do_setlk()
326 spin_lock(&vnode->lock); afs_do_setlk()
332 spin_lock(&vnode->lock); afs_do_setlk()
334 spin_unlock(&vnode->lock); afs_do_setlk()
339 /* otherwise, we need to wait for a local lock to become available */ afs_do_setlk()
348 spin_unlock(&vnode->lock); afs_do_setlk()
350 /* now we need to sleep and wait for the lock manager thread to get the afs_do_setlk()
351 * lock from the server */ afs_do_setlk()
359 spin_lock(&vnode->lock); afs_do_setlk()
364 * giving us the lock */ afs_do_setlk()
368 spin_lock(&vnode->lock); afs_do_setlk()
372 spin_unlock(&vnode->lock); afs_do_setlk()
379 /* we aren't going to get the lock, either because we're unwilling to afs_do_setlk()
385 /* kick the next pending lock into having a go */ afs_do_setlk()
392 spin_unlock(&vnode->lock); afs_do_setlk()
396 /* we've acquired a server lock, but it needs to be renewed after 5 afs_do_setlk()
398 spin_lock(&vnode->lock); afs_do_setlk()
405 /* the lock has been granted as far as we're concerned... */ afs_do_setlk()
416 spin_unlock(&vnode->lock); afs_do_setlk()
429 /* the VFS rejected the lock we just obtained, so we have to discard afs_do_setlk()
457 spin_lock(&vnode->lock); afs_do_unlk()
460 spin_unlock(&vnode->lock); afs_do_unlk()
465 /* discard the server lock only if all granted locks are gone */ afs_do_unlk()
468 spin_unlock(&vnode->lock); afs_do_unlk()
474 * return information about a lock we currently hold, if indeed we hold one
488 /* check local lock records first */ afs_do_getlk()
549 * Note: we could try to fake a POSIX lock request here by afs_flock()
564 * the POSIX lock management core VFS code copies the lock record and adds the
565 * copy into its own list, so we need to add that copy to the vnode's lock
577 * need to remove this lock from the vnode queue when it's removed from the
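The afs_lock_work() comments above describe a recurring shape: take vnode->lock to inspect the state, drop it around the blocking call to the server, then re-take it and re-check, because the lock may have been granted, withdrawn or timed out in the meantime. A hedged userspace skeleton of that shape follows; the demo_* names are invented and server_extend_lock() is a stand-in for the RPC, not the AFS client API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_vnode {
    pthread_mutex_t lock;
    bool lock_granted;          /* state that can change while unlocked */
};

static int server_extend_lock(void)
{
    return 0;                   /* pretend the RPC succeeded */
}

static void demo_lock_work(struct demo_vnode *v)
{
    pthread_mutex_lock(&v->lock);
    if (!v->lock_granted) {
        pthread_mutex_unlock(&v->lock);
        return;
    }
    pthread_mutex_unlock(&v->lock);     /* never hold vnode->lock across
                                         * the blocking server call */
    if (server_extend_lock() < 0)
        fprintf(stderr, "failed to extend lock\n");

    pthread_mutex_lock(&v->lock);
    /* re-check: the lock may have been released while we were out */
    if (v->lock_granted)
        printf("lock extended\n");
    pthread_mutex_unlock(&v->lock);
}

int main(void)
{
    struct demo_vnode v = { .lock_granted = true };

    pthread_mutex_init(&v.lock, NULL);
    demo_lock_work(&v);
    return 0;
}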
H A Dvnode.c111 * - caller must hold vnode->lock
220 spin_lock(&vnode->lock); afs_vnode_finalise_status_update()
225 spin_unlock(&vnode->lock); afs_vnode_finalise_status_update()
239 spin_lock(&vnode->lock); afs_vnode_status_update_failed()
251 spin_unlock(&vnode->lock); afs_vnode_status_update_failed()
292 spin_lock(&vnode->lock); afs_vnode_fetch_status()
296 spin_unlock(&vnode->lock); afs_vnode_fetch_status()
327 spin_unlock(&vnode->lock); afs_vnode_fetch_status()
332 spin_lock(&vnode->lock); afs_vnode_fetch_status()
336 spin_unlock(&vnode->lock); afs_vnode_fetch_status()
347 spin_unlock(&vnode->lock); afs_vnode_fetch_status()
383 spin_lock(&vnode->lock); afs_vnode_fetch_status()
386 spin_unlock(&vnode->lock); afs_vnode_fetch_status()
409 spin_lock(&vnode->lock); afs_vnode_fetch_data()
411 spin_unlock(&vnode->lock); afs_vnode_fetch_data()
440 spin_lock(&vnode->lock); afs_vnode_fetch_data()
443 spin_unlock(&vnode->lock); afs_vnode_fetch_data()
467 spin_lock(&vnode->lock); afs_vnode_create()
469 spin_unlock(&vnode->lock); afs_vnode_create()
497 spin_lock(&vnode->lock); afs_vnode_create()
500 spin_unlock(&vnode->lock); afs_vnode_create()
523 spin_lock(&vnode->lock); afs_vnode_remove()
525 spin_unlock(&vnode->lock); afs_vnode_remove()
552 spin_lock(&vnode->lock); afs_vnode_remove()
555 spin_unlock(&vnode->lock); afs_vnode_remove()
582 spin_lock(&vnode->lock); afs_vnode_link()
584 spin_unlock(&vnode->lock); afs_vnode_link()
585 spin_lock(&dvnode->lock); afs_vnode_link()
587 spin_unlock(&dvnode->lock); afs_vnode_link()
616 spin_lock(&vnode->lock); afs_vnode_link()
619 spin_unlock(&vnode->lock); afs_vnode_link()
620 spin_lock(&dvnode->lock); afs_vnode_link()
623 spin_unlock(&dvnode->lock); afs_vnode_link()
649 spin_lock(&vnode->lock); afs_vnode_symlink()
651 spin_unlock(&vnode->lock); afs_vnode_symlink()
679 spin_lock(&vnode->lock); afs_vnode_symlink()
682 spin_unlock(&vnode->lock); afs_vnode_symlink()
714 spin_lock(&orig_dvnode->lock); afs_vnode_rename()
716 spin_unlock(&orig_dvnode->lock); afs_vnode_rename()
718 spin_lock(&new_dvnode->lock); afs_vnode_rename()
720 spin_unlock(&new_dvnode->lock); afs_vnode_rename()
752 spin_lock(&orig_dvnode->lock); afs_vnode_rename()
755 spin_unlock(&orig_dvnode->lock); afs_vnode_rename()
757 spin_lock(&new_dvnode->lock); afs_vnode_rename()
760 spin_unlock(&new_dvnode->lock); afs_vnode_rename()
785 spin_lock(&vnode->lock); afs_vnode_store_data()
787 spin_unlock(&vnode->lock); afs_vnode_store_data()
814 spin_lock(&vnode->lock); afs_vnode_store_data()
817 spin_unlock(&vnode->lock); afs_vnode_store_data()
838 spin_lock(&vnode->lock); afs_vnode_setattr()
840 spin_unlock(&vnode->lock); afs_vnode_setattr()
866 spin_lock(&vnode->lock); afs_vnode_setattr()
869 spin_unlock(&vnode->lock); afs_vnode_setattr()
913 * get a lock on a file
952 * extend a lock on a file
990 * release a lock on a file
/linux-4.1.27/sound/oss/
H A Dv_midi.h6 spinlock_t lock; member in struct:vmidi_devc
/linux-4.1.27/drivers/staging/unisys/visorutil/
H A Dcharqueue.c31 spinlock_t lock; /* read/write lock for this structure */ member in struct:charqueue
48 spin_lock_init(&cq->lock); visor_charqueue_create()
57 spin_lock(&charqueue->lock); visor_charqueue_enqueue()
63 spin_unlock(&charqueue->lock); visor_charqueue_enqueue()
71 spin_lock(&charqueue->lock); visor_charqueue_is_empty()
73 spin_unlock(&charqueue->lock); visor_charqueue_is_empty()
92 spin_lock(&charqueue->lock); charqueue_dequeue()
94 spin_unlock(&charqueue->lock); charqueue_dequeue()
103 spin_lock(&charqueue->lock); visor_charqueue_dequeue_n()
116 spin_unlock(&charqueue->lock); visor_charqueue_dequeue_n()
/linux-4.1.27/drivers/clk/spear/
H A Dclk-frac-synth.c75 if (frac->lock) clk_frac_recalc_rate()
76 spin_lock_irqsave(frac->lock, flags); clk_frac_recalc_rate()
80 if (frac->lock) clk_frac_recalc_rate()
81 spin_unlock_irqrestore(frac->lock, flags); clk_frac_recalc_rate()
106 if (frac->lock) clk_frac_set_rate()
107 spin_lock_irqsave(frac->lock, flags); clk_frac_set_rate()
113 if (frac->lock) clk_frac_set_rate()
114 spin_unlock_irqrestore(frac->lock, flags); clk_frac_set_rate()
127 struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock) clk_register_frac()
148 frac->lock = lock; clk_register_frac()
125 clk_register_frac(const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock) clk_register_frac() argument
H A Dclk-gpt-synth.c62 if (gpt->lock) clk_gpt_recalc_rate()
63 spin_lock_irqsave(gpt->lock, flags); clk_gpt_recalc_rate()
67 if (gpt->lock) clk_gpt_recalc_rate()
68 spin_unlock_irqrestore(gpt->lock, flags); clk_gpt_recalc_rate()
91 if (gpt->lock) clk_gpt_set_rate()
92 spin_lock_irqsave(gpt->lock, flags); clk_gpt_set_rate()
102 if (gpt->lock) clk_gpt_set_rate()
103 spin_unlock_irqrestore(gpt->lock, flags); clk_gpt_set_rate()
116 rtbl_cnt, spinlock_t *lock) clk_register_gpt()
137 gpt->lock = lock; clk_register_gpt()
114 clk_register_gpt(const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock) clk_register_gpt() argument
H A Dclk.h55 spinlock_t *lock; member in struct:clk_aux
68 spinlock_t *lock; member in struct:clk_frac
82 spinlock_t *lock; member in struct:clk_gpt
99 spinlock_t *lock; member in struct:clk_vco
106 spinlock_t *lock; member in struct:clk_pll
116 u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
119 struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
122 rtbl_cnt, spinlock_t *lock);
127 spinlock_t *lock, struct clk **pll_clk,
H A Dclk-vco-pll.c131 if (pll->vco->lock) clk_pll_recalc_rate()
132 spin_lock_irqsave(pll->vco->lock, flags); clk_pll_recalc_rate()
136 if (pll->vco->lock) clk_pll_recalc_rate()
137 spin_unlock_irqrestore(pll->vco->lock, flags); clk_pll_recalc_rate()
154 if (pll->vco->lock) clk_pll_set_rate()
155 spin_lock_irqsave(pll->vco->lock, flags); clk_pll_set_rate()
162 if (pll->vco->lock) clk_pll_set_rate()
163 spin_unlock_irqrestore(pll->vco->lock, flags); clk_pll_set_rate()
199 if (vco->lock) clk_vco_recalc_rate()
200 spin_lock_irqsave(vco->lock, flags); clk_vco_recalc_rate()
206 if (vco->lock) clk_vco_recalc_rate()
207 spin_unlock_irqrestore(vco->lock, flags); clk_vco_recalc_rate()
241 if (vco->lock) clk_vco_set_rate()
242 spin_lock_irqsave(vco->lock, flags); clk_vco_set_rate()
263 if (vco->lock) clk_vco_set_rate()
264 spin_unlock_irqrestore(vco->lock, flags); clk_vco_set_rate()
279 spinlock_t *lock, struct clk **pll_clk, clk_register_vco_pll()
311 vco->lock = lock; clk_register_vco_pll()
319 parent_name, 0, mode_reg, PLL_ENABLE, 0, lock); clk_register_vco_pll()
275 clk_register_vco_pll(const char *vco_name, const char *pll_name, const char *vco_gate_name, const char *parent_name, unsigned long flags, void __iomem *mode_reg, void __iomem *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock, struct clk **pll_clk, struct clk **vco_gate_clk) clk_register_vco_pll() argument
/linux-4.1.27/drivers/clk/tegra/
H A Dclk-pll-out.c48 if (pll_out->lock) clk_pll_out_enable()
49 spin_lock_irqsave(pll_out->lock, flags); clk_pll_out_enable()
58 if (pll_out->lock) clk_pll_out_enable()
59 spin_unlock_irqrestore(pll_out->lock, flags); clk_pll_out_enable()
70 if (pll_out->lock) clk_pll_out_disable()
71 spin_lock_irqsave(pll_out->lock, flags); clk_pll_out_disable()
80 if (pll_out->lock) clk_pll_out_disable()
81 spin_unlock_irqrestore(pll_out->lock, flags); clk_pll_out_disable()
93 spinlock_t *lock) tegra_clk_register_pll_out()
113 pll_out->lock = lock; tegra_clk_register_pll_out()
90 tegra_clk_register_pll_out(const char *name, const char *parent_name, void __iomem *reg, u8 enb_bit_idx, u8 rst_bit_idx, unsigned long flags, u8 pll_out_flags, spinlock_t *lock) tegra_clk_register_pll_out() argument
/linux-4.1.27/drivers/clk/
H A Dclk-fractional-divider.c28 if (fd->lock) clk_fd_recalc_rate()
29 spin_lock_irqsave(fd->lock, flags); clk_fd_recalc_rate()
33 if (fd->lock) clk_fd_recalc_rate()
34 spin_unlock_irqrestore(fd->lock, flags); clk_fd_recalc_rate()
81 if (fd->lock) clk_fd_set_rate()
82 spin_lock_irqsave(fd->lock, flags); clk_fd_set_rate()
89 if (fd->lock) clk_fd_set_rate()
90 spin_unlock_irqrestore(fd->lock, flags); clk_fd_set_rate()
105 u8 clk_divider_flags, spinlock_t *lock) clk_register_fractional_divider()
129 fd->lock = lock; clk_register_fractional_divider()
102 clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, u8 clk_divider_flags, spinlock_t *lock) clk_register_fractional_divider() argument
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
H A Dvmwgfx_marker.c42 spin_lock_init(&queue->lock); vmw_marker_queue_init()
49 spin_lock(&queue->lock); vmw_marker_queue_takedown()
53 spin_unlock(&queue->lock); vmw_marker_queue_takedown()
66 spin_lock(&queue->lock); vmw_marker_push()
68 spin_unlock(&queue->lock); vmw_marker_push()
80 spin_lock(&queue->lock); vmw_marker_pull()
102 spin_unlock(&queue->lock); vmw_marker_pull()
111 spin_lock(&queue->lock); vmw_fifo_lag()
115 spin_unlock(&queue->lock); vmw_fifo_lag()
136 spin_lock(&queue->lock); vmw_wait_lag()
144 spin_unlock(&queue->lock); vmw_wait_lag()
/linux-4.1.27/drivers/block/aoe/
H A Daoemain.c25 static spinlock_t lock; discover_timer() local
32 spin_lock_init(&lock); discover_timer()
37 spin_lock_irqsave(&lock, flags); discover_timer()
42 spin_unlock_irqrestore(&lock, flags); discover_timer()
47 spin_lock_irqsave(&lock, flags); discover_timer()
49 spin_unlock_irqrestore(&lock, flags); discover_timer()
/linux-4.1.27/include/net/
H A Dgro_cells.h36 spin_lock(&cell->napi_skbs.lock); gro_cells_receive()
42 spin_unlock(&cell->napi_skbs.lock); gro_cells_receive()
52 spin_lock(&cell->napi_skbs.lock); gro_cell_poll()
57 spin_unlock(&cell->napi_skbs.lock); gro_cell_poll()
60 spin_lock(&cell->napi_skbs.lock); gro_cell_poll()
65 spin_unlock(&cell->napi_skbs.lock); gro_cell_poll()
/linux-4.1.27/arch/arm/mach-spear/
H A Dpl080.c23 static spinlock_t lock = __SPIN_LOCK_UNLOCKED(x); variable
35 spin_lock_irqsave(&lock, flags); pl080_get_signal()
40 spin_unlock_irqrestore(&lock, flags); pl080_get_signal()
60 spin_unlock_irqrestore(&lock, flags); pl080_get_signal()
69 spin_lock_irqsave(&lock, flags); pl080_put_signal()
77 spin_unlock_irqrestore(&lock, flags); pl080_put_signal()
/linux-4.1.27/arch/arm/mach-omap1/
H A Dsram.S36 tst r0, #1 << 4 @ want lock mode?
38 bic r0, r0, #1 << 4 @ else clear lock bit
40 orr r0, r0, #1 << 4 @ set lock bit again
52 lock: ldrh r4, [r2], #0 @ read back dpll value label
53 tst r0, #1 << 4 @ want lock mode?
56 beq lock @ try again
/linux-4.1.27/drivers/nfc/
H A Dnfcsim.c38 struct mutex lock; member in struct:nfcsim
69 mutex_lock(&dev->lock); nfcsim_cleanup_dev()
77 mutex_unlock(&dev->lock); nfcsim_cleanup_dev()
103 mutex_lock(&dev->lock); nfcsim_dev_up()
107 mutex_unlock(&dev->lock); nfcsim_dev_up()
118 mutex_lock(&dev->lock); nfcsim_dev_down()
122 mutex_unlock(&dev->lock); nfcsim_dev_down()
139 mutex_lock(&peer->lock); nfcsim_dep_link_up()
148 mutex_unlock(&peer->lock); nfcsim_dep_link_up()
152 mutex_unlock(&peer->lock); nfcsim_dep_link_up()
154 mutex_lock(&dev->lock); nfcsim_dep_link_up()
159 mutex_unlock(&dev->lock); nfcsim_dep_link_up()
166 mutex_unlock(&dev->lock); nfcsim_dep_link_up()
188 mutex_lock(&dev->lock); nfcsim_start_poll()
218 mutex_unlock(&dev->lock); nfcsim_start_poll()
229 mutex_lock(&dev->lock); nfcsim_stop_poll()
233 mutex_unlock(&dev->lock); nfcsim_stop_poll()
261 mutex_lock(&dev->lock); nfcsim_wq_recv()
284 mutex_unlock(&dev->lock); nfcsim_wq_recv()
295 mutex_lock(&dev->lock); nfcsim_tx()
298 mutex_unlock(&dev->lock); nfcsim_tx()
306 mutex_unlock(&dev->lock); nfcsim_tx()
308 mutex_lock(&peer->lock); nfcsim_tx()
314 mutex_unlock(&peer->lock); nfcsim_tx()
328 mutex_unlock(&peer->lock); nfcsim_tx()
394 mutex_lock(&dev->lock); nfcsim_wq_poll()
395 mutex_lock(&peer->lock); nfcsim_wq_poll()
434 mutex_unlock(&peer->lock); nfcsim_wq_poll()
435 mutex_unlock(&dev->lock); nfcsim_wq_poll()
447 mutex_init(&dev->lock); nfcsim_init_dev()
/linux-4.1.27/drivers/staging/gdm72xx/
H A Dgdm_usb.c111 /* Before this function is called, spin lock should be locked. */ get_tx_struct()
129 /* Before this function is called, spin lock should be locked. */ put_tx_struct()
135 /* Before this function is called, spin lock should be locked. */ get_rx_struct()
154 /* Before this function is called, spin lock should be locked. */ put_rx_struct()
168 spin_lock_irqsave(&tx->lock, flags); release_usb()
185 spin_unlock_irqrestore(&tx->lock, flags); release_usb()
187 spin_lock_irqsave(&rx->lock, flags); release_usb()
199 spin_unlock_irqrestore(&rx->lock, flags); release_usb()
221 spin_lock_init(&tx->lock); init_usb()
222 spin_lock_init(&rx->lock); init_usb()
224 spin_lock_irqsave(&tx->lock, flags); init_usb()
228 spin_unlock_irqrestore(&tx->lock, flags); init_usb()
234 spin_unlock_irqrestore(&tx->lock, flags); init_usb()
242 spin_lock_irqsave(&rx->lock, flags); init_usb()
244 spin_unlock_irqrestore(&rx->lock, flags); init_usb()
282 spin_lock_irqsave(&tx->lock, flags); gdm_usb_send_complete()
284 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send_complete()
311 spin_lock_irqsave(&tx->lock, flags); gdm_usb_send()
318 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send()
325 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send()
364 spin_lock_irqsave(&rx->lock, flags2); gdm_usb_send()
367 spin_unlock_irqrestore(&rx->lock, flags2); gdm_usb_send()
390 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send()
400 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send()
419 spin_lock_irqsave(&tx->lock, flags); gdm_usb_rcv_complete()
451 spin_lock_irqsave(&rx->lock, flags2); gdm_usb_rcv_complete()
453 spin_unlock_irqrestore(&rx->lock, flags2); gdm_usb_rcv_complete()
455 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_rcv_complete()
477 spin_lock_irqsave(&rx->lock, flags); gdm_usb_receive()
479 spin_unlock_irqrestore(&rx->lock, flags); gdm_usb_receive()
505 spin_lock_irqsave(&tx->lock, flags); do_pm_control()
520 spin_unlock_irqrestore(&tx->lock, flags); do_pm_control()
657 spin_lock_irqsave(&rx->lock, flags); gdm_suspend()
662 spin_unlock_irqrestore(&rx->lock, flags); gdm_suspend()
682 spin_lock_irqsave(&rx->lock, flags); gdm_resume()
687 spin_unlock_irqrestore(&rx->lock, flags); gdm_resume()
719 spin_lock_irqsave(&rx->lock, flags); k_mode_thread()
724 spin_unlock_irqrestore(&rx->lock, flags); k_mode_thread()
726 spin_lock_irqsave(&tx->lock, flags); k_mode_thread()
740 spin_unlock_irqrestore(&tx->lock, flags); k_mode_thread()
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dqueue.c105 spin_lock_bh(&stats->lock); __cw1200_queue_gc()
109 spin_unlock_bh(&stats->lock); __cw1200_queue_gc()
139 spin_lock_bh(&queue->lock); cw1200_queue_gc()
141 spin_unlock_bh(&queue->lock); cw1200_queue_gc()
154 spin_lock_init(&stats->lock); cw1200_queue_stats_init()
181 spin_lock_init(&queue->lock); cw1200_queue_init()
210 spin_lock_bh(&queue->lock); cw1200_queue_clear()
222 spin_lock_bh(&stats->lock); cw1200_queue_clear()
228 spin_unlock_bh(&stats->lock); cw1200_queue_clear()
233 spin_unlock_bh(&queue->lock); cw1200_queue_clear()
267 spin_lock_bh(&queue->lock); cw1200_queue_get_num_queued()
277 spin_unlock_bh(&queue->lock); cw1200_queue_get_num_queued()
292 spin_lock_bh(&queue->lock); cw1200_queue_put()
311 spin_lock_bh(&stats->lock); cw1200_queue_put()
314 spin_unlock_bh(&stats->lock); cw1200_queue_put()
329 spin_unlock_bh(&queue->lock); cw1200_queue_put()
344 spin_lock_bh(&queue->lock); cw1200_queue_get()
362 spin_lock_bh(&stats->lock); cw1200_queue_get()
366 spin_unlock_bh(&stats->lock); cw1200_queue_get()
368 spin_unlock_bh(&queue->lock); cw1200_queue_get()
386 spin_lock_bh(&queue->lock); cw1200_queue_requeue()
400 spin_lock_bh(&stats->lock); cw1200_queue_requeue()
403 spin_unlock_bh(&stats->lock); cw1200_queue_requeue()
412 spin_unlock_bh(&queue->lock); cw1200_queue_requeue()
420 spin_lock_bh(&queue->lock); cw1200_queue_requeue_all()
426 spin_lock_bh(&stats->lock); cw1200_queue_requeue_all()
429 spin_unlock_bh(&stats->lock); cw1200_queue_requeue_all()
438 spin_unlock_bh(&queue->lock); cw1200_queue_requeue_all()
457 spin_lock_bh(&queue->lock); cw1200_queue_remove()
486 spin_unlock_bh(&queue->lock); cw1200_queue_remove()
506 spin_lock_bh(&queue->lock); cw1200_queue_get_skb()
520 spin_unlock_bh(&queue->lock); cw1200_queue_get_skb()
526 spin_lock_bh(&queue->lock); cw1200_queue_lock()
528 spin_unlock_bh(&queue->lock); cw1200_queue_lock()
533 spin_lock_bh(&queue->lock); cw1200_queue_unlock()
535 spin_unlock_bh(&queue->lock); cw1200_queue_unlock()
545 spin_lock_bh(&queue->lock); cw1200_queue_get_xmit_timestamp()
555 spin_unlock_bh(&queue->lock); cw1200_queue_get_xmit_timestamp()
564 spin_lock_bh(&stats->lock); cw1200_queue_stats_is_empty()
578 spin_unlock_bh(&stats->lock); cw1200_queue_stats_is_empty()
/linux-4.1.27/drivers/clk/hisilicon/
H A Dclkgate-separated.c45 spinlock_t *lock; member in struct:clkgate_separated
55 if (sclk->lock) clkgate_separated_enable()
56 spin_lock_irqsave(sclk->lock, flags); clkgate_separated_enable()
60 if (sclk->lock) clkgate_separated_enable()
61 spin_unlock_irqrestore(sclk->lock, flags); clkgate_separated_enable()
72 if (sclk->lock) clkgate_separated_disable()
73 spin_lock_irqsave(sclk->lock, flags); clkgate_separated_disable()
77 if (sclk->lock) clkgate_separated_disable()
78 spin_unlock_irqrestore(sclk->lock, flags); clkgate_separated_disable()
103 u8 clk_gate_flags, spinlock_t *lock) hisi_register_clkgate_sep()
99 hisi_register_clkgate_sep(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock) hisi_register_clkgate_sep() argument
/linux-4.1.27/arch/arm/mach-imx/
H A Dclk-gate2.c35 spinlock_t *lock; member in struct:clk_gate2
47 spin_lock_irqsave(gate->lock, flags); clk_gate2_enable()
57 spin_unlock_irqrestore(gate->lock, flags); clk_gate2_enable()
68 spin_lock_irqsave(gate->lock, flags); clk_gate2_disable()
82 spin_unlock_irqrestore(gate->lock, flags); clk_gate2_disable()
108 spin_lock_irqsave(gate->lock, flags); clk_gate2_disable_unused()
116 spin_unlock_irqrestore(gate->lock, flags); clk_gate2_disable_unused()
129 u8 clk_gate2_flags, spinlock_t *lock, clk_register_gate2()
144 gate->lock = lock; clk_register_gate2()
126 clk_register_gate2(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate2_flags, spinlock_t *lock, unsigned int *share_count) clk_register_gate2() argument
/linux-4.1.27/net/bridge/
H A Dbr_stp.c51 /* called under bridge lock */ br_get_port()
64 /* called under bridge lock */ br_should_become_root_port()
130 /* called under bridge lock */ br_root_selection()
158 /* called under bridge lock */ br_become_root_bridge()
173 /* called under bridge lock */ br_transmit_config()
213 /* called under bridge lock */ br_record_config_information()
227 /* called under bridge lock */ br_record_config_timeout_values()
237 /* called under bridge lock */ br_transmit_tcn()
250 /* called under bridge lock */ br_should_become_designated_port()
280 /* called under bridge lock */ br_designated_port_selection()
293 /* called under bridge lock */ br_supersedes_port_info()
325 /* called under bridge lock */ br_topology_change_acknowledged()
332 /* called under bridge lock */ br_topology_change_detection()
355 /* called under bridge lock */ br_config_bpdu_generation()
367 /* called under bridge lock */ br_reply()
373 /* called under bridge lock */ br_configuration_update()
380 /* called under bridge lock */ br_become_designated_port()
393 /* called under bridge lock */ br_make_blocking()
410 /* called under bridge lock */ br_make_forwarding()
435 /* called under bridge lock */ br_port_state_selection()
471 /* called under bridge lock */ br_topology_change_acknowledge()
478 /* called under bridge lock */ br_received_config_bpdu()
515 /* called under bridge lock */ br_received_tcn_bpdu()
535 spin_lock_bh(&br->lock); br_set_hello_time()
539 spin_unlock_bh(&br->lock); br_set_hello_time()
550 spin_lock_bh(&br->lock); br_set_max_age()
554 spin_unlock_bh(&br->lock); br_set_max_age()
571 spin_lock_bh(&br->lock); br_set_forward_delay()
580 spin_unlock_bh(&br->lock); br_set_forward_delay()
H A Dbr_stp_timer.c20 /* called under bridge lock */ br_is_designated_for_some_port()
39 spin_lock(&br->lock); br_hello_timer_expired()
45 spin_unlock(&br->lock); br_hello_timer_expired()
67 spin_lock(&br->lock); br_message_age_timer_expired()
78 spin_unlock(&br->lock); br_message_age_timer_expired()
88 spin_lock(&br->lock); br_forward_delay_timer_expired()
103 spin_unlock(&br->lock); br_forward_delay_timer_expired()
111 spin_lock(&br->lock); br_tcn_timer_expired()
117 spin_unlock(&br->lock); br_tcn_timer_expired()
125 spin_lock(&br->lock); br_topology_change_timer_expired()
128 spin_unlock(&br->lock); br_topology_change_timer_expired()
138 spin_lock(&p->br->lock); br_hold_timer_expired()
141 spin_unlock(&p->br->lock); br_hold_timer_expired()
H A Dbr_stp_if.c35 /* called under bridge lock */ br_init_port()
45 /* called under bridge lock */ br_stp_enable_bridge()
50 spin_lock_bh(&br->lock); br_stp_enable_bridge()
61 spin_unlock_bh(&br->lock); br_stp_enable_bridge()
69 spin_lock_bh(&br->lock); br_stp_disable_bridge()
78 spin_unlock_bh(&br->lock); br_stp_disable_bridge()
86 /* called under bridge lock */ br_stp_enable_port()
95 /* called under bridge lock */ br_stp_disable_port()
136 spin_lock_bh(&br->lock); br_stp_start()
154 spin_unlock_bh(&br->lock); br_stp_start()
168 spin_lock_bh(&br->lock); br_stp_stop()
170 spin_unlock_bh(&br->lock); br_stp_stop()
189 /* called under bridge lock */ br_stp_change_bridge_id()
223 /* called under bridge lock */ br_stp_recalculate_bridge_id()
249 /* Acquires and releases bridge lock */ br_stp_set_bridge_priority()
255 spin_lock_bh(&br->lock); br_stp_set_bridge_priority()
273 spin_unlock_bh(&br->lock); br_stp_set_bridge_priority()
276 /* called under bridge lock */ br_stp_set_port_priority()
299 /* called under bridge lock */ br_stp_set_path_cost()
/linux-4.1.27/drivers/rtc/
H A Drtc-tx4939.c22 spinlock_t lock; member in struct:tx4939rtc_plat_data
58 spin_lock_irq(&pdata->lock); tx4939_rtc_set_mmss()
65 spin_unlock_irq(&pdata->lock); tx4939_rtc_set_mmss()
77 spin_lock_irq(&pdata->lock); tx4939_rtc_read_time()
82 spin_unlock_irq(&pdata->lock); tx4939_rtc_read_time()
88 spin_unlock_irq(&pdata->lock); tx4939_rtc_read_time()
116 spin_lock_irq(&pdata->lock); tx4939_rtc_set_alarm()
122 spin_unlock_irq(&pdata->lock); tx4939_rtc_set_alarm()
135 spin_lock_irq(&pdata->lock); tx4939_rtc_read_alarm()
140 spin_unlock_irq(&pdata->lock); tx4939_rtc_read_alarm()
149 spin_unlock_irq(&pdata->lock); tx4939_rtc_read_alarm()
159 spin_lock_irq(&pdata->lock); tx4939_rtc_alarm_irq_enable()
163 spin_unlock_irq(&pdata->lock); tx4939_rtc_alarm_irq_enable()
173 spin_lock(&pdata->lock); tx4939_rtc_interrupt()
178 spin_unlock(&pdata->lock); tx4939_rtc_interrupt()
201 spin_lock_irq(&pdata->lock); tx4939_rtc_nvram_read()
207 spin_unlock_irq(&pdata->lock); tx4939_rtc_nvram_read()
220 spin_lock_irq(&pdata->lock); tx4939_rtc_nvram_write()
226 spin_unlock_irq(&pdata->lock); tx4939_rtc_nvram_write()
260 spin_lock_init(&pdata->lock); tx4939_rtc_probe()
280 spin_lock_irq(&pdata->lock); tx4939_rtc_remove()
282 spin_unlock_irq(&pdata->lock); tx4939_rtc_remove()
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/
H A Dlibcfs_lock.c38 /** destroy cpu-partition lock, see libcfs_private.h for more detail */
51 * create cpu-partition lock, see libcfs_private.h for more detail.
53 * cpu-partition lock is designed for large-scale SMP system, so we need to
61 spinlock_t *lock; cfs_percpt_lock_alloc() local
70 pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock)); cfs_percpt_lock_alloc()
76 cfs_percpt_for_each(lock, i, pcl->pcl_locks) cfs_percpt_lock_alloc()
77 spin_lock_init(lock); cfs_percpt_lock_alloc()
84 * lock a CPU partition
87 * hold private lock indexed by \a index
90 * exclusively lock @pcl and nobody can take private lock
102 } else { /* serialize with exclusive lock */ cfs_percpt_lock()
112 /* exclusive lock request */ cfs_percpt_lock()
117 /* nobody should take private lock after this cfs_percpt_lock()
/linux-4.1.27/arch/sparc/kernel/
H A Debus.c75 spin_lock_irqsave(&p->lock, flags); ebus_dma_irq()
78 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_irq()
134 spin_lock_irqsave(&p->lock, flags); ebus_dma_irq_enable()
138 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_irq_enable()
140 spin_lock_irqsave(&p->lock, flags); ebus_dma_irq_enable()
144 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_irq_enable()
161 spin_lock_irqsave(&p->lock, flags); ebus_dma_unregister()
168 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_unregister()
184 spin_lock_irqsave(&p->lock, flags); ebus_dma_request()
198 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_request()
209 spin_lock_irqsave(&p->lock, flags); ebus_dma_prepare()
224 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_prepare()
245 spin_lock_irqsave(&p->lock, flags); ebus_dma_enable()
254 spin_unlock_irqrestore(&p->lock, flags); ebus_dma_enable()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/
H A Dnamedb.c91 write_lock_irq(&namedb->lock); nvkm_namedb_insert()
98 write_unlock_irq(&namedb->lock); nvkm_namedb_insert()
107 write_lock_irq(&namedb->lock); nvkm_namedb_remove()
109 write_unlock_irq(&namedb->lock); nvkm_namedb_remove()
117 read_lock(&namedb->lock); nvkm_namedb_get()
120 read_unlock(&namedb->lock); nvkm_namedb_get()
128 read_lock(&namedb->lock); nvkm_namedb_get_class()
131 read_unlock(&namedb->lock); nvkm_namedb_get_class()
139 read_lock(&namedb->lock); nvkm_namedb_get_vinst()
142 read_unlock(&namedb->lock); nvkm_namedb_get_vinst()
150 read_lock(&namedb->lock); nvkm_namedb_get_cinst()
153 read_unlock(&namedb->lock); nvkm_namedb_get_cinst()
161 read_unlock(&handle->namedb->lock); nvkm_namedb_put()
180 rwlock_init(&namedb->lock); nvkm_namedb_create_()
/linux-4.1.27/drivers/xen/xen-pciback/
H A Dpassthrough.c14 /* Access to dev_list must be protected by lock */
16 struct mutex lock; member in struct:passthrough_dev_data
28 mutex_lock(&dev_data->lock); __xen_pcibk_get_pci_dev()
39 mutex_unlock(&dev_data->lock); __xen_pcibk_get_pci_dev()
58 mutex_lock(&dev_data->lock); __xen_pcibk_add_pci_dev()
60 mutex_unlock(&dev_data->lock); __xen_pcibk_add_pci_dev()
72 struct pci_dev *dev, bool lock) __xen_pcibk_release_pci_dev()
78 mutex_lock(&dev_data->lock); __xen_pcibk_release_pci_dev()
88 mutex_unlock(&dev_data->lock); __xen_pcibk_release_pci_dev()
91 if (lock) __xen_pcibk_release_pci_dev()
94 if (lock) __xen_pcibk_release_pci_dev()
107 mutex_init(&dev_data->lock); __xen_pcibk_init_devices()
126 mutex_lock(&dev_data->lock); __xen_pcibk_publish_pci_roots()
153 mutex_unlock(&dev_data->lock); __xen_pcibk_publish_pci_roots()
71 __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev, bool lock) __xen_pcibk_release_pci_dev() argument
H A Dvpci.c19 /* Access to dev_list must be protected by lock */
21 struct mutex lock; member in struct:vpci_dev_data
42 mutex_lock(&vpci_dev->lock); __xen_pcibk_get_pci_dev()
53 mutex_unlock(&vpci_dev->lock); __xen_pcibk_get_pci_dev()
92 mutex_lock(&vpci_dev->lock); __xen_pcibk_add_pci_dev()
135 mutex_unlock(&vpci_dev->lock); __xen_pcibk_add_pci_dev()
148 struct pci_dev *dev, bool lock) __xen_pcibk_release_pci_dev()
154 mutex_lock(&vpci_dev->lock); __xen_pcibk_release_pci_dev()
170 mutex_unlock(&vpci_dev->lock); __xen_pcibk_release_pci_dev()
173 if (lock) __xen_pcibk_release_pci_dev()
176 if (lock) __xen_pcibk_release_pci_dev()
190 mutex_init(&vpci_dev->lock); __xen_pcibk_init_devices()
239 mutex_lock(&vpci_dev->lock); __xen_pcibk_get_pcifront_dev()
257 mutex_unlock(&vpci_dev->lock); __xen_pcibk_get_pcifront_dev()
147 __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev, bool lock) __xen_pcibk_release_pci_dev() argument
/linux-4.1.27/fs/nfs_common/
H A Dgrace.c17 * @net: net namespace that this lock manager belongs to
21 * out. Currently grace periods are only enforced by the two lock
40 * @net: net namespace that this lock manager belongs to
43 * Call this function to state that the given lock manager is ready to
44 * resume regular locking. The grace period will not end until all lock
62 * to answer ordinary lock requests, and when they should accept only
63 * lock reclaims.
/linux-4.1.27/drivers/usb/usbip/
H A Dvhci_sysfs.c38 spin_lock(&the_controller->lock); status_show()
56 spin_lock(&vdev->ud.lock); status_show()
70 spin_unlock(&vdev->ud.lock); status_show()
73 spin_unlock(&the_controller->lock); status_show()
86 /* lock */ vhci_port_disconnect()
87 spin_lock(&the_controller->lock); vhci_port_disconnect()
91 spin_lock(&vdev->ud.lock); vhci_port_disconnect()
96 spin_unlock(&vdev->ud.lock); vhci_port_disconnect()
97 spin_unlock(&the_controller->lock); vhci_port_disconnect()
103 spin_unlock(&vdev->ud.lock); vhci_port_disconnect()
104 spin_unlock(&the_controller->lock); vhci_port_disconnect()
202 /* the lock is needed until the vdev status is set to used */ store_attach()
204 /* begin a lock */ store_attach()
205 spin_lock(&the_controller->lock); store_attach()
207 spin_lock(&vdev->ud.lock); store_attach()
210 /* end of the lock */ store_attach()
211 spin_unlock(&vdev->ud.lock); store_attach()
212 spin_unlock(&the_controller->lock); store_attach()
229 spin_unlock(&vdev->ud.lock); store_attach()
230 spin_unlock(&the_controller->lock); store_attach()
231 /* end the lock */ store_attach()
/linux-4.1.27/drivers/tty/serial/
H A Dsunhv.c220 spin_lock_irqsave(&port->lock, flags); sunhv_interrupt()
223 spin_unlock_irqrestore(&port->lock, flags); sunhv_interrupt()
231 /* port->lock is not held. */ sunhv_tx_empty()
241 /* port->lock held by caller. */ sunhv_set_mctrl()
247 /* port->lock is held by caller and interrupts are disabled. */ sunhv_get_mctrl()
253 /* port->lock held by caller. */ sunhv_stop_tx()
259 /* port->lock held by caller. */ sunhv_start_tx()
265 /* port->lock is not held. */ sunhv_send_xchar()
274 spin_lock_irqsave(&port->lock, flags); sunhv_send_xchar()
283 spin_unlock_irqrestore(&port->lock, flags); sunhv_send_xchar()
286 /* port->lock held by caller. */ sunhv_stop_rx()
291 /* port->lock is not held. */ sunhv_break_ctl()
298 spin_lock_irqsave(&port->lock, flags); sunhv_break_ctl()
307 spin_unlock_irqrestore(&port->lock, flags); sunhv_break_ctl()
311 /* port->lock is not held. */ sunhv_startup()
317 /* port->lock is not held. */ sunhv_shutdown()
322 /* port->lock is not held. */ sunhv_set_termios()
331 spin_lock_irqsave(&port->lock, flags); sunhv_set_termios()
346 spin_unlock_irqrestore(&port->lock, flags); sunhv_set_termios()
434 locked = spin_trylock_irqsave(&port->lock, flags); sunhv_console_write_paged()
436 spin_lock_irqsave(&port->lock, flags); sunhv_console_write_paged()
467 spin_unlock_irqrestore(&port->lock, flags); sunhv_console_write_paged()
489 locked = spin_trylock_irqsave(&port->lock, flags); sunhv_console_write_bychar()
491 spin_lock_irqsave(&port->lock, flags); sunhv_console_write_bychar()
495 locked = spin_trylock(&port->lock); sunhv_console_write_bychar()
497 spin_lock(&port->lock); sunhv_console_write_bychar()
506 spin_unlock_irqrestore(&port->lock, flags); sunhv_console_write_bychar()
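The sunhv console write paths above take the port lock with a trylock when an oops or sysrq may already hold it, remember whether the trylock succeeded, and only unlock at the end if it did, so a crashing context can still emit console output without deadlocking on its own lock. A hedged userspace analogue follows; console_write(), port_lock and the in_trouble flag are invented stand-ins.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void console_write(const char *msg, bool in_trouble)
{
    bool locked = true;

    if (in_trouble)
        /* the lock may already be held by the crashing context */
        locked = (pthread_mutex_trylock(&port_lock) == 0);
    else
        pthread_mutex_lock(&port_lock);

    fputs(msg, stdout);             /* emit even if we could not lock */

    if (locked)
        pthread_mutex_unlock(&port_lock);
}

int main(void)
{
    console_write("normal path\n", false);

    pthread_mutex_lock(&port_lock); /* simulate a holder at oops time */
    console_write("oops path, lock busy\n", true);
    pthread_mutex_unlock(&port_lock);
    return 0;
}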
/linux-4.1.27/drivers/gpio/
H A Dgpio-mb86s7x.c44 spinlock_t lock; member in struct:mb86s70_gpio_chip
58 spin_lock_irqsave(&gchip->lock, flags); mb86s70_gpio_request()
62 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_request()
69 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_request()
80 spin_lock_irqsave(&gchip->lock, flags); mb86s70_gpio_free()
86 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_free()
95 spin_lock_irqsave(&gchip->lock, flags); mb86s70_gpio_direction_input()
101 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_direction_input()
113 spin_lock_irqsave(&gchip->lock, flags); mb86s70_gpio_direction_output()
126 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_direction_output()
144 spin_lock_irqsave(&gchip->lock, flags); mb86s70_gpio_set()
153 spin_unlock_irqrestore(&gchip->lock, flags); mb86s70_gpio_set()
179 spin_lock_init(&gchip->lock); mb86s70_gpio_probe()
/linux-4.1.27/drivers/dma/
H A Dvirt-dma.h20 /* protected by vc.lock */
29 spinlock_t lock; member in struct:virt_dma_chan
31 /* protected by vc.lock */
70 * vc.lock must be held by caller
82 * vc.lock must be held by caller
114 * vc.lock must be held by caller
129 * vc.lock must be held by caller
147 spin_lock_irqsave(&vc->lock, flags); vchan_free_chan_resources()
149 spin_unlock_irqrestore(&vc->lock, flags); vchan_free_chan_resources()
/linux-4.1.27/net/9p/
H A Dutil.c38 * @lock: protects the pool
44 spinlock_t lock; member in struct:p9_idpool
61 spin_lock_init(&p->lock); p9_idpool_create()
85 * the lock included in struct idr?
94 spin_lock_irqsave(&p->lock, flags); p9_idpool_get()
99 spin_unlock_irqrestore(&p->lock, flags); p9_idpool_get()
115 * the lock included in struct idr?
124 spin_lock_irqsave(&p->lock, flags); p9_idpool_put()
126 spin_unlock_irqrestore(&p->lock, flags); p9_idpool_put()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
H A Dclip_tbl.c88 read_lock_bh(&ctbl->lock); cxgb4_clip_get()
98 read_unlock_bh(&ctbl->lock); cxgb4_clip_get()
102 read_unlock_bh(&ctbl->lock); cxgb4_clip_get()
104 write_lock_bh(&ctbl->lock); cxgb4_clip_get()
110 spin_lock_init(&ce->lock); cxgb4_clip_get()
120 write_unlock_bh(&ctbl->lock); cxgb4_clip_get()
129 write_unlock_bh(&ctbl->lock); cxgb4_clip_get()
132 write_unlock_bh(&ctbl->lock); cxgb4_clip_get()
151 read_lock_bh(&ctbl->lock); cxgb4_clip_release()
161 read_unlock_bh(&ctbl->lock); cxgb4_clip_release()
165 read_unlock_bh(&ctbl->lock); cxgb4_clip_release()
169 write_lock_bh(&ctbl->lock); cxgb4_clip_release()
170 spin_lock_bh(&ce->lock); cxgb4_clip_release()
179 spin_unlock_bh(&ce->lock); cxgb4_clip_release()
180 write_unlock_bh(&ctbl->lock); cxgb4_clip_release()
199 read_lock_bh(&idev->lock); cxgb4_update_dev_clip()
205 read_unlock_bh(&idev->lock); cxgb4_update_dev_clip()
250 read_lock_bh(&ctbl->lock); clip_tbl_show()
263 read_unlock_bh(&ctbl->lock); clip_tbl_show()
292 rwlock_init(&ctbl->lock); t4_init_clip_tbl()
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_fwd.c33 lockdep_assert_held(&ufdev->lock); usnic_fwd_devcmd_locked()
62 spin_lock(&ufdev->lock); usnic_fwd_devcmd()
64 spin_unlock(&ufdev->lock); usnic_fwd_devcmd()
79 spin_lock_init(&ufdev->lock); usnic_fwd_dev_alloc()
93 spin_lock(&ufdev->lock); usnic_fwd_set_mac()
95 spin_unlock(&ufdev->lock); usnic_fwd_set_mac()
102 spin_lock(&ufdev->lock); usnic_fwd_add_ipaddr()
109 spin_unlock(&ufdev->lock); usnic_fwd_add_ipaddr()
116 spin_lock(&ufdev->lock); usnic_fwd_del_ipaddr()
118 spin_unlock(&ufdev->lock); usnic_fwd_del_ipaddr()
123 spin_lock(&ufdev->lock); usnic_fwd_carrier_up()
125 spin_unlock(&ufdev->lock); usnic_fwd_carrier_up()
130 spin_lock(&ufdev->lock); usnic_fwd_carrier_down()
132 spin_unlock(&ufdev->lock); usnic_fwd_carrier_down()
137 spin_lock(&ufdev->lock); usnic_fwd_set_mtu()
139 spin_unlock(&ufdev->lock); usnic_fwd_set_mtu()
144 lockdep_assert_held(&ufdev->lock); usnic_fwd_dev_ready_locked()
156 lockdep_assert_held(&ufdev->lock); validate_filter_locked()
219 spin_lock(&ufdev->lock); usnic_fwd_alloc_flow()
253 spin_unlock(&ufdev->lock); usnic_fwd_alloc_flow()
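
usnic_fwd.c illustrates the common "*_locked" split: the _locked body only asserts ufdev->lock via lockdep_assert_held(), while thin wrappers actually take and release the lock, so callers that already hold it can reuse the body. A minimal, hypothetical sketch of that convention (my_dev and my_dev_set_state*() are made up):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_dev {
    spinlock_t lock;
    unsigned int state;
};

/* Callers must already hold d->lock; lockdep verifies it in debug builds. */
static void my_dev_set_state_locked(struct my_dev *d, unsigned int state)
{
    lockdep_assert_held(&d->lock);
    d->state = state;
}

/* Unlocked entry point for external callers. */
static void my_dev_set_state(struct my_dev *d, unsigned int state)
{
    spin_lock(&d->lock);
    my_dev_set_state_locked(d, state);
    spin_unlock(&d->lock);
}
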
/linux-4.1.27/arch/powerpc/mm/
H A Dicswx_pid.c30 spinlock_t *lock) new_cop_pid()
39 spin_lock(lock); new_cop_pid()
41 spin_unlock(lock); new_cop_pid()
49 spin_lock(lock); new_cop_pid()
51 spin_unlock(lock); new_cop_pid()
29 new_cop_pid(struct ida *ida, int min_id, int max_id, spinlock_t *lock) new_cop_pid() argument
/linux-4.1.27/tools/perf/scripts/python/
H A Dfutex-contention.py21 lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
46 for (tid, lock) in lock_waits:
47 min, max, avg, count = lock_waits[tid, lock]
48 print "%s[%d] lock %x contended %d times, %d avg ns" % \
49 (process_names[tid], tid, lock, count, avg)
/linux-4.1.27/mm/
H A Dlist_lru.c52 * The lock protects the array of per cgroup lists from relocation list_lru_from_memcg_idx()
55 lockdep_assert_held(&nlru->lock); list_lru_from_memcg_idx()
101 spin_lock(&nlru->lock); list_lru_add()
106 spin_unlock(&nlru->lock); list_lru_add()
109 spin_unlock(&nlru->lock); list_lru_add()
120 spin_lock(&nlru->lock); list_lru_del()
125 spin_unlock(&nlru->lock); list_lru_del()
128 spin_unlock(&nlru->lock); list_lru_del()
155 spin_lock(&nlru->lock); __list_lru_count_one()
158 spin_unlock(&nlru->lock); __list_lru_count_one()
195 spin_lock(&nlru->lock); __list_lru_walk_one()
209 ret = isolate(item, l, &nlru->lock, cb_arg); __list_lru_walk_one()
212 assert_spin_locked(&nlru->lock); __list_lru_walk_one()
216 * If the lru lock has been dropped, our list __list_lru_walk_one()
230 * The lru lock has been dropped, our list traversal is __list_lru_walk_one()
233 assert_spin_locked(&nlru->lock); __list_lru_walk_one()
240 spin_unlock(&nlru->lock); __list_lru_walk_one()
354 * The lock guarantees that we won't race with a reader memcg_update_list_lru_node()
357 * Since list_lru_{add,del} may be called under an IRQ-safe lock, memcg_update_list_lru_node()
360 spin_lock_irq(&nlru->lock); memcg_update_list_lru_node()
362 spin_unlock_irq(&nlru->lock); memcg_update_list_lru_node()
465 * Since list_lru_{add,del} may be called under an IRQ-safe lock, memcg_drain_list_lru_node()
468 spin_lock_irq(&nlru->lock); memcg_drain_list_lru_node()
477 spin_unlock_irq(&nlru->lock); memcg_drain_list_lru_node()
526 spin_lock_init(&lru->node[i].lock); __list_lru_init()
528 lockdep_set_class(&lru->node[i].lock, key); __list_lru_init()
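
The list_lru comments above capture the walk protocol: __list_lru_walk_one() holds nlru->lock, passes it to the isolate callback, and if the callback drops the lock it must re-take it and return LRU_REMOVED_RETRY so the walker knows the traversal is stale and restarts. A hedged sketch of an isolate callback obeying that contract; struct my_obj and my_obj_free() are hypothetical, not anything in mm/.

#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object kept on a list_lru. */
struct my_obj {
    struct list_head lru;
    bool busy;
};

/* Hypothetical release path; stands in for whatever frees the object. */
static void my_obj_free(struct my_obj *obj)
{
    kfree(obj);
}

static enum lru_status my_isolate(struct list_head *item,
                                  struct list_lru_one *list,
                                  spinlock_t *lru_lock, void *cb_arg)
{
    struct my_obj *obj = container_of(item, struct my_obj, lru);

    if (obj->busy)
        return LRU_SKIP;                /* leave it on the list, keep walking */

    list_lru_isolate(list, item);       /* unlink while nlru->lock is held */

    spin_unlock(lru_lock);              /* drop the lock for the expensive part */
    my_obj_free(obj);
    spin_lock(lru_lock);                /* walker asserts the lock is held again */

    return LRU_REMOVED_RETRY;           /* traversal is stale: walker restarts */
}
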
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
H A Dl2t.c50 * Module locking notes: There is a RW lock protecting the L2 table as a
52 * under the protection of the table lock, individual entry changes happen
53 * while holding that entry's spinlock. The table lock nests outside the
54 * entry locks. Allocations of new entries take the table lock as writers so
56 * take the table lock as readers so multiple entries can be updated in
121 * Must be called with the entry's lock held.
135 spin_lock_bh(&e->lock); t3_l2t_send_slow()
138 spin_unlock_bh(&e->lock); t3_l2t_send_slow()
142 spin_lock_bh(&e->lock); t3_l2t_send_slow()
145 spin_unlock_bh(&e->lock); t3_l2t_send_slow()
149 spin_unlock_bh(&e->lock); t3_l2t_send_slow()
165 spin_lock_bh(&e->lock); t3_l2t_send_slow()
170 spin_unlock_bh(&e->lock); t3_l2t_send_slow()
184 spin_lock_bh(&e->lock); t3_l2t_send_event()
188 spin_unlock_bh(&e->lock); t3_l2t_send_event()
193 spin_lock_bh(&e->lock); t3_l2t_send_event()
196 spin_unlock_bh(&e->lock); t3_l2t_send_event()
199 spin_unlock_bh(&e->lock); t3_l2t_send_event()
216 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
260 * drops to 0 we need to take the entry's lock to avoid races with a new
265 spin_lock_bh(&e->lock); t3_l2e_free()
272 spin_unlock_bh(&e->lock); t3_l2e_free()
286 spin_lock(&e->lock); /* avoid race with t3_l2t_free */ reuse_entry()
298 spin_unlock(&e->lock); reuse_entry()
332 write_lock_bh(&d->lock); t3_l2t_get()
345 spin_lock(&e->lock); /* avoid race with t3_l2t_free */ t3_l2t_get()
358 spin_unlock(&e->lock); t3_l2t_get()
361 write_unlock_bh(&d->lock); t3_l2t_get()
407 read_lock_bh(&d->lock); t3_l2t_update()
410 spin_lock(&e->lock); t3_l2t_update()
413 read_unlock_bh(&d->lock); t3_l2t_update()
419 read_unlock(&d->lock); t3_l2t_update()
436 spin_unlock_bh(&e->lock); t3_l2t_update()
454 rwlock_init(&d->lock); t3_init_l2t()
460 spin_lock_init(&d->l2tab[i].lock); t3_init_l2t()
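
The cxgb3 l2t.c locking notes above describe a two-level hierarchy: an rwlock over the whole table, taken as writer for allocations and as reader for per-entry updates, nesting outside each entry's spinlock so different entries can be updated in parallel. A minimal, hypothetical sketch of that nesting order (struct my_table/my_entry are made up, not the cxgb3 l2t_data/l2t_entry structures):

#include <linux/spinlock.h>

struct my_entry {
    spinlock_t lock;    /* protects this entry's fields */
    int state;
};

struct my_table {
    rwlock_t lock;      /* protects the table layout; nests outside entry locks */
    struct my_entry *entries;
    int nentries;
};

/* Per-entry update: updaters of different entries can run concurrently. */
static void my_update_entry(struct my_table *d, int idx, int state)
{
    struct my_entry *e = &d->entries[idx];

    read_lock_bh(&d->lock);     /* table lock first ... */
    spin_lock(&e->lock);        /* ... then the entry's lock */
    e->state = state;
    spin_unlock(&e->lock);
    read_unlock_bh(&d->lock);
}

/* Table-level change (e.g. allocating an entry): excludes all readers. */
static void my_alloc_entry(struct my_table *d, int idx, int state)
{
    write_lock_bh(&d->lock);
    d->entries[idx].state = state;
    write_unlock_bh(&d->lock);
}
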
/linux-4.1.27/drivers/dma/hsu/
H A Dhsu.c104 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_stop_channel()
107 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_stop_channel()
114 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_start_channel()
116 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_start_channel()
142 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_chan_get_sr()
144 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_chan_get_sr()
178 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_irq()
191 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_irq()
254 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_issue_pending()
257 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_issue_pending()
278 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_active_desc_size()
283 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_active_desc_size()
301 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_tx_status()
311 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_tx_status()
334 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_chan_deactivate()
336 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_chan_deactivate()
343 spin_lock_irqsave(&hsuc->lock, flags); hsu_dma_chan_activate()
345 spin_unlock_irqrestore(&hsuc->lock, flags); hsu_dma_chan_activate()
353 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_pause()
358 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_pause()
368 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_resume()
373 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_resume()
384 spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_terminate_all()
393 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); hsu_dma_terminate_all()
445 spin_lock_init(&hsuc->lock); hsu_dma_probe()
/linux-4.1.27/drivers/s390/scsi/
H A Dzfcp_reqlist.h18 * @lock: Spinlock for protecting the hash list
22 spinlock_t lock; member in struct:zfcp_reqlist
46 spin_lock_init(&rl->lock); zfcp_reqlist_alloc()
109 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_find()
111 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_find()
122 * id and then removes it from the reqlist. The reqlist lock is held
134 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_find_rm()
138 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_find_rm()
161 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_add()
163 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_add()
177 spin_lock_irqsave(&rl->lock, flags); zfcp_reqlist_move()
180 spin_unlock_irqrestore(&rl->lock, flags); zfcp_reqlist_move()
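
zfcp_reqlist.h documents rl->lock as the "Spinlock for protecting the hash list", and every find/add/remove above runs under spin_lock_irqsave(). A minimal sketch of the same shape with hypothetical names (my_reqlist uses a single list rather than zfcp's hash buckets):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical request list protected the same way as zfcp's reqlist. */
struct my_reqlist {
    spinlock_t lock;        /* protects head and all entries on it */
    struct list_head head;
};

struct my_req {
    struct list_head entry;
    unsigned long id;
};

/* Find a request by id and remove it, all under rl->lock. */
static struct my_req *my_reqlist_find_rm(struct my_reqlist *rl, unsigned long id)
{
    struct my_req *req, *found = NULL;
    unsigned long flags;

    spin_lock_irqsave(&rl->lock, flags);
    list_for_each_entry(req, &rl->head, entry) {
        if (req->id == id) {
            list_del(&req->entry);  /* unlink while still holding the lock */
            found = req;
            break;
        }
    }
    spin_unlock_irqrestore(&rl->lock, flags);
    return found;
}
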
/linux-4.1.27/drivers/staging/rtl8192e/
H A Drtllib_crypt.c30 spinlock_t lock; member in struct:rtllib_crypto
62 spin_lock_irqsave(info->lock, flags); rtllib_crypt_deinit_handler()
71 spin_unlock_irqrestore(info->lock, flags); rtllib_crypt_deinit_handler()
93 spin_lock_irqsave(info->lock, flags); rtllib_crypt_delayed_deinit()
99 spin_unlock_irqrestore(info->lock, flags); rtllib_crypt_delayed_deinit()
117 spin_lock_irqsave(&hcrypt->lock, flags); rtllib_register_crypto_ops()
119 spin_unlock_irqrestore(&hcrypt->lock, flags); rtllib_register_crypto_ops()
137 spin_lock_irqsave(&hcrypt->lock, flags); rtllib_unregister_crypto_ops()
147 spin_unlock_irqrestore(&hcrypt->lock, flags); rtllib_unregister_crypto_ops()
169 spin_lock_irqsave(&hcrypt->lock, flags); rtllib_get_crypto_ops()
178 spin_unlock_irqrestore(&hcrypt->lock, flags); rtllib_get_crypto_ops()
218 spin_lock_init(&hcrypt->lock); rtllib_crypto_init()
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/
H A Dieee80211_crypt.c34 spinlock_t lock; member in struct:ieee80211_crypto
65 spin_lock_irqsave(&ieee->lock, flags); ieee80211_crypt_deinit_handler()
73 spin_unlock_irqrestore(&ieee->lock, flags); ieee80211_crypt_deinit_handler()
93 spin_lock_irqsave(&ieee->lock, flags); ieee80211_crypt_delayed_deinit()
99 spin_unlock_irqrestore(&ieee->lock, flags); ieee80211_crypt_delayed_deinit()
116 spin_lock_irqsave(&hcrypt->lock, flags); ieee80211_register_crypto_ops()
118 spin_unlock_irqrestore(&hcrypt->lock, flags); ieee80211_register_crypto_ops()
135 spin_lock_irqsave(&hcrypt->lock, flags); ieee80211_unregister_crypto_ops()
145 spin_unlock_irqrestore(&hcrypt->lock, flags); ieee80211_unregister_crypto_ops()
166 spin_lock_irqsave(&hcrypt->lock, flags); ieee80211_get_crypto_ops()
175 spin_unlock_irqrestore(&hcrypt->lock, flags); ieee80211_get_crypto_ops()
211 spin_lock_init(&hcrypt->lock); ieee80211_crypto_init()
/linux-4.1.27/drivers/clk/samsung/
H A Dclk-s5pv210-audss.c26 static DEFINE_SPINLOCK(lock);
122 reg_base + ASS_CLK_SRC, 0, 1, 0, &lock); s5pv210_audss_clk_probe()
133 reg_base + ASS_CLK_SRC, 2, 2, 0, &lock); s5pv210_audss_clk_probe()
137 reg_base + ASS_CLK_DIV, 0, 4, 0, &lock); s5pv210_audss_clk_probe()
140 4, 4, 0, &lock); s5pv210_audss_clk_probe()
144 reg_base + ASS_CLK_GATE, 6, 0, &lock); s5pv210_audss_clk_probe()
150 reg_base + ASS_CLK_GATE, 5, 0, &lock); s5pv210_audss_clk_probe()
153 reg_base + ASS_CLK_GATE, 4, 0, &lock); s5pv210_audss_clk_probe()
156 reg_base + ASS_CLK_GATE, 3, 0, &lock); s5pv210_audss_clk_probe()
159 reg_base + ASS_CLK_GATE, 2, 0, &lock); s5pv210_audss_clk_probe()
162 reg_base + ASS_CLK_GATE, 1, 0, &lock); s5pv210_audss_clk_probe()
165 reg_base + ASS_CLK_GATE, 0, 0, &lock); s5pv210_audss_clk_probe()
