This source file includes the following definitions:
- do_raw_spin_lock
- do_raw_spin_lock_flags
- do_raw_spin_trylock
- do_raw_spin_unlock
- spinlock_check
- spin_lock
- spin_lock_bh
- spin_trylock
- spin_lock_irq
- spin_unlock
- spin_unlock_bh
- spin_unlock_irq
- spin_unlock_irqrestore
- spin_trylock_bh
- spin_trylock_irq
- spin_is_locked
- spin_is_contended
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
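
/*
 * Illustrative sketch (not part of this header): initializing a
 * raw_spinlock_t before first use.  The lock and function names below are
 * hypothetical, made up for the example.
 *
 *	static raw_spinlock_t my_raw_lock;
 *
 *	static void my_init(void)
 *	{
 *		raw_spin_lock_init(&my_raw_lock);
 *	}
 *
 * A statically allocated lock can instead be defined pre-initialized with
 * DEFINE_RAW_SPINLOCK(my_raw_lock) from spinlock_types.h.
 */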

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * Architectures whose spin_lock() does not already provide this ordering
 * must override this default no-op definition (typically with smp_mb()).
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
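
/*
 * Usage sketch (illustrative; X, Y and r are hypothetical): pairing a lock
 * acquisition with smp_mb__after_spinlock() so that a store before the lock
 * is ordered against a load after it, which a plain ACQUIRE does not
 * guarantee:
 *
 *	WRITE_ONCE(X, 1);
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();	// upgrade the ACQUIRE to a full barrier
 *	r = READ_ONCE(Y);
 *	...
 *	raw_spin_unlock(&lock);
 */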

/*
 * The do_raw_spin_*() helpers wrap the architecture lock primitives and, on
 * architectures that need it, track MMIO writes done inside the critical
 * section (mmiowb) so they are ordered before the lock is released.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=y and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
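
/*
 * Sketch of the nesting annotation (illustrative; "a" and "b" are
 * hypothetical objects whose locks belong to the same lock class): taking
 * two same-class locks in a fixed order, telling lockdep that the second
 * acquisition is intentional rather than a recursive deadlock:
 *
 *	raw_spin_lock(&a->lock);
 *	raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&b->lock);
 *	raw_spin_unlock(&a->lock);
 */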

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
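
/*
 * Usage sketch (illustrative; my_lock is a hypothetical raw_spinlock_t):
 * opportunistically taking a lock without spinning.  On failure the
 * interrupt state is restored, so the caller pays nothing for contention:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		...	// lock held, local interrupts disabled
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	} else {
 *		...	// lock was contended; interrupt state unchanged
 *	}
 */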

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants:
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
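
/*
 * Sketch (illustrative; "struct my_dev" and my_dev_setup() are hypothetical):
 * a dynamically allocated object initializes its embedded spinlock once,
 * before the lock can be reached from any other context:
 *
 *	struct my_dev {
 *		spinlock_t lock;
 *		...
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		return 0;
 *	}
 *
 * File-scope locks can instead be defined pre-initialized with
 * DEFINE_SPINLOCK(name).
 */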

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}
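
/*
 * Usage sketch (illustrative, hypothetical names): spin_trylock() returns
 * nonzero on success, and the unlock must be paired only on that path:
 *
 *	if (spin_trylock(&dev->lock)) {
 *		...	// critical section
 *		spin_unlock(&dev->lock);
 *	} else {
 *		...	// fall back, e.g. defer the work
 *	}
 */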

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
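
/*
 * Usage sketch (illustrative, hypothetical names): the canonical pattern for
 * data shared with an interrupt handler.  flags must be an unsigned long
 * (enforced by the typecheck() above) in the caller's own stack frame; it
 * records the previous local interrupt state:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	...	// safe against both other CPUs and local interrupts
 *	spin_unlock_irqrestore(&dev->lock, flags);
 *
 * Code that is known to run with interrupts enabled can use the cheaper
 * spin_lock_irq()/spin_unlock_irq() pair instead.
 */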

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
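
/*
 * Sketch (illustrative, hypothetical names): these predicates are meant for
 * assertions and heuristics, not for synchronization decisions:
 *
 *	static void my_helper(struct my_dev *dev)
 *	{
 *		assert_spin_locked(&dev->lock);	// caller must hold the lock
 *		...
 *	}
 *
 * spin_is_contended() can serve as a lock-break hint, e.g. dropping and
 * re-taking a lock inside a long loop when another CPU is spinning on it.
 */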

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
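
/*
 * Usage sketch (illustrative; "obj", its fields and obj_list_lock are
 * hypothetical): the classic refcount-release pattern, where the list lock
 * is taken only when the final reference is dropped:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */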

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})
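
/*
 * Usage sketch (illustrative; bucket_locks, lock_mask and hash are
 * hypothetical): allocating an array of per-bucket locks for a hash table,
 * sized relative to the CPU count, then indexing it with the returned mask:
 *
 *	spinlock_t *bucket_locks;
 *	unsigned int lock_mask;
 *	int err;
 *
 *	err = alloc_bucket_spinlocks(&bucket_locks, &lock_mask, 1024, 32,
 *				     GFP_KERNEL);
 *	if (err)
 *		return err;
 *	...
 *	spin_lock(&bucket_locks[hash & lock_mask]);
 *	...
 *	spin_unlock(&bucket_locks[hash & lock_mask]);
 *	...
 *	free_bucket_spinlocks(bucket_locks);
 */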

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */