This source file includes the following definitions:
- __seqcount_init
- seqcount_lockdep_reader_access
- __read_seqcount_begin
- raw_read_seqcount
- raw_read_seqcount_begin
- read_seqcount_begin
- raw_seqcount_begin
- __read_seqcount_retry
- read_seqcount_retry
- raw_write_seqcount_begin
- raw_write_seqcount_end
- raw_write_seqcount_barrier
- raw_read_seqcount_latch
- raw_write_seqcount_latch
- write_seqcount_begin_nested
- write_seqcount_begin
- write_seqcount_end
- write_seqcount_invalidate
- read_seqbegin
- read_seqretry
- write_seqlock
- write_sequnlock
- write_seqlock_bh
- write_sequnlock_bh
- write_seqlock_irq
- write_sequnlock_irq
- __write_seqlock_irqsave
- write_sequnlock_irqrestore
- read_seqlock_excl
- read_sequnlock_excl
- read_seqbegin_or_lock
- need_seqretry
- done_seqretry
- read_seqlock_excl_bh
- read_sequnlock_excl_bh
- read_seqlock_excl_irq
- read_sequnlock_excl_irq
- __read_seqlock_excl_irqsave
- read_sequnlock_excl_irqrestore
- read_seqbegin_or_lock_irqsave
- done_seqretry_irqrestore
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>

/*
 * Sequence counter only version: for use when the writer already
 * serializes itself, e.g. with a lock held across write_seqcount_begin()
 * and write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/* Register the counter with lockdep (a no-op unless lockdep is on). */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

/**
 * __read_seqcount_begin - begin a seqcount read section, without barrier
 * @s: pointer to seqcount_t
 *
 * Spins until the sequence is even (no writer in progress) and returns it.
 * Issues no smp_rmb() and does no lockdep checking; use
 * read_seqcount_begin() unless you supply the barrier yourself.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/**
 * raw_read_seqcount - read the raw seqcount
 * @s: pointer to seqcount_t
 *
 * Returns the sequence, odd or even, without masking off the
 * writer-active bit; the caller is responsible for handling an odd value.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret;
}

/**
 * raw_read_seqcount_begin - start a seqcount read section, without lockdep
 * @s: pointer to seqcount_t
 *
 * Like read_seqcount_begin(), but without any lockdep checking.
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seqcount read critical section
 * @s: pointer to seqcount_t
 *
 * Returns the even sequence value to be validated later with
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}

/**
 * raw_seqcount_begin - begin a seqcount read section, without spinning
 * @s: pointer to seqcount_t
 *
 * Unlike read_seqcount_begin(), this does not wait for an active writer.
 * It masks off the low bit instead, so if a write is in progress the
 * subsequent retry is guaranteed to fail and the section is simply
 * redone. Useful when the read section can make limited progress on
 * stale data.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);
	smp_rmb();
	return ret & ~1;
}

/**
 * __read_seqcount_retry - end a seqcount read section, without barrier
 * @s: pointer to seqcount_t
 * @start: value returned by the matching begin
 *
 * Returns non-zero if the section must be retried. As with
 * __read_seqcount_begin(), the caller must supply the smp_rmb().
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seqcount read critical section
 * @s: pointer to seqcount_t
 * @start: value returned by read_seqcount_begin()
 *
 * Returns non-zero if a writer intervened and the read section must be
 * retried.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
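
/*
 * Example: a typical lockless reader retries until no writer intervened.
 * A minimal sketch; "struct foo" and foo_read() are hypothetical, not
 * part of this header:
 *
 *	struct foo {
 *		seqcount_t seq;
 *		spinlock_t lock;	// serializes writers
 *		int a, b;
 *	};
 *
 *	void foo_read(struct foo *f, int *a, int *b)
 *	{
 *		unsigned start;
 *
 *		do {
 *			start = read_seqcount_begin(&f->seq);
 *			*a = f->a;
 *			*b = f->b;
 *		} while (read_seqcount_retry(&f->seq, start));
 *	}
 */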

/* Write side without lockdep: sequence goes odd on begin, even on end. */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
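
/*
 * Example write side, assuming the writer is already serialized by its
 * own lock (a sketch; "struct foo" is the hypothetical one from the
 * reader example above):
 *
 *	void foo_write(struct foo *f, int a, int b)
 *	{
 *		spin_lock(&f->lock);
 *		raw_write_seqcount_begin(&f->seq);
 *		f->a = a;
 *		f->b = b;
 *		raw_write_seqcount_end(&f->seq);
 *		spin_unlock(&f->lock);
 *	}
 */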

/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * Two increments with an smp_wmb() in between: any read section that
 * overlaps the barrier sees a changed sequence and retries, so readers
 * observe the data either entirely before or entirely after the barrier.
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}
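
/*
 * Example: flip two flags so readers see either the old pair or the new
 * pair, never a mix (a sketch; X and Y are hypothetical booleans sampled
 * inside a read_seqcount_begin()/read_seqcount_retry() section):
 *
 *	void flip(seqcount_t *seq)
 *	{
 *		Y = true;
 *		raw_write_seqcount_barrier(seq);
 *		X = false;
 *	}
 *
 * A reader whose section overlaps the barrier sees a changed sequence
 * and retries.
 */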

static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	/* The low bit of the returned sequence selects the live data copy. */
	int seq = READ_ONCE(s->sequence);
	return seq;
}

/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique keeps two copies of the data. The writer flips
 * the sequence, updates the copy readers are no longer using, flips the
 * sequence again, and updates the other copy; readers pick a copy via
 * the low bit of the sequence. This allows wait-free reads even from
 * NMI context, at the cost of doubling the storage.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
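
/*
 * Example latch usage (a condensed sketch in pseudocode;
 * "struct latch_struct", modify() and data_query() are illustrative
 * names, not part of this header):
 *
 *	struct latch_struct {
 *		seqcount_t seq;
 *		struct data data[2];
 *	};
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);	// readers now use data[1]
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);	// readers now use data[0]
 *	}
 *
 *	unsigned latch_query(struct latch_struct *latch, ...)
 *	{
 *		unsigned seq, idx;
 *
 *		do {
 *			seq = raw_read_seqcount_latch(&latch->seq);
 *			idx = seq & 0x01;
 *			entry = data_query(latch->data[idx], ...);
 *			smp_rmb();
 *		} while (seq != latch->seq.sequence);
 *
 *		return entry;
 *	}
 */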

/*
 * Lockdep-aware write side; callers must provide their own serialization
 * between writers.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}

/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After this call, no in-flight read section will complete successfully
 * and see data older than this point.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

/*
 * Sequence counter with an embedded spinlock for writer serialization.
 */
typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
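
/*
 * Example initialization (a sketch; "foo_lock" and "bar_lock" are
 * hypothetical):
 *
 *	static DEFINE_SEQLOCK(foo_lock);	// static definition
 *
 *	seqlock_t bar_lock;			// or dynamically:
 *	seqlock_init(&bar_lock);
 */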

/* Read side: start and later validate a lockless read section. */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
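
/*
 * Example seqlock reader (a sketch, assuming a seqlock "foo_lock"
 * protecting a hypothetical variable "foo_state"):
 *
 *	unsigned seq;
 *	int snapshot;
 *
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		snapshot = foo_state;
 *	} while (read_seqretry(&foo_lock, seq));
 */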

/*
 * Write side: lock out other writers and update the count. Acts like a
 * normal spin_lock()/unlock(); preemption is already disabled by the
 * spinlock.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
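
/*
 * Example writer with interrupts disabled and saved (a sketch, using the
 * hypothetical foo_lock/foo_state from the examples above):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	foo_state = new_value;
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */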

/*
 * A locking reader exclusively locks out other writers and locking
 * readers, but it does not update the sequence number, so lockless
 * readers are not disturbed.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/*
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq: sequence number to be checked
 *
 * The first pass is optimistic (even @seq); if it fails, callers set the
 * low bit of @seq before retrying, so the second pass takes the lock.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
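
/*
 * Example: an optimistic read that falls back to a locking read when a
 * writer keeps interfering (a sketch, reusing the hypothetical
 * foo_lock/foo_state):
 *
 *	int seq = 0;
 *
 *	retry:
 *	read_seqbegin_or_lock(&foo_lock, &seq);
 *	snapshot = foo_state;
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	// take the lock on the second pass
 *		goto retry;
 *	}
 *	done_seqretry(&foo_lock, seq);
 */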

static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
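
/*
 * Example: the irqsave variant of the optimistic/locking reader pattern
 * (a sketch, mirroring the example above):
 *
 *	unsigned long flags;
 *	int seq = 0;
 *
 *	retry:
 *	flags = read_seqbegin_or_lock_irqsave(&foo_lock, &seq);
 *	snapshot = foo_state;
 *	if (need_seqretry(&foo_lock, seq)) {
 *		seq = 1;	// take the lock on the second pass
 *		goto retry;
 *	}
 *	done_seqretry_irqrestore(&foo_lock, seq, flags);
 */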

#endif /* __LINUX_SEQLOCK_H */