This source file includes the following definitions:
- init_srcu_struct_nodes
- init_srcu_struct_fields
- __init_srcu_struct
- init_srcu_struct
- check_init_srcu_struct
- srcu_readers_lock_idx
- srcu_readers_unlock_idx
- srcu_readers_active_idx_check
- srcu_readers_active
- srcu_get_delay
- cleanup_srcu_struct
- __srcu_read_lock
- __srcu_read_unlock
- srcu_gp_start
- srcu_delay_timer
- srcu_queue_delayed_work_on
- srcu_schedule_cbs_sdp
- srcu_schedule_cbs_snp
- srcu_gp_end
- srcu_funnel_exp_start
- srcu_funnel_gp_start
- try_check_zero
- srcu_flip
- srcu_might_be_idle
- srcu_leak_callback
- __call_srcu
- call_srcu
- __synchronize_srcu
- synchronize_srcu_expedited
- synchronize_srcu
- srcu_barrier_cb
- srcu_barrier
- srcu_batches_completed
- srcu_advance_state
- srcu_invoke_callbacks
- srcu_reschedule
- process_srcu
- srcutorture_get_gp_data
- srcu_torture_stats_print
- srcu_bootup_announce
- srcu_init
- srcu_module_coming
- srcu_module_going
- srcu_module_notify
- init_srcu_module_notifier
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
4 *
5 * Copyright (C) IBM Corporation, 2006
6 * Copyright (C) Fujitsu, 2012
7 *
8 * Authors: Paul McKenney <paulmck@linux.ibm.com>
9 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
10 *
11 * For detailed explanation of Read-Copy Update mechanism see -
12 *		Documentation/RCU/ *.txt
13 *
14 */
15
16 #define pr_fmt(fmt) "rcu: " fmt
17
18 #include <linux/export.h>
19 #include <linux/mutex.h>
20 #include <linux/percpu.h>
21 #include <linux/preempt.h>
22 #include <linux/rcupdate_wait.h>
23 #include <linux/sched.h>
24 #include <linux/smp.h>
25 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/srcu.h>
28
29 #include "rcu.h"
30 #include "rcu_segcblist.h"
31
32 /* Holdoff in nanoseconds for auto-expediting. */
33 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
34 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
35 module_param(exp_holdoff, ulong, 0444);
36
37 /* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
38 static ulong counter_wrap_check = (ULONG_MAX >> 2);
39 module_param(counter_wrap_check, ulong, 0444);
40
41 /* Early-boot callback-management, so early that no lock is required! */
42 static LIST_HEAD(srcu_boot_list);
43 static bool __read_mostly srcu_init_done;
44
45 static void srcu_invoke_callbacks(struct work_struct *work);
46 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
47 static void process_srcu(struct work_struct *work);
48 static void srcu_delay_timer(struct timer_list *t);
49
50 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
51 #define spin_lock_rcu_node(p) \
52 do { \
53 spin_lock(&ACCESS_PRIVATE(p, lock)); \
54 smp_mb__after_unlock_lock(); \
55 } while (0)
56
57 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
58
59 #define spin_lock_irq_rcu_node(p) \
60 do { \
61 spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
62 smp_mb__after_unlock_lock(); \
63 } while (0)
64
65 #define spin_unlock_irq_rcu_node(p) \
66 spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
67
68 #define spin_lock_irqsave_rcu_node(p, flags) \
69 do { \
70 spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
71 smp_mb__after_unlock_lock(); \
72 } while (0)
73
74 #define spin_unlock_irqrestore_rcu_node(p, flags) \
75 spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
76
77 /*
78 * Initialize SRCU combining tree.  Note that statically allocated
79 * srcu_struct structures might already have srcu_read_lock() and
80 * srcu_read_unlock() running against them.  So if the is_static parameter
81 * is set, don't initialize ->sda (srcu_data).
82 */
83 static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
84 {
85 int cpu;
86 int i;
87 int level = 0;
88 int levelspread[RCU_NUM_LVLS];
89 struct srcu_data *sdp;
90 struct srcu_node *snp;
91 struct srcu_node *snp_first;
92
93 /* Work out the overall tree geometry. */
94 ssp->level[0] = &ssp->node[0];
95 for (i = 1; i < rcu_num_lvls; i++)
96 ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
97 rcu_init_levelspread(levelspread, num_rcu_lvl);
98
99 /* Each pass through this loop initializes one srcu_node structure. */
100 srcu_for_each_node_breadth_first(ssp, snp) {
101 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
102 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
103 ARRAY_SIZE(snp->srcu_data_have_cbs));
104 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
105 snp->srcu_have_cbs[i] = 0;
106 snp->srcu_data_have_cbs[i] = 0;
107 }
108 snp->srcu_gp_seq_needed_exp = 0;
109 snp->grplo = -1;
110 snp->grphi = -1;
111 if (snp == &ssp->node[0]) {
112 /* Root node, special case. */
113 snp->srcu_parent = NULL;
114 continue;
115 }
116
117 /* Non-root node. */
118 if (snp == ssp->level[level + 1])
119 level++;
120 snp->srcu_parent = ssp->level[level - 1] +
121 (snp - ssp->level[level]) /
122 levelspread[level - 1];
123 }
124
125 /*
126 * Initialize the per-CPU srcu_data array, which feeds into the
127 * leaves of the srcu_node tree.
128 */
129 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
130 ARRAY_SIZE(sdp->srcu_unlock_count));
131 level = rcu_num_lvls - 1;
132 snp_first = ssp->level[level];
133 for_each_possible_cpu(cpu) {
134 sdp = per_cpu_ptr(ssp->sda, cpu);
135 spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
136 rcu_segcblist_init(&sdp->srcu_cblist);
137 sdp->srcu_cblist_invoking = false;
138 sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
140 sdp->mynode = &snp_first[cpu / levelspread[level]];
141 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
142 if (snp->grplo < 0)
143 snp->grplo = cpu;
144 snp->grphi = cpu;
145 }
146 sdp->cpu = cpu;
147 INIT_WORK(&sdp->work, srcu_invoke_callbacks);
148 timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
149 sdp->ssp = ssp;
150 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
151 if (is_static)
152 continue;
153
154 /* Dynamically allocated, better be no srcu_read_locks()! */
155 for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
156 sdp->srcu_lock_count[i] = 0;
157 sdp->srcu_unlock_count[i] = 0;
158 }
159 }
160 }
161
162 /*
163 * Initialize non-compile-time initialized fields, including the
164 * associated srcu_node and srcu_data structures.  The is_static
165 * parameter is passed through to init_srcu_struct_nodes(), and
166 * also tells us that ->sda has already been wired up to srcu_data.
167 */
168 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
169 {
170 mutex_init(&ssp->srcu_cb_mutex);
171 mutex_init(&ssp->srcu_gp_mutex);
172 ssp->srcu_idx = 0;
173 ssp->srcu_gp_seq = 0;
174 ssp->srcu_barrier_seq = 0;
175 mutex_init(&ssp->srcu_barrier_mutex);
176 atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
177 INIT_DELAYED_WORK(&ssp->work, process_srcu);
178 if (!is_static)
179 ssp->sda = alloc_percpu(struct srcu_data);
180 init_srcu_struct_nodes(ssp, is_static);
181 ssp->srcu_gp_seq_needed_exp = 0;
182 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
183 smp_store_release(&ssp->srcu_gp_seq_needed, 0);
184 return ssp->sda ? 0 : -ENOMEM;
185 }
186
187 #ifdef CONFIG_DEBUG_LOCK_ALLOC
188
189 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
190 struct lock_class_key *key)
191 {
192 /* Don't re-initialize a lock while it is held. */
193 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
194 lockdep_init_map(&ssp->dep_map, name, key, 0);
195 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
196 return init_srcu_struct_fields(ssp, false);
197 }
198 EXPORT_SYMBOL_GPL(__init_srcu_struct);
199
200 #else
201
202 /**
203 * init_srcu_struct - initialize a sleep-RCU structure
204 * @ssp: structure to initialize.
205 *
206 * Must invoke this on a given srcu_struct before passing that srcu_struct
207 * to any other function.  Each srcu_struct represents a separate structure
208 * of SRCU.
209 */
210 int init_srcu_struct(struct srcu_struct *ssp)
211 {
212 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
213 return init_srcu_struct_fields(ssp, false);
214 }
215 EXPORT_SYMBOL_GPL(init_srcu_struct);
216
217 #endif
218
219
220 /*
221 * First-use initialization of statically allocated srcu_struct structure.
222 * Wiring up the combining tree is more than can be done with compile-time
223 * initialization, so this check is added to each update-side SRCU
224 * primitive.  Use ssp->lock, which -is- compile-time initialized, to
225 * resolve races involving multiple CPUs trying to garner first-use privileges.
226 */
227 static void check_init_srcu_struct(struct srcu_struct *ssp)
228 {
229 unsigned long flags;
230
231 /* The smp_load_acquire() pairs with the smp_store_release(). */
232 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed)))
233 return;
234 spin_lock_irqsave_rcu_node(ssp, flags);
235 if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
236 spin_unlock_irqrestore_rcu_node(ssp, flags);
237 return;
238 }
239 init_srcu_struct_fields(ssp, true);
240 spin_unlock_irqrestore_rcu_node(ssp, flags);
241 }
242
243 /*
244 * Returns approximate total of the readers' ->srcu_lock_count[] values
245 * for the rank of per-CPU counters specified by idx.
246 */
247 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
248 {
249 int cpu;
250 unsigned long sum = 0;
251
252 for_each_possible_cpu(cpu) {
253 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
254
255 sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
256 }
257 return sum;
258 }
259
260 /*
261 * Returns approximate total of the readers' ->srcu_unlock_count[] values
262 * for the rank of per-CPU counters specified by idx.
263 */
264 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
265 {
266 int cpu;
267 unsigned long sum = 0;
268
269 for_each_possible_cpu(cpu) {
270 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
271
272 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
273 }
274 return sum;
275 }
276
277 /*
278 * Return true if the number of pre-existing readers is determined to
279 * be zero.
280 */
281 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
282 {
283 unsigned long unlocks;
284
285 unlocks = srcu_readers_unlock_idx(ssp, idx);
286
287 /*
288 * Make sure that a lock is always counted if the corresponding
289 * unlock is counted.  Needs to be a smp_mb() as the read side may
290 * contain a read from a variable that is written to before the
291 * synchronize_srcu() in the write side.  In this case smp_mb()s
292 * A and B act like the store buffering pattern.
293 *
294 * This smp_mb() also pairs with smp_mb() C to prevent accesses
295 * after the synchronize_srcu() from being executed on the write
296 * side.
297 */
298 smp_mb(); /* A */  /* Pairs with B */
299
300 /*
301 * If the locks are the same as the unlocks, then there must have
302 * been no readers on this index at some time in between.  This does
303 * not mean that there are no more readers, as one could have read
304 * the current index but not yet have incremented the lock counter.
305 *
306 * So suppose that the updater is preempted here for so long
307 * that more than ULONG_MAX non-nested readers come and go in
308 * the meantime.  It turns out that this cannot result in overflow
309 * because if a reader modifies its unlock count after we read it
310 * above, then that reader's next load of ->srcu_idx is guaranteed
311 * to get the new value, which will cause it to operate on the
312 * other bank of counters, where it cannot contribute to the
313 * overflow of these counters.  This means that there is a maximum
314 * of 2*NR_CPUS increments, which cannot overflow given current
315 * systems, especially not on 64-bit systems.
316 *
317 * OK, how about nesting?  This does impose a limit on nesting
318 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
319 * especially on 64-bit systems.
320 */
321 return srcu_readers_lock_idx(ssp, idx) == unlocks;
322 }
323
324 /**
325 * srcu_readers_active - returns true if there are readers, and false
326 *                       otherwise
327 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
328 *
329 * Note that this is not an atomic primitive, and can therefore suffer
330 * severe errors when invoked on an active srcu_struct.  That said, it
331 * can be useful as an error check at cleanup time.
332 */
333 static bool srcu_readers_active(struct srcu_struct *ssp)
334 {
335 int cpu;
336 unsigned long sum = 0;
337
338 for_each_possible_cpu(cpu) {
339 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
340
341 sum += READ_ONCE(cpuc->srcu_lock_count[0]);
342 sum += READ_ONCE(cpuc->srcu_lock_count[1]);
343 sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
344 sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
345 }
346 return sum;
347 }
348
349 #define SRCU_INTERVAL 1
350
351 /*
352 * Return grace-period delay, zero if there are expedited grace
353 * periods pending, SRCU_INTERVAL otherwise.
354 */
355 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
356 {
357 if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
358 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
359 return 0;
360 return SRCU_INTERVAL;
361 }
362
363 /**
364 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
365 * @ssp: structure to clean up.
366 *
367 * Must invoke this after you are finished using a given srcu_struct that
368 * was initialized via init_srcu_struct(), else you leak memory.
369 */
370 void cleanup_srcu_struct(struct srcu_struct *ssp)
371 {
372 int cpu;
373
374 if (WARN_ON(!srcu_get_delay(ssp)))
375 return; /* Just leak it! */
376 if (WARN_ON(srcu_readers_active(ssp)))
377 return; /* Just leak it! */
378 flush_delayed_work(&ssp->work);
379 for_each_possible_cpu(cpu) {
380 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
381
382 del_timer_sync(&sdp->delay_work);
383 flush_work(&sdp->work);
384 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
385 return;
386 }
387 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
388 WARN_ON(srcu_readers_active(ssp))) {
389 pr_info("%s: Active srcu_struct %p state: %d\n",
390 __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
391 return;
392 }
393 free_percpu(ssp->sda);
394 ssp->sda = NULL;
395 }
396 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
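For reference, a minimal lifecycle sketch of the init/cleanup pairing as seen from a client (editorial example, not part of this file; the my_* names are hypothetical, and statically allocated instances would instead use DEFINE_SRCU() or DEFINE_STATIC_SRCU()):

	#include <linux/srcu.h>

	struct my_driver {
		struct srcu_struct srcu;
	};

	static int my_driver_setup(struct my_driver *drv)
	{
		/* Wires up the combining tree and ->sda; can fail with -ENOMEM. */
		return init_srcu_struct(&drv->srcu);
	}

	static void my_driver_teardown(struct my_driver *drv)
	{
		/*
		 * Caller must ensure that no readers remain and that all
		 * callbacks have drained (see srcu_barrier()), else the
		 * WARN_ON()s above fire and the per-CPU data is leaked.
		 */
		cleanup_srcu_struct(&drv->srcu);
	}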
397
398 /*
399 * Counts the new reader in the appropriate per-CPU element of the
400 * srcu_struct.
401 * Returns an index that must be passed to the matching srcu_read_unlock().
402 */
403 int __srcu_read_lock(struct srcu_struct *ssp)
404 {
405 int idx;
406
407 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
408 this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
409 smp_mb(); /* B */  /* Avoid leaking the critical section. */
410 return idx;
411 }
412 EXPORT_SYMBOL_GPL(__srcu_read_lock);
413
414 /*
415 * Removes the count for the old reader from the appropriate per-CPU
416 * element of the srcu_struct.  Note that this may well be a different
417 * CPU than that which was incremented by the corresponding srcu_read_lock().
418 */
419 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
420 {
421 smp_mb(); /* C */  /* Avoid leaking the critical section. */
422 this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
423 }
424 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
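For reference, a reader-side usage sketch (editorial example; my_srcu, my_ptr, struct my_data, and do_something_with() are hypothetical).  The index returned by srcu_read_lock() selects the counter bank incremented above and must be passed back to srcu_read_unlock(); unlike plain RCU, the critical section may block:

	static DEFINE_SRCU(my_srcu);
	static struct my_data __rcu *my_ptr;	/* Published via rcu_assign_pointer(). */

	static void my_reader(void)
	{
		int idx;
		struct my_data *p;

		idx = srcu_read_lock(&my_srcu);
		p = srcu_dereference(my_ptr, &my_srcu);
		if (p)
			do_something_with(p);	/* May sleep, unlike plain RCU. */
		srcu_read_unlock(&my_srcu, idx);
	}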
425
426 /*
427 * We use an adaptive strategy for synchronize_srcu() and especially for
428 * synchronize_srcu_expedited().  We spin for a fixed time period
429 * (defined below) to allow SRCU readers to exit their read-side critical
430 * sections.  If there are still some readers after a few microseconds,
431 * we repeatedly block for 1-millisecond time periods.
432 */
433 #define SRCU_RETRY_CHECK_DELAY 5
434
435 /*
436 * Start an SRCU grace period.
437 */
438 static void srcu_gp_start(struct srcu_struct *ssp)
439 {
440 struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
441 int state;
442
443 lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
444 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
445 spin_lock_rcu_node(sdp);
446 rcu_segcblist_advance(&sdp->srcu_cblist,
447 rcu_seq_current(&ssp->srcu_gp_seq));
448 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
449 rcu_seq_snap(&ssp->srcu_gp_seq));
450 spin_unlock_rcu_node(sdp);
451 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
452 rcu_seq_start(&ssp->srcu_gp_seq);
453 state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
454 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
455 }
456
457
458 static void srcu_delay_timer(struct timer_list *t)
459 {
460 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
461
462 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
463 }
464
465 static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
466 unsigned long delay)
467 {
468 if (!delay) {
469 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
470 return;
471 }
472
473 timer_reduce(&sdp->delay_work, jiffies + delay);
474 }
475
476 /*
477 * Schedule callback invocation for the specified srcu_data structure,
478 * if possible, on the corresponding CPU.
479 */
480 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
481 {
482 srcu_queue_delayed_work_on(sdp, delay);
483 }
484
485 /*
486 * Schedule callback invocation for all srcu_data structures associated
487 * with the specified srcu_node structure that have callbacks for the
488 * just-completed grace period, the one corresponding to idx.  If possible,
489 * schedule this invocation on the corresponding CPUs.
490 */
491 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
492 unsigned long mask, unsigned long delay)
493 {
494 int cpu;
495
496 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
497 if (!(mask & (1 << (cpu - snp->grplo))))
498 continue;
499 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
500 }
501 }
502
503 /*
504 * Note the end of an SRCU grace period.  Initiates callback invocation
505 * and starts a new grace period if needed.
506 *
507 * The ->srcu_cb_mutex acquisition does not protect any data, but
508 * instead prevents more than one additional grace period from starting
509 * while we are initiating callback invocation.  This allows the
510 * ->srcu_have_cbs[] array to have a finite number of elements.
511 */
512 static void srcu_gp_end(struct srcu_struct *ssp)
513 {
514 unsigned long cbdelay;
515 bool cbs;
516 bool last_lvl;
517 int cpu;
518 unsigned long flags;
519 unsigned long gpseq;
520 int idx;
521 unsigned long mask;
522 struct srcu_data *sdp;
523 struct srcu_node *snp;
524
525 /* Prevent more than one additional grace period. */
526 mutex_lock(&ssp->srcu_cb_mutex);
527
528 /* End the current grace period. */
529 spin_lock_irq_rcu_node(ssp);
530 idx = rcu_seq_state(ssp->srcu_gp_seq);
531 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
532 cbdelay = srcu_get_delay(ssp);
533 WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
534 rcu_seq_end(&ssp->srcu_gp_seq);
535 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
536 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
537 ssp->srcu_gp_seq_needed_exp = gpseq;
538 spin_unlock_irq_rcu_node(ssp);
539 mutex_unlock(&ssp->srcu_gp_mutex); /* ->srcu_cb_mutex makes stable. */
540
541 /* A new grace period can start at this point.  But only one. */
542 /* Initiate callback invocation as needed. */
543 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
544 srcu_for_each_node_breadth_first(ssp, snp) {
545 spin_lock_irq_rcu_node(snp);
546 cbs = false;
547 last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
548 if (last_lvl)
549 cbs = snp->srcu_have_cbs[idx] == gpseq;
550 snp->srcu_have_cbs[idx] = gpseq;
551 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
552 if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
553 snp->srcu_gp_seq_needed_exp = gpseq;
554 mask = snp->srcu_data_have_cbs[idx];
555 snp->srcu_data_have_cbs[idx] = 0;
556 spin_unlock_irq_rcu_node(snp);
557 if (cbs)
558 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
559
560 /* Occasionally prevent srcu_data counter wrap. */
561 if (!(gpseq & counter_wrap_check) && last_lvl)
562 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
563 sdp = per_cpu_ptr(ssp->sda, cpu);
564 spin_lock_irqsave_rcu_node(sdp, flags);
565 if (ULONG_CMP_GE(gpseq,
566 sdp->srcu_gp_seq_needed + 100))
567 sdp->srcu_gp_seq_needed = gpseq;
568 if (ULONG_CMP_GE(gpseq,
569 sdp->srcu_gp_seq_needed_exp + 100))
570 sdp->srcu_gp_seq_needed_exp = gpseq;
571 spin_unlock_irqrestore_rcu_node(sdp, flags);
572 }
573 }
574
575 /* Callback initiation done, allow grace periods after next. */
576 mutex_unlock(&ssp->srcu_cb_mutex);
577
578 /* Start a new grace period if needed. */
579 spin_lock_irq_rcu_node(ssp);
580 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
581 if (!rcu_seq_state(gpseq) &&
582 ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
583 srcu_gp_start(ssp);
584 spin_unlock_irq_rcu_node(ssp);
585 srcu_reschedule(ssp, 0);
586 } else {
587 spin_unlock_irq_rcu_node(ssp);
588 }
589 }
590
591 /*
592 * Funnel-locking scheme to scalably mediate many concurrent expedited
593 * grace-period requests.  This function is invoked for the first known
594 * expedited request for a grace period that has already been requested,
595 * but without expediting.  To start a completely new grace period,
596 * whether expedited or not, use srcu_funnel_gp_start() below.
597 */
598 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
599 unsigned long s)
600 {
601 unsigned long flags;
602
603 for (; snp != NULL; snp = snp->srcu_parent) {
604 if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
605 ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
606 return;
607 spin_lock_irqsave_rcu_node(snp, flags);
608 if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
609 spin_unlock_irqrestore_rcu_node(snp, flags);
610 return;
611 }
612 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
613 spin_unlock_irqrestore_rcu_node(snp, flags);
614 }
615 spin_lock_irqsave_rcu_node(ssp, flags);
616 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
617 ssp->srcu_gp_seq_needed_exp = s;
618 spin_unlock_irqrestore_rcu_node(ssp, flags);
619 }
620
621 /*
622 * Funnel-locking scheme to scalably mediate many concurrent grace-period
623 * requests.  The winner has to do the work of actually starting grace
624 * period s.  Losers must either ensure that their desired grace-period
625 * number is recorded on at least their leaf srcu_node structure, or they
626 * must take steps to invoke their own callbacks.
627 *
628 * Note that this function also does the work of srcu_funnel_exp_start(),
629 * in some cases by directly invoking it.
630 */
631 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
632 unsigned long s, bool do_norm)
633 {
634 unsigned long flags;
635 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
636 struct srcu_node *snp = sdp->mynode;
637 unsigned long snp_seq;
638
639 /* Each pass through the loop does one level of the srcu_node tree. */
640 for (; snp != NULL; snp = snp->srcu_parent) {
641 if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
642 return;
643 spin_lock_irqsave_rcu_node(snp, flags);
644 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
645 snp_seq = snp->srcu_have_cbs[idx];
646 if (snp == sdp->mynode && snp_seq == s)
647 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
648 spin_unlock_irqrestore_rcu_node(snp, flags);
649 if (snp == sdp->mynode && snp_seq != s) {
650 srcu_schedule_cbs_sdp(sdp, do_norm
651 ? SRCU_INTERVAL
652 : 0);
653 return;
654 }
655 if (!do_norm)
656 srcu_funnel_exp_start(ssp, snp, s);
657 return;
658 }
659 snp->srcu_have_cbs[idx] = s;
660 if (snp == sdp->mynode)
661 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
662 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
663 snp->srcu_gp_seq_needed_exp = s;
664 spin_unlock_irqrestore_rcu_node(snp, flags);
665 }
666
667 /* Top of tree, must ensure the grace period will be started. */
668 spin_lock_irqsave_rcu_node(ssp, flags);
669 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
670 /*
671 * Record need for grace period s.  Pair with load
672 * acquire setting up for initialization.
673 */
674 smp_store_release(&ssp->srcu_gp_seq_needed, s);
675 }
676 if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
677 ssp->srcu_gp_seq_needed_exp = s;
678
679 /* If grace period not already done and none in progress, start it. */
680 if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
681 rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
682 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
683 srcu_gp_start(ssp);
684 if (likely(srcu_init_done))
685 queue_delayed_work(rcu_gp_wq, &ssp->work,
686 srcu_get_delay(ssp));
687 else if (list_empty(&ssp->work.work.entry))
688 list_add(&ssp->work.work.entry, &srcu_boot_list);
689 }
690 spin_unlock_irqrestore_rcu_node(ssp, flags);
691 }
692
693 /*
694 * Wait until all readers counted by array index idx complete, but
695 * loop an additional time if there is an expedited grace period pending.
696 * The caller must ensure that ->srcu_idx is not changed while checking.
697 */
698 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
699 {
700 for (;;) {
701 if (srcu_readers_active_idx_check(ssp, idx))
702 return true;
703 if (--trycount + !srcu_get_delay(ssp) <= 0)
704 return false;
705 udelay(SRCU_RETRY_CHECK_DELAY);
706 }
707 }
708
709 /*
710 * Increment the ->srcu_idx counter so that future SRCU readers will
711 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
712 * us to wait for pre-existing readers in a starvation-free manner.
713 */
714 static void srcu_flip(struct srcu_struct *ssp)
715 {
716 /*
717 * Ensure that if this updater saw a given reader's increment
718 * from __srcu_read_lock(), that reader was using an old value
719 * of ->srcu_idx.  Also ensure that if a given reader sees the
720 * new value of ->srcu_idx, this updater's earlier scans cannot
721 * have seen that reader's increments (which is OK, because this
722 * grace period need not wait on that reader).
723 */
724 smp_mb(); /* E */  /* Pairs with B and C. */
725
726 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
727
728 /*
729 * Ensure that if the updater misses an __srcu_read_unlock()
730 * increment, that task's next __srcu_read_lock() will see the
731 * above counter update.  Note that both this memory barrier
732 * and the one in srcu_readers_active_idx_check() provide the
733 * guarantee for __srcu_read_lock().
734 */
735 smp_mb(); /* D */  /* Pairs with C. */
736 }
737
738 /*
739 * If SRCU is likely idle, return true, otherwise return false.
740 *
741 * Note that it is OK for several current from-idle requests for a new
742 * grace period from idle to specify expediting because they will all end
743 * up requesting the same grace period anyhow.  So no loss.
744 *
745 * Note also that the answer is only a heuristic:  The caller uses it
746 * solely to decide whether to auto-expedite, so a false positive merely
747 * results in one unnecessary expedited grace period, and a false
748 * negative merely results in one normal (non-expedited) grace period,
749 * neither of which affects correctness.
750 *
751 * The probes below are lockless:  they check the local CPU's callback
752 * list, the time since the end of the last grace period (to honor the
753 * exp_holdoff module parameter), and finally the grace-period sequence
754 * counters, rechecking ->srcu_gp_seq to reject the case where a new
755 * grace period started during the probe.  Each of these can change at
756 * any time.
757 */
758
759 static bool srcu_might_be_idle(struct srcu_struct *ssp)
760 {
761 unsigned long curseq;
762 unsigned long flags;
763 struct srcu_data *sdp;
764 unsigned long t;
765 unsigned long tlast;
766
767 /* If the local srcu_data structure has callbacks, not idle. */
768 local_irq_save(flags);
769 sdp = this_cpu_ptr(ssp->sda);
770 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
771 local_irq_restore(flags);
772 return false;
773 }
774 local_irq_restore(flags);
775
776 /*
777 * No local callbacks, so probabilistically probe global state.
778 * Exact information would require acquiring locks, which would
779 * kill scalability, hence the probabilistic nature of the probe.
780 */
781
782 /* First, see if enough time has passed since the last GP. */
783 t = ktime_get_mono_fast_ns();
784 tlast = READ_ONCE(ssp->srcu_last_gp_end);
785 if (exp_holdoff == 0 ||
786 time_in_range_open(t, tlast, tlast + exp_holdoff))
787 return false; /* Too soon after last GP. */
788
789 /* Next, check for probable idleness. */
790 curseq = rcu_seq_current(&ssp->srcu_gp_seq);
791 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
792 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
793 return false; /* Grace period in progress, so not idle. */
794 smp_mb(); /* Order ->srcu_gp_seq with prior access. */
795 if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
796 return false; /* GP # changed, so not idle. */
797 return true; /* With reasonable probability, idle! */
798 }
799
800 /*
801 * SRCU callback function to leak a callback.
802 */
803 static void srcu_leak_callback(struct rcu_head *rhp)
804 {
805 }
806
807 /*
808 * Enqueue an SRCU callback on the srcu_data structure associated with
809 * the current CPU and the specified srcu_struct structure, initiating
810 * grace-period processing if it is not already running.
811 *
812 * Note that all CPUs must agree that the grace period extended beyond
813 * all pre-existing SRCU read-side critical sections.  On systems with
814 * more than one CPU, this means that when "func()" is invoked, each CPU
815 * is guaranteed to have executed a full memory barrier since the end of
816 * its last corresponding SRCU read-side critical section whose beginning
817 * preceded the call to call_srcu().  It also means that each CPU executing
818 * an SRCU read-side critical section that continues beyond the start of
819 * "func()" must have executed a memory barrier after the call_srcu()
820 * but before the beginning of that SRCU read-side critical section.
821 * Note that these guarantees include CPUs that are offline, idle, or
822 * executing in user mode, as well as CPUs that are executing in the kernel.
823 *
824 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
825 * resulting SRCU callback function "func()", then both CPU A and CPU B
826 * are guaranteed to execute a full memory barrier during the time interval
827 * between the call to call_srcu() and the invocation of "func()".  This
828 * guarantee applies even if CPU A and CPU B are the same CPU (but again
829 * only if the system has more than one CPU).
830 *
831 * Of course, these guarantees apply only for invocations of call_srcu(),
832 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
833 * srcu_struct structure.
834 */
835 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
836 rcu_callback_t func, bool do_norm)
837 {
838 unsigned long flags;
839 int idx;
840 bool needexp = false;
841 bool needgp = false;
842 unsigned long s;
843 struct srcu_data *sdp;
844
845 check_init_srcu_struct(ssp);
846 if (debug_rcu_head_queue(rhp)) {
847 /* Probable double call_srcu(), so leak the callback. */
848 WRITE_ONCE(rhp->func, srcu_leak_callback);
849 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
850 return;
851 }
852 rhp->func = func;
853 idx = srcu_read_lock(ssp);
854 local_irq_save(flags);
855 sdp = this_cpu_ptr(ssp->sda);
856 spin_lock_rcu_node(sdp);
857 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
858 rcu_segcblist_advance(&sdp->srcu_cblist,
859 rcu_seq_current(&ssp->srcu_gp_seq));
860 s = rcu_seq_snap(&ssp->srcu_gp_seq);
861 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
862 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
863 sdp->srcu_gp_seq_needed = s;
864 needgp = true;
865 }
866 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
867 sdp->srcu_gp_seq_needed_exp = s;
868 needexp = true;
869 }
870 spin_unlock_irqrestore_rcu_node(sdp, flags);
871 if (needgp)
872 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
873 else if (needexp)
874 srcu_funnel_exp_start(ssp, sdp->mynode, s);
875 srcu_read_unlock(ssp, idx);
876 }
877
878 /**
879 * call_srcu() - Queue a callback for invocation after an SRCU grace period
880 * @ssp: srcu_struct on which to queue the callback
881 * @rhp: structure to be used for queueing the SRCU callback.
882 * @func: function to be invoked after the SRCU grace period
883 *
884 * The callback function will be invoked some time after a full SRCU
885 * grace period elapses, in other words after all pre-existing SRCU
886 * read-side critical sections have completed.  However, the callback
887 * function might well execute concurrently with other SRCU read-side
888 * critical sections that started after call_srcu() was invoked.  SRCU
889 * read-side critical sections are delimited by srcu_read_lock() and
890 * srcu_read_unlock(), and may be nested.
891 *
892 * The callback will be invoked from process context, but must nevertheless
893 * be fast and must not block.
894 */
895 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
896 rcu_callback_t func)
897 {
898 __call_srcu(ssp, rhp, func, true);
899 }
900 EXPORT_SYMBOL_GPL(call_srcu);
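For reference, an asynchronous-update sketch (editorial example; struct my_data and my_update() are hypothetical, and my_srcu is the instance from the reader sketch above).  The rcu_head lives inside the protected structure, and the callback frees it after a full grace period:

	struct my_data {
		struct rcu_head rh;
		int value;
	};

	static void my_free_cb(struct rcu_head *rhp)
	{
		/* Invoked from process context after all prior readers finish. */
		kfree(container_of(rhp, struct my_data, rh));
	}

	static void my_update(struct my_data *old)
	{
		/* Returns immediately; the grace period proceeds asynchronously. */
		call_srcu(&my_srcu, &old->rh, my_free_cb);
	}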
901
902 /*
903 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
904 */
905 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
906 {
907 struct rcu_synchronize rcu;
908
909 RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
910 lock_is_held(&rcu_bh_lock_map) ||
911 lock_is_held(&rcu_lock_map) ||
912 lock_is_held(&rcu_sched_lock_map),
913 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
914
915 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
916 return;
917 might_sleep();
918 check_init_srcu_struct(ssp);
919 init_completion(&rcu.completion);
920 init_rcu_head_on_stack(&rcu.head);
921 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
922 wait_for_completion(&rcu.completion);
923 destroy_rcu_head_on_stack(&rcu.head);
924
925 /*
926 * Make sure that later code is ordered after the SRCU grace
927 * period.  This pairs with the spin_lock_irq_rcu_node()
928 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
929 * because the current CPU might have been totally uninvolved with
930 * (and thus unordered against) that grace period.
931 */
932 smp_mb();
933 }
934
935 /**
936 * synchronize_srcu_expedited - Brute-force SRCU grace period
937 * @ssp: srcu_struct with which to synchronize.
938 *
939 * Wait for an SRCU grace period to elapse, but be more aggressive about
940 * spinning rather than blocking when waiting.
941 *
942 * Note that synchronize_srcu_expedited() has the same deadlock and
943 * memory-ordering properties as does synchronize_srcu().
944 */
945 void synchronize_srcu_expedited(struct srcu_struct *ssp)
946 {
947 __synchronize_srcu(ssp, rcu_gp_is_normal());
948 }
949 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
950
951 /**
952 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
953 * @ssp: srcu_struct with which to synchronize.
954 *
955 * Wait for the count to drain to zero of both indexes.  To avoid the
956 * possible starvation of synchronize_srcu(), it waits for the count of
957 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
958 * and then flip the srcu_idx and wait for the count of the other index.
959 *
960 * Can block; must be called from process context.
961 *
962 * Note that it is illegal to call synchronize_srcu() from the corresponding
963 * SRCU read-side critical section; doing so will result in deadlock.
964 * However, it is perfectly legal to call synchronize_srcu() on one
965 * srcu_struct from some other srcu_struct's read-side critical section,
966 * as long as the resulting graph of srcu_structs is acyclic.
967 *
968 * There are memory-ordering constraints implied by synchronize_srcu().
969 * On systems with more than one CPU, when synchronize_srcu() returns,
970 * each CPU is guaranteed to have executed a full memory barrier since
971 * the end of its last corresponding SRCU read-side critical section
972 * whose beginning preceded the call to synchronize_srcu().  In addition,
973 * each CPU having an SRCU read-side critical section that extends beyond
974 * the return from synchronize_srcu() is guaranteed to have executed a
975 * full memory barrier after the beginning of synchronize_srcu() and before
976 * the beginning of that SRCU read-side critical section.  Note that these
977 * guarantees include CPUs that are offline, idle, or executing in user mode,
978 * as well as CPUs that are executing in the kernel.
979 *
980 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
981 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
982 * to have executed a full memory barrier during the execution of
983 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
984 * are the same CPU, but again only if the system has more than one CPU.
985 *
986 * Of course, these memory-ordering guarantees apply only when
987 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
988 * passed the same srcu_struct structure.
989 *
990 * If SRCU is likely idle, expedite the first request.  This semantic
991 * was provided by Classic SRCU, and is relied upon by its users, so TREE
992 * SRCU must also provide it.  Note that detecting idleness is heuristic
993 * and subject to both false positives and negatives.
994 */
995 void synchronize_srcu(struct srcu_struct *ssp)
996 {
997 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
998 synchronize_srcu_expedited(ssp);
999 else
1000 __synchronize_srcu(ssp, true);
1001 }
1002 EXPORT_SYMBOL_GPL(synchronize_srcu);
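For reference, a synchronous-update sketch (editorial example; my_ptr, my_lock, and my_remove() are hypothetical, my_srcu and struct my_data as in the earlier sketches).  The updater unpublishes the old version, waits for pre-existing readers, then reclaims; this must run in process context and never inside a my_srcu read-side critical section (deadlock, per the comment above):

	static DEFINE_MUTEX(my_lock);	/* Serializes updaters. */

	static void my_remove(void)
	{
		struct my_data *old;

		mutex_lock(&my_lock);
		old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
		rcu_assign_pointer(my_ptr, NULL);
		mutex_unlock(&my_lock);

		synchronize_srcu(&my_srcu);	/* Wait for pre-existing readers. */
		kfree(old);			/* No reader can still see it. */
	}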
1003
1004 /*
1005 * Callback function for srcu_barrier() use.
1006 */
1007 static void srcu_barrier_cb(struct rcu_head *rhp)
1008 {
1009 struct srcu_data *sdp;
1010 struct srcu_struct *ssp;
1011
1012 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1013 ssp = sdp->ssp;
1014 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1015 complete(&ssp->srcu_barrier_completion);
1016 }
1017
1018 /**
1019 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1020 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1021 */
1022 void srcu_barrier(struct srcu_struct *ssp)
1023 {
1024 int cpu;
1025 struct srcu_data *sdp;
1026 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1027
1028 check_init_srcu_struct(ssp);
1029 mutex_lock(&ssp->srcu_barrier_mutex);
1030 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1031 smp_mb(); /* Force ordering following return. */
1032 mutex_unlock(&ssp->srcu_barrier_mutex);
1033 return; /* Someone else did our work for us. */
1034 }
1035 rcu_seq_start(&ssp->srcu_barrier_seq);
1036 init_completion(&ssp->srcu_barrier_completion);
1037
1038 /* Initial count prevents reaching zero until all CBs are posted. */
1039 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1040
1041 /*
1042 * Each pass through this loop enqueues a callback, but only
1043 * on CPUs already having callbacks enqueued.  Note that if
1044 * a CPU already has callbacks enqueued, it must have already
1045 * registered the need for a future grace period, so all we
1046 * need do is enqueue a callback that will use the same
1047 * grace period as the last callback already in the queue.
1048 */
1049 for_each_possible_cpu(cpu) {
1050 sdp = per_cpu_ptr(ssp->sda, cpu);
1051 spin_lock_irq_rcu_node(sdp);
1052 atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1053 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1054 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1055 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1056 &sdp->srcu_barrier_head, 0)) {
1057 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1058 atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1059 }
1060 spin_unlock_irq_rcu_node(sdp);
1061 }
1062
1063 /* Remove the initial count, at which point reaching zero can happen. */
1064 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1065 complete(&ssp->srcu_barrier_completion);
1066 wait_for_completion(&ssp->srcu_barrier_completion);
1067
1068 rcu_seq_end(&ssp->srcu_barrier_seq);
1069 mutex_unlock(&ssp->srcu_barrier_mutex);
1070 }
1071 EXPORT_SYMBOL_GPL(srcu_barrier);
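For reference, a teardown-ordering sketch (editorial example; my_exit() and my_stop_updaters() are hypothetical, my_srcu as above).  srcu_barrier() waits only for already-queued callbacks, so new call_srcu() invocations must be stopped first, after which cleanup is safe:

	static void my_exit(void)
	{
		my_stop_updaters();		/* Hypothetical: no further call_srcu(). */
		srcu_barrier(&my_srcu);		/* Wait for queued callbacks to run. */
		cleanup_srcu_struct(&my_srcu);	/* No readers, no callbacks remain. */
	}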
1072
1073 /**
1074 * srcu_batches_completed - return batches completed.
1075 * @ssp: srcu_struct on which to report batch completion.
1076 *
1077 * Report the number of batches, correlated with, but not necessarily
1078 * precisely the same as, the number of grace periods that have elapsed.
1079 */
1080 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1081 {
1082 return ssp->srcu_idx;
1083 }
1084 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1085
1086 /*
1087 * Core of the SRCU grace-period state machine.  Push state bits of
1088 * ->srcu_gp_seq from SRCU_STATE_IDLE through SCAN1 and SCAN2, then
1089 * invoke srcu_gp_end() to complete the grace period.
1090 */
1091 static void srcu_advance_state(struct srcu_struct *ssp)
1092 {
1093 int idx;
1094
1095 mutex_lock(&ssp->srcu_gp_mutex);
1096
1097 /*
1098 * Because readers might be delayed for an extended period after
1099 * fetching ->srcu_idx for their index, at any point in time there
1100 * might well be readers using both idx=0 and idx=1.  We therefore
1101 * need to wait for readers to clear both idx values before invoking
1102 * a callback.
1103 *
1104 * The load-acquire ensures that we see the accesses performed
1105 * by the prior grace period.
1106 */
1107 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq));
1108 if (idx == SRCU_STATE_IDLE) {
1109 spin_lock_irq_rcu_node(ssp);
1110 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1111 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1112 spin_unlock_irq_rcu_node(ssp);
1113 mutex_unlock(&ssp->srcu_gp_mutex);
1114 return;
1115 }
1116 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1117 if (idx == SRCU_STATE_IDLE)
1118 srcu_gp_start(ssp);
1119 spin_unlock_irq_rcu_node(ssp);
1120 if (idx != SRCU_STATE_IDLE) {
1121 mutex_unlock(&ssp->srcu_gp_mutex);
1122 return;
1123 }
1124 }
1125
1126 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1127 idx = 1 ^ (ssp->srcu_idx & 1);
1128 if (!try_check_zero(ssp, idx, 1)) {
1129 mutex_unlock(&ssp->srcu_gp_mutex);
1130 return;
1131 }
1132 srcu_flip(ssp);
1133 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1134 }
1135
1136 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1137
1138 /*
1139 * SRCU read-side critical sections are normally short,
1140 * so check at least twice in quick succession after a flip.
1141 */
1142 idx = 1 ^ (ssp->srcu_idx & 1);
1143 if (!try_check_zero(ssp, idx, 2)) {
1144 mutex_unlock(&ssp->srcu_gp_mutex);
1145 return;
1146 }
1147 srcu_gp_end(ssp);
1148 }
1149 }
1150
1151 /*
1152 * Invoke a limited number of SRCU callbacks that have passed through
1153 * their grace period.  If there are more to do, SRCU will reschedule
1154 * the workqueue.  Note that needed memory barriers have been executed
1155 * in this task's context by srcu_readers_active_idx_check().
1156 */
1157 static void srcu_invoke_callbacks(struct work_struct *work)
1158 {
1159 bool more;
1160 struct rcu_cblist ready_cbs;
1161 struct rcu_head *rhp;
1162 struct srcu_data *sdp;
1163 struct srcu_struct *ssp;
1164
1165 sdp = container_of(work, struct srcu_data, work);
1166
1167 ssp = sdp->ssp;
1168 rcu_cblist_init(&ready_cbs);
1169 spin_lock_irq_rcu_node(sdp);
1170 rcu_segcblist_advance(&sdp->srcu_cblist,
1171 rcu_seq_current(&ssp->srcu_gp_seq));
1172 if (sdp->srcu_cblist_invoking ||
1173 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1174 spin_unlock_irq_rcu_node(sdp);
1175 return;
1176 }
1177
1178 /* We are on the job!  Extract and invoke ready callbacks. */
1179 sdp->srcu_cblist_invoking = true;
1180 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1181 spin_unlock_irq_rcu_node(sdp);
1182 rhp = rcu_cblist_dequeue(&ready_cbs);
1183 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1184 debug_rcu_head_unqueue(rhp);
1185 local_bh_disable();
1186 rhp->func(rhp);
1187 local_bh_enable();
1188 }
1189
1190 /*
1191 * Update counts, accelerate new callbacks, and if needed,
1192 * schedule another round of callback invocation.
1193 */
1194 spin_lock_irq_rcu_node(sdp);
1195 rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1196 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1197 rcu_seq_snap(&ssp->srcu_gp_seq));
1198 sdp->srcu_cblist_invoking = false;
1199 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1200 spin_unlock_irq_rcu_node(sdp);
1201 if (more)
1202 srcu_schedule_cbs_sdp(sdp, 0);
1203 }
1204
1205 /*
1206 * Finished one round of SRCU grace period.  Start another if there are
1207 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1208 */
1209 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1210 {
1211 bool pushgp = true;
1212
1213 spin_lock_irq_rcu_node(ssp);
1214 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1215 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1216 /* All requests fulfilled, time to go idle. */
1217 pushgp = false;
1218 }
1219 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1220 /* Outstanding request and no GP.  Start one. */
1221 srcu_gp_start(ssp);
1222 }
1223 spin_unlock_irq_rcu_node(ssp);
1224
1225 if (pushgp)
1226 queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1227 }
1228
1229 /*
1230 * This is the work-queue function that handles SRCU grace periods.
1231 */
1232 static void process_srcu(struct work_struct *work)
1233 {
1234 struct srcu_struct *ssp;
1235
1236 ssp = container_of(work, struct srcu_struct, work.work);
1237
1238 srcu_advance_state(ssp);
1239 srcu_reschedule(ssp, srcu_get_delay(ssp));
1240 }
1241
1242 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1243 struct srcu_struct *ssp, int *flags,
1244 unsigned long *gp_seq)
1245 {
1246 if (test_type != SRCU_FLAVOR)
1247 return;
1248 *flags = 0;
1249 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1250 }
1251 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1252
1253 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1254 {
1255 int cpu;
1256 int idx;
1257 unsigned long s0 = 0, s1 = 0;
1258
1259 idx = ssp->srcu_idx & 0x1;
1260 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1261 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
1262 for_each_possible_cpu(cpu) {
1263 unsigned long l0, l1;
1264 unsigned long u0, u1;
1265 long c0, c1;
1266 struct srcu_data *sdp;
1267
1268 sdp = per_cpu_ptr(ssp->sda, cpu);
1269 u0 = sdp->srcu_unlock_count[!idx];
1270 u1 = sdp->srcu_unlock_count[idx];
1271
1272 /*
1273 * Make sure that a lock is always counted if the corresponding
1274 * unlock is counted.
1275 */
1276 smp_rmb();
1277
1278 l0 = sdp->srcu_lock_count[!idx];
1279 l1 = sdp->srcu_lock_count[idx];
1280
1281 c0 = l0 - u0;
1282 c1 = l1 - u1;
1283 pr_cont(" %d(%ld,%ld %c)",
1284 cpu, c0, c1,
1285 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1286 s0 += c0;
1287 s1 += c1;
1288 }
1289 pr_cont(" T(%ld,%ld)\n", s0, s1);
1290 }
1291 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1292
1293 static int __init srcu_bootup_announce(void)
1294 {
1295 pr_info("Hierarchical SRCU implementation.\n");
1296 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1297 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1298 return 0;
1299 }
1300 early_initcall(srcu_bootup_announce);
1301
1302 void __init srcu_init(void)
1303 {
1304 struct srcu_struct *ssp;
1305
1306 srcu_init_done = true;
1307 while (!list_empty(&srcu_boot_list)) {
1308 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1309 work.work.entry);
1310 check_init_srcu_struct(ssp);
1311 list_del_init(&ssp->work.work.entry);
1312 queue_work(rcu_gp_wq, &ssp->work.work);
1313 }
1314 }
1315
1316 #ifdef CONFIG_MODULES
1317
1318 /* Initialize any global-scope srcu_struct structures used by this module. */
1319 static int srcu_module_coming(struct module *mod)
1320 {
1321 int i;
1322 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1323 int ret;
1324
1325 for (i = 0; i < mod->num_srcu_structs; i++) {
1326 ret = init_srcu_struct(*(sspp++));
1327 if (WARN_ON_ONCE(ret))
1328 return ret;
1329 }
1330 return 0;
1331 }
1332
1333 /* Clean up any global-scope srcu_struct structures used by this module. */
1334 static void srcu_module_going(struct module *mod)
1335 {
1336 int i;
1337 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1338
1339 for (i = 0; i < mod->num_srcu_structs; i++)
1340 cleanup_srcu_struct(*(sspp++));
1341 }
1342
1343 /* Handle one module, either coming or going. */
1344 static int srcu_module_notify(struct notifier_block *self,
1345 unsigned long val, void *data)
1346 {
1347 struct module *mod = data;
1348 int ret = 0;
1349
1350 switch (val) {
1351 case MODULE_STATE_COMING:
1352 ret = srcu_module_coming(mod);
1353 break;
1354 case MODULE_STATE_GOING:
1355 srcu_module_going(mod);
1356 break;
1357 default:
1358 break;
1359 }
1360 return ret;
1361 }
1362
1363 static struct notifier_block srcu_module_nb = {
1364 .notifier_call = srcu_module_notify,
1365 .priority = 0,
1366 };
1367
1368 static __init int init_srcu_module_notifier(void)
1369 {
1370 int ret;
1371
1372 ret = register_module_notifier(&srcu_module_nb);
1373 if (ret)
1374 pr_warn("Failed to register srcu module notifier\n");
1375 return ret;
1376 }
1377 late_initcall(init_srcu_module_notifier);
1378
1379 #endif