This source file includes the following definitions:
- rcu_exp_gp_seq_start
- rcu_exp_gp_seq_endval
- rcu_exp_gp_seq_end
- rcu_exp_gp_seq_snap
- rcu_exp_gp_seq_done
- sync_exp_reset_tree_hotplug
- sync_exp_reset_tree
- sync_rcu_preempt_exp_done
- sync_rcu_preempt_exp_done_unlocked
- __rcu_report_exp_rnp
- rcu_report_exp_rnp
- rcu_report_exp_cpu_mult
- rcu_report_exp_rdp
- sync_exp_work_done
- exp_funnel_lock
- sync_rcu_exp_select_node_cpus
- sync_rcu_exp_select_cpus
- synchronize_sched_expedited_wait
- rcu_exp_wait_wake
- rcu_exp_sel_wait_wake
- wait_rcu_exp_gp
- rcu_exp_handler
- sync_sched_exp_online_cleanup
- rcu_print_task_exp_stall
- rcu_exp_need_qs
- rcu_exp_handler
- sync_sched_exp_online_cleanup
- rcu_print_task_exp_stall
- synchronize_rcu_expedited
1 /*
2  * RCU expedited grace periods.
3  */
4
5
6
7
8
9
10 #include <linux/lockdep.h>
11
12 static void rcu_exp_handler(void *unused);
13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14
15 /*
16  * Record the start of an expedited grace period.
17  */
18 static void rcu_exp_gp_seq_start(void)
19 {
20 rcu_seq_start(&rcu_state.expedited_sequence);
21 }
22
23 /*
24  * Return the value that the expedited-grace-period counter will take
25  * once the end of the current expedited grace period is recorded.
26  */
27 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
28 {
29 return rcu_seq_endval(&rcu_state.expedited_sequence);
30 }
31
32 /*
33  * Record the end of an expedited grace period.
34  */
35 static void rcu_exp_gp_seq_end(void)
36 {
37 rcu_seq_end(&rcu_state.expedited_sequence);
38 smp_mb(); /* Ensure that consecutive grace periods serialize. */
39 }
40
41 /* Take a snapshot of the expedited-grace-period counter, returning the
42  * earliest value that will indicate that a full expedited grace period
43  * has elapsed since the current time. */
44 static unsigned long rcu_exp_gp_seq_snap(void)
45 {
46 unsigned long s;
47
48 smp_mb(); /* Caller's modifications seen first by other CPUs. */
49 s = rcu_seq_snap(&rcu_state.expedited_sequence);
50 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
51 return s;
52 }
53
54
55 /*
56  * Return true if the expedited grace period denoted by the given
57  * sequence-counter snapshot has completed.
58  */
59 static bool rcu_exp_gp_seq_done(unsigned long s)
60 {
61 return rcu_seq_done(&rcu_state.expedited_sequence, s);
62 }
63
64 /*
65  * Reset the ->expmaskinit values in the rcu_node tree to reflect any
66  * recent CPU-hotplug activity.  This is invoked at the start of each
67  * expedited grace period, but does real work only when the count of
68  * online CPUs (rcu_state.ncpus) has changed since the last call.
69  */
70
71 static void sync_exp_reset_tree_hotplug(void)
72 {
73 bool done;
74 unsigned long flags;
75 unsigned long mask;
76 unsigned long oldmask;
77 int ncpus = smp_load_acquire(&rcu_state.ncpus);
78 struct rcu_node *rnp;
79 struct rcu_node *rnp_up;
80
81 /* If no new CPUs onlined since last time, nothing to do. */
82 if (likely(ncpus == rcu_state.ncpus_snap))
83 return;
84 rcu_state.ncpus_snap = ncpus;
85
86 /*
87  * Each pass through the following loop propagates newly onlined
88  * CPUs for the current rcu_node structure up the rcu_node tree.
89  */
90 rcu_for_each_leaf_node(rnp) {
91 raw_spin_lock_irqsave_rcu_node(rnp, flags);
92 if (rnp->expmaskinit == rnp->expmaskinitnext) {
93 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
94 continue;
95 }
96
97 /* Update this node's mask, track old value for propagation. */
98 oldmask = rnp->expmaskinit;
99 rnp->expmaskinit = rnp->expmaskinitnext;
100 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
101
102 /* If it was already nonzero, nothing to propagate. */
103 if (oldmask)
104 continue;
105
106 /* Propagate the newly onlined leaf up the tree. */
107 mask = rnp->grpmask;
108 rnp_up = rnp->parent;
109 done = false;
110 while (rnp_up) {
111 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
112 if (rnp_up->expmaskinit)
113 done = true;
114 rnp_up->expmaskinit |= mask;
115 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
116 if (done)
117 break;
118 mask = rnp_up->grpmask;
119 rnp_up = rnp_up->parent;
120 }
121 }
122 }
123
124
125 /* Reset the ->expmask values in the rcu_node tree in preparation for
126  * a new expedited grace period.
127  */
128 static void __maybe_unused sync_exp_reset_tree(void)
129 {
130 unsigned long flags;
131 struct rcu_node *rnp;
132
133 sync_exp_reset_tree_hotplug();
134 rcu_for_each_node_breadth_first(rnp) {
135 raw_spin_lock_irqsave_rcu_node(rnp, flags);
136 WARN_ON_ONCE(rnp->expmask);
137 WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
138 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
139 }
140 }
141
142
143 /*
144  * Return true if there is no RCU expedited grace period in progress
145  * for the specified rcu_node structure, in other words, if all CPUs
146  * and tasks covered by the specified rcu_node structure have done
147  * their bit for the current expedited grace period.
148  *
149  * The caller must hold the specified rcu_node structure's ->lock.
150  */
151 static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
152 {
153 raw_lockdep_assert_held_rcu_node(rnp);
154
155 return rnp->exp_tasks == NULL &&
156 READ_ONCE(rnp->expmask) == 0;
157 }
158
159 /*
160  * Like sync_rcu_preempt_exp_done(), but this function assumes the
161  * caller does not hold the rcu_node's ->lock, so it acquires and
162  * releases the lock itself.
163  */
164 static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
165 {
166 unsigned long flags;
167 bool ret;
168
169 raw_spin_lock_irqsave_rcu_node(rnp, flags);
170 ret = sync_rcu_preempt_exp_done(rnp);
171 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
172
173 return ret;
174 }
175
176 /*
177  * Report the exit from an RCU read-side critical section for the last
178  * task that queued itself during or before the current expedited
179  * preemptible-RCU grace period.  This event is reported either to the
180  * rcu_node structure on which the task was queued or to one of that
181  * structure's ancestors, iteratively up the tree.  Once the root is
182  * reached and the grace period is complete, rcu_state.expedited_wq is
183  * awakened if @wake is set.
184  *
185  * The caller must hold the specified rcu_node's ->lock, which is released.
186  */
187 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
188 bool wake, unsigned long flags)
189 __releases(rnp->lock)
190 {
191 unsigned long mask;
192
193 for (;;) {
194 if (!sync_rcu_preempt_exp_done(rnp)) {
195 if (!rnp->expmask)
196 rcu_initiate_boost(rnp, flags);
197 else
198 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199 break;
200 }
201 if (rnp->parent == NULL) {
202 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
203 if (wake) {
204 smp_mb(); /* Expedited GP seen done before wakeup. */
205 swake_up_one(&rcu_state.expedited_wq);
206 }
207 break;
208 }
209 mask = rnp->grpmask;
210 raw_spin_unlock_rcu_node(rnp);
211 rnp = rnp->parent;
212 raw_spin_lock_rcu_node(rnp);
213 WARN_ON_ONCE(!(rnp->expmask & mask));
214 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
215 }
216 }
217
218 /*
219  * Report an expedited quiescent state for the specified rcu_node
220  * structure: a lock-acquiring wrapper around __rcu_report_exp_rnp().
221  */
222 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
223 {
224 unsigned long flags;
225
226 raw_spin_lock_irqsave_rcu_node(rnp, flags);
227 __rcu_report_exp_rnp(rnp, wake, flags);
228 }
229
230 /*
231  * Report an expedited quiescent state for multiple CPUs, all covered
232  * by the specified leaf rcu_node structure.
233  */
234 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
235 unsigned long mask, bool wake)
236 {
237 unsigned long flags;
238
239 raw_spin_lock_irqsave_rcu_node(rnp, flags);
240 if (!(rnp->expmask & mask)) {
241 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
242 return;
243 }
244 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
245 __rcu_report_exp_rnp(rnp, wake, flags);
246 }
247
248 /*
249  * Report an expedited quiescent state for the specified rcu_data (CPU).
250  */
251 static void rcu_report_exp_rdp(struct rcu_data *rdp)
252 {
253 WRITE_ONCE(rdp->exp_deferred_qs, false);
254 rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
255 }
256
257 /* Common code for work-done checking. */
258 static bool sync_exp_work_done(unsigned long s)
259 {
260 if (rcu_exp_gp_seq_done(s)) {
261 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
262 smp_mb(); /* Ensure test happens before caller frees memory. */
263 return true;
264 }
265 return false;
266 }
267
268
269 /*
270  * Funnel-lock acquisition for expedited grace periods.  Returns true if
271  * some other task completed an expedited grace period that this task can
272  * piggy-back on, and with no mutex held.  Otherwise, returns false with
273  * rcu_state.exp_mutex held: the caller must then drive the grace period.
274  */
275 static bool exp_funnel_lock(unsigned long s)
276 {
277 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
278 struct rcu_node *rnp = rdp->mynode;
279 struct rcu_node *rnp_root = rcu_get_root();
280
281 /* Low-contention fastpath. */
282 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
283 (rnp == rnp_root ||
284 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
285 mutex_trylock(&rcu_state.exp_mutex))
286 goto fastpath;
287
288 /*
289  * Each pass through the following loop works its way up the rcu_node
290  * tree, returning if someone else has already done the needed grace
291  * period on this task's behalf, and otherwise falling through to
292  * acquire ->exp_mutex.  The mapping from CPU to rcu_node structure
293  * can be inexact, as it is just promoting locality.
294  */
295 for (; rnp != NULL; rnp = rnp->parent) {
296 if (sync_exp_work_done(s))
297 return true;
298
299 /* Work not done, so either wait here or go up a level. */
300 spin_lock(&rnp->exp_lock);
301 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
302
303 /* Someone else is driving this grace period, so wait for them. */
304 spin_unlock(&rnp->exp_lock);
305 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
306 rnp->grplo, rnp->grphi,
307 TPS("wait"));
308 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
309 sync_exp_work_done(s));
310 return true;
311 }
312 rnp->exp_seq_rq = s;
313 spin_unlock(&rnp->exp_lock);
314 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
315 rnp->grplo, rnp->grphi, TPS("nxtlvl"));
316 }
317 mutex_lock(&rcu_state.exp_mutex);
318 fastpath:
319 if (sync_exp_work_done(s)) {
320 mutex_unlock(&rcu_state.exp_mutex);
321 return true;
322 }
323 rcu_exp_gp_seq_start();
324 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
325 return false;
326 }
327
328
329 /* Select the CPUs within the specified leaf rcu_node structure that
330  * the upcoming expedited grace period needs to wait for.
331  */
332 static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
333 {
334 int cpu;
335 unsigned long flags;
336 unsigned long mask_ofl_test;
337 unsigned long mask_ofl_ipi;
338 int ret;
339 struct rcu_exp_work *rewp =
340 container_of(wp, struct rcu_exp_work, rew_work);
341 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
342
343 raw_spin_lock_irqsave_rcu_node(rnp, flags);
344
345 /* Each pass checks a CPU for identity, offline, and idle. */
346 mask_ofl_test = 0;
347 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
348 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
349 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
350 int snap;
351
352 if (raw_smp_processor_id() == cpu ||
353 !(rnp->qsmaskinitnext & mask)) {
354 mask_ofl_test |= mask;
355 } else {
356 snap = rcu_dynticks_snap(rdp);
357 if (rcu_dynticks_in_eqs(snap))
358 mask_ofl_test |= mask;
359 else
360 rdp->exp_dynticks_snap = snap;
361 }
362 }
363 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
364
365 /*
366  * Need to wait for any blocked tasks as well.  Note that
367  * additional blocking tasks will also block the expedited GP
368  * until such time as the ->expmask bits are cleared.
369  */
370 if (rcu_preempt_has_tasks(rnp))
371 rnp->exp_tasks = rnp->blkd_tasks.next;
372 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
373
374 /* IPI the remaining CPUs for expedited quiescent state. */
375 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
376 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
377 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
378
379 retry_ipi:
380 if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
381 mask_ofl_test |= mask;
382 continue;
383 }
384 if (get_cpu() == cpu) {
385 put_cpu();
386 continue;
387 }
388 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
389 put_cpu();
390 if (!ret) {
391 mask_ofl_ipi &= ~mask;
392 continue;
393 }
394 /* IPI failed: raced with a CPU-hotplug operation. */
395 raw_spin_lock_irqsave_rcu_node(rnp, flags);
396 if ((rnp->qsmaskinitnext & mask) &&
397 (rnp->expmask & mask)) {
398 /* CPU is still online, so delay for a bit and retry the IPI. */
399 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
400 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
401 schedule_timeout_uninterruptible(1);
402 goto retry_ipi;
403 }
404 /* If the CPU's quiescent state was already reported, forget the IPI. */
405 if (!(rnp->expmask & mask))
406 mask_ofl_ipi &= ~mask;
407 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
408 }
409
410 mask_ofl_test |= mask_ofl_ipi;
411 if (mask_ofl_test)
412 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
413 }
414
415
416 /* Select the rcu_node structures that the upcoming expedited grace
417  * period needs to wait on, farming the per-leaf work out to workqueues.
418  */
419 static void sync_rcu_exp_select_cpus(void)
420 {
421 int cpu;
422 struct rcu_node *rnp;
423
424 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
425 sync_exp_reset_tree();
426 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
427
428 /* Schedule work for each leaf rcu_node structure. */
429 rcu_for_each_leaf_node(rnp) {
430 rnp->exp_need_flush = false;
431 if (!READ_ONCE(rnp->expmask))
432 continue;
433 if (!READ_ONCE(rcu_par_gp_wq) ||
434 rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
435 rcu_is_last_leaf_node(rnp)) {
436 /* No workqueue yet, still booting, or last leaf: do a direct call. */
437 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
438 continue;
439 }
440 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
441 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
442 /* If all CPUs in this leaf are offline, queue the work unbound. */
443 if (unlikely(cpu > rnp->grphi - rnp->grplo))
444 cpu = WORK_CPU_UNBOUND;
445 else
446 cpu += rnp->grplo;
447 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
448 rnp->exp_need_flush = true;
449 }
450
451 /* Wait for the per-leaf workqueue handlers (if any) to complete. */
452 rcu_for_each_leaf_node(rnp)
453 if (rnp->exp_need_flush)
454 flush_work(&rnp->rew.rew_work);
455 }
456 /* Wait for the expedited grace period, printing stall warnings as needed. */
457 static void synchronize_sched_expedited_wait(void)
458 {
459 int cpu;
460 unsigned long jiffies_stall;
461 unsigned long jiffies_start;
462 unsigned long mask;
463 int ndetected;
464 struct rcu_node *rnp;
465 struct rcu_node *rnp_root = rcu_get_root();
466 int ret;
467
468 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
469 jiffies_stall = rcu_jiffies_till_stall_check();
470 jiffies_start = jiffies;
471
472 for (;;) {
473 ret = swait_event_timeout_exclusive(
474 rcu_state.expedited_wq,
475 sync_rcu_preempt_exp_done_unlocked(rnp_root),
476 jiffies_stall);
477 if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
478 return;
479 WARN_ON(ret < 0);
480 if (rcu_cpu_stall_suppress)
481 continue;
482 panic_on_rcu_stall();
483 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
484 rcu_state.name);
485 ndetected = 0;
486 rcu_for_each_leaf_node(rnp) {
487 ndetected += rcu_print_task_exp_stall(rnp);
488 for_each_leaf_node_possible_cpu(rnp, cpu) {
489 struct rcu_data *rdp;
490
491 mask = leaf_node_cpu_bit(rnp, cpu);
492 if (!(READ_ONCE(rnp->expmask) & mask))
493 continue;
494 ndetected++;
495 rdp = per_cpu_ptr(&rcu_data, cpu);
496 pr_cont(" %d-%c%c%c", cpu,
497 "O."[!!cpu_online(cpu)],
498 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
499 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
500 }
501 }
502 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
503 jiffies - jiffies_start, rcu_state.expedited_sequence,
504 READ_ONCE(rnp_root->expmask),
505 ".T"[!!rnp_root->exp_tasks]);
506 if (ndetected) {
507 pr_err("blocking rcu_node structures:");
508 rcu_for_each_node_breadth_first(rnp) {
509 if (rnp == rnp_root)
510 continue;
511 if (sync_rcu_preempt_exp_done_unlocked(rnp))
512 continue;
513 pr_cont(" l=%u:%d-%d:%#lx/%c",
514 rnp->level, rnp->grplo, rnp->grphi,
515 READ_ONCE(rnp->expmask),
516 ".T"[!!rnp->exp_tasks]);
517 }
518 pr_cont("\n");
519 }
520 rcu_for_each_leaf_node(rnp) {
521 for_each_leaf_node_possible_cpu(rnp, cpu) {
522 mask = leaf_node_cpu_bit(rnp, cpu);
523 if (!(READ_ONCE(rnp->expmask) & mask))
524 continue;
525 dump_cpu_task(cpu);
526 }
527 }
528 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
529 }
530 }
531
532
533 /*
534  * Wait for the current expedited grace period to complete, then wake
535  * up everyone who piggybacked on the just-completed expedited grace
536  * period.  Also update the ->exp_seq_rq counters to avoid counter wrap.
537  */
538 static void rcu_exp_wait_wake(unsigned long s)
539 {
540 struct rcu_node *rnp;
541
542 synchronize_sched_expedited_wait();
543
544 /* Switch over to wakeup mode, allowing the next grace period, but
545  * only the next one, to proceed.
546  */
547 mutex_lock(&rcu_state.exp_wake_mutex);
548 rcu_exp_gp_seq_end();
549 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
550
551 rcu_for_each_node_breadth_first(rnp) {
552 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
553 spin_lock(&rnp->exp_lock);
554 /* Recheck, avoiding hang in case someone else got here first. */
555 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
556 rnp->exp_seq_rq = s;
557 spin_unlock(&rnp->exp_lock);
558 }
559 smp_mb(); /* All above changes visible before waking waiters. */
560 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
561 }
562 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
563 mutex_unlock(&rcu_state.exp_wake_mutex);
564 }
565
566
567 /* Common code to drive an expedited grace period forward, used by
568  * workqueues and mid-boot-time tasks.
569  */
570 static void rcu_exp_sel_wait_wake(unsigned long s)
571 {
572 /* Initialize the rcu_node tree in preparation for the wait. */
573 sync_rcu_exp_select_cpus();
574
575 /* Wait and clean up, including waking everyone. */
576 rcu_exp_wait_wake(s);
577 }
578
579
580 /* Work-queue handler to drive an expedited grace period forward. */
581
582 static void wait_rcu_exp_gp(struct work_struct *wp)
583 {
584 struct rcu_exp_work *rewp;
585
586 rewp = container_of(wp, struct rcu_exp_work, rew_work);
587 rcu_exp_sel_wait_wake(rewp->rew_s);
588 }
589
590 #ifdef CONFIG_PREEMPT_RCU
591
592 /*
593  * Remote handler for smp_call_function_single().  If there is an
594  * RCU read-side critical section in effect, request that the
595  * next rcu_read_unlock() record the quiescent state and do any
596  * required processing.  Otherwise, immediately report the quiescent
597  * state.
598  */
599 static void rcu_exp_handler(void *unused)
600 {
601 unsigned long flags;
602 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
603 struct rcu_node *rnp = rdp->mynode;
604 struct task_struct *t = current;
605
606
607 /* First, the common case of not being in an RCU read-side critical
608  * section.  If the CPU is also enabled or idle, immediately report
609  * the quiescent state, otherwise defer it.
610  */
611 if (!t->rcu_read_lock_nesting) {
612 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
613 rcu_dynticks_curr_cpu_in_eqs()) {
614 rcu_report_exp_rdp(rdp);
615 } else {
616 rdp->exp_deferred_qs = true;
617 set_tsk_need_resched(t);
618 set_preempt_need_resched();
619 }
620 return;
621 }
622
623
624 /*
625  * Second, the less-common case of being in an RCU read-side
626  * critical section.  In this case we can count on a future
627  * rcu_read_unlock().  However, this rcu_read_unlock() might
628  * execute on some other CPU, but in that case there will be a
629  * future context switch.  Either way, if the expedited grace
630  * period is still waiting on this CPU, set ->exp_deferred_qs so
631  * that the eventual quiescent state will be reported.  Note that
632  * a large group of race conditions can have caused this quiescent
633  * state to already have been reported, so we must check ->expmask.
634  */
635 if (t->rcu_read_lock_nesting > 0) {
636 raw_spin_lock_irqsave_rcu_node(rnp, flags);
637 if (rnp->expmask & rdp->grpmask) {
638 rdp->exp_deferred_qs = true;
639 t->rcu_read_unlock_special.b.exp_hint = true;
640 }
641 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
642 return;
643 }
644
645
646 /*
647  * The final and least likely case is where the interrupted code was
648  * just about to or just finished exiting the RCU-preempt read-side
649  * critical section, and no, we can't tell which.  So either way, set
650  * ->exp_deferred_qs to flag later code that a quiescent state is
651  * required.
652  *
653  * If the CPU is fully enabled (or if some buggy RCU-preempt read-side
654  * critical section is being used from idle), just invoke
655  * rcu_preempt_deferred_qs() to immediately report the quiescent state.
656  * We cannot use rcu_read_unlock_special() because we are in an
657  * interrupt handler, which would take an early exit without doing
658  * anything.  Otherwise, force a context switch after the CPU enables
659  * everything.
660  */
661 rdp->exp_deferred_qs = true;
662 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
663 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
664 rcu_preempt_deferred_qs(t);
665 } else {
666 set_tsk_need_resched(t);
667 set_preempt_need_resched();
668 }
669 }
670
671 /* PREEMPTION=y, so no RCU-sched expedited grace period to clean up after. */
672 static void sync_sched_exp_online_cleanup(int cpu)
673 {
674 }
675
676
677 /* Scan the current list of tasks blocked within RCU read-side critical
678  * sections, printing out the tid of each that is blocking the current
679  * expedited grace period.
680  */
681 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
682 {
683 struct task_struct *t;
684 int ndetected = 0;
685
686 if (!rnp->exp_tasks)
687 return 0;
688 t = list_entry(rnp->exp_tasks->prev,
689 struct task_struct, rcu_node_entry);
690 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
691 pr_cont(" P%d", t->pid);
692 ndetected++;
693 }
694 return ndetected;
695 }
696
697 #else /* #ifdef CONFIG_PREEMPT_RCU */
698
699 /* Request an expedited quiescent state. */
700 static void rcu_exp_need_qs(void)
701 {
702 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
703 /* Store .exp before .rcu_urgent_qs. */
704 smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
705 set_tsk_need_resched(current);
706 set_preempt_need_resched();
707 }
708
709 /* Invoked on each online non-idle CPU for expedited quiescent state. */
710 static void rcu_exp_handler(void *unused)
711 {
712 struct rcu_data *rdp;
713 struct rcu_node *rnp;
714
715 rdp = this_cpu_ptr(&rcu_data);
716 rnp = rdp->mynode;
717 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
718 __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
719 return;
720 if (rcu_is_cpu_rrupt_from_idle()) {
721 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
722 return;
723 }
724 rcu_exp_need_qs();
725 }
726
727 /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
728 static void sync_sched_exp_online_cleanup(int cpu)
729 {
730 unsigned long flags;
731 int my_cpu;
732 struct rcu_data *rdp;
733 int ret;
734 struct rcu_node *rnp;
735
736 rdp = per_cpu_ptr(&rcu_data, cpu);
737 rnp = rdp->mynode;
738 my_cpu = get_cpu();
739
740 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
741 __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
742 put_cpu();
743 return;
744 }
745
746 if (my_cpu == cpu) {
747 local_irq_save(flags);
748 rcu_exp_need_qs();
749 local_irq_restore(flags);
750 put_cpu();
751 return;
752 }
753
754 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
755 put_cpu();
756 WARN_ON_ONCE(ret);
757 }
758
759
760 /* Because preemptible RCU does not exist, we never have to check for
761  * tasks blocked within RCU read-side critical sections that are
762  * blocking the current expedited grace period.
763  */
764 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
765 {
766 return 0;
767 }
768
769 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
770
771 /**
772  * synchronize_rcu_expedited - Brute-force RCU grace period
773  *
774  * Wait for an RCU grace period, but expedite it.  The basic idea is to
775  * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
776  * the CPU is in an RCU critical section, and if so, it sets a flag that
777  * causes the outermost rcu_read_unlock() to report the quiescent state
778  * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
779  * other hand, if the CPU is not in an RCU read-side critical section,
780  * the IPI handler reports the quiescent state immediately.
781  *
782  * Although this is a great improvement over previous expedited
783  * implementations, it is still unfriendly to real-time workloads, so is
784  * thus not recommended for any sort of common-case code.  In fact, if
785  * you are using synchronize_rcu_expedited() in a loop, please restructure
786  * your code to batch your updates, and then use a single synchronize_rcu()
787  * instead.
788  *
789  * This has the same semantics as (but is more brutal than) synchronize_rcu().
790  */
791 void synchronize_rcu_expedited(void)
792 {
793 bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
794 struct rcu_exp_work rew;
795 struct rcu_node *rnp;
796 unsigned long s;
797
798 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
799 lock_is_held(&rcu_lock_map) ||
800 lock_is_held(&rcu_sched_lock_map),
801 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
802
803 /* If blocking already implies a grace period (early boot), we are done. */
804 if (rcu_blocking_is_gp())
805 return;
806
807 /* If expedited grace periods are prohibited, fall back to normal. */
808 if (rcu_gp_is_normal()) {
809 wait_rcu_gp(call_rcu);
810 return;
811 }
812
813 /* Take a snapshot of the sequence number. */
814 s = rcu_exp_gp_seq_snap();
815 if (exp_funnel_lock(s))
816 return;  /* Someone else did our work for us. */
817
818
819 if (unlikely(boottime)) {
820 /* Direct call during scheduler init and early_initcalls(). */
821 rcu_exp_sel_wait_wake(s);
822 } else {
823 /* Marshall arguments & schedule the expedited grace period. */
824 rew.rew_s = s;
825 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
826 queue_work(rcu_gp_wq, &rew.rew_work);
827 }
828
829 /* Wait for the expedited grace period to complete. */
830 rnp = rcu_get_root();
831 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
832 sync_exp_work_done(s));
833 smp_mb(); /* Grace-period work happens before return to caller. */
834
835 /* Let the next expedited grace period start. */
836 mutex_unlock(&rcu_state.exp_mutex);
837
838 if (likely(!boottime))
839 destroy_work_on_stack(&rew.rew_work);
840 }
841 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);