This source file includes the following definitions:
- rcu_read_lock_held_common
- rcu_read_lock_sched_held
- rcu_gp_is_normal
- rcu_gp_is_expedited
- rcu_expedite_gp
- rcu_unexpedite_gp
- rcu_end_inkernel_boot
- rcu_test_sync_prims
- rcu_set_runtime_mode
- debug_lockdep_rcu_enabled
- rcu_read_lock_held
- rcu_read_lock_bh_held
- rcu_read_lock_any_held
- wakeme_after_rcu
- __wait_rcu_gp
- init_rcu_head
- destroy_rcu_head
- rcuhead_is_static_object
- init_rcu_head_on_stack
- destroy_rcu_head_on_stack
- do_trace_rcu_torture_read
- rcutorture_sched_setaffinity
- call_rcu_tasks
- synchronize_rcu_tasks
- rcu_barrier_tasks
- check_holdout_task
- rcu_tasks_kthread
- rcu_spawn_tasks_kthread
- exit_tasks_rcu_start
- exit_tasks_rcu_finish
- rcu_tasks_bootup_oddness
- test_callback
- early_boot_test_call_rcu
- rcu_early_boot_tests
- rcu_verify_early_boot_tests
- rcu_early_boot_tests
- rcupdate_announce_bootup_oddness

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
extern int rcu_expedited;
module_param(rcu_expedited, int, 0);
extern int rcu_normal;
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
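
/*
 * Set *ret to a best-guess answer and return true if lockdep cannot give
 * a useful answer right now: lockdep itself is disabled, RCU is not
 * watching, or this CPU is not marked online for RCU's purposes.
 * Otherwise return false so that the caller can consult lockdep.
 */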
static bool rcu_read_lock_held_common(bool *ret)
{
        if (!debug_lockdep_rcu_enabled()) {
                *ret = 1;
                return true;
        }
        if (!rcu_is_watching()) {
                *ret = 0;
                return true;
        }
        if (!rcu_lockdep_current_cpu_online()) {
                *ret = 0;
                return true;
        }
        return false;
}

int rcu_read_lock_sched_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif

#ifndef CONFIG_TINY_RCU
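
/*
 * Should grace periods use the normal (non-expedited) primitives?
 * Returns true if the rcu_normal module parameter is set, except while
 * the scheduler is in its RCU_SCHEDULER_INIT phase of boot.
 */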
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal) &&
               rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
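
/*
 * Should grace periods be expedited?  Returns true if the rcu_expedited
 * module parameter is set or if rcu_expedite_gp() nesting (which starts
 * at 1 to cover boot) is currently nonzero.
 */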
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
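
/*
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * Increment the expediting nest count; grace periods are then expedited
 * until a matching call to rcu_unexpedite_gp().
 */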
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);
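
/*
 * rcu_unexpedite_gp - Cancel a prior call to rcu_expedite_gp()
 *
 * Decrement the expediting nest count; once it drops to zero, grace
 * periods revert to normal operation unless rcu_expedited is set.
 */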
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
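
/*
 * Inform RCU that the in-kernel portion of boot is complete: drop the
 * boot-time expediting reference and, if rcu_normal_after_boot was
 * specified, force all subsequent grace periods to be normal.
 */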
void rcu_end_inkernel_boot(void)
{
        rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
}

#endif
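
/*
 * Minimal self test of the synchronous grace-period primitives: under
 * CONFIG_PROVE_RCU, invoke both the normal and the expedited forms to
 * verify that they work at the current point in boot.
 */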
void rcu_test_sync_prims(void)
{
        if (!IS_ENABLED(CONFIG_PROVE_RCU))
                return;
        synchronize_rcu();
        synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
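
/*
 * Switch RCU from its boot-time mode to full run-time operation, testing
 * the synchronous grace-period primitives both before and after the switch.
 */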
static int __init rcu_set_runtime_mode(void)
{
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
        rcu_test_sync_prims();
        return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
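
/*
 * rcu_read_lock_held - might we be in an RCU read-side critical section?
 *
 * Returns nonzero if lockdep believes that rcu_read_lock() is held, and
 * zero otherwise.  Falls back to a best-guess answer when lockdep cannot
 * help (see rcu_read_lock_held_common()).
 */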
int rcu_read_lock_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
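
/*
 * rcu_read_lock_bh_held - might we be in an RCU-bh read-side critical section?
 *
 * Falls back to a best-guess answer when lockdep cannot help; otherwise
 * returns nonzero when running in softirq context or with softirqs or
 * interrupts disabled, any of which suffices for an RCU-bh reader.
 */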
int rcu_read_lock_bh_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        if (lock_is_held(&rcu_lock_map) ||
            lock_is_held(&rcu_bh_lock_map) ||
            lock_is_held(&rcu_sched_lock_map))
                return 1;
        return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif
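
/*
 * wakeme_after_rcu - Callback function to awaken a task after grace period
 *
 * Complete the completion embedded in the enclosing rcu_synchronize
 * structure, awakening the task waiting for the grace period.
 */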
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;
        int j;

        /* Initialize and register the callbacks, one per distinct flavor. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu)) {
                        might_sleep();
                        continue;
                }
                init_rcu_head_on_stack(&rs_array[i].head);
                init_completion(&rs_array[i].completion);
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i)
                        (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
        }

        /* Wait for all callbacks to be invoked, then clean up the rcu_heads. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu))
                        continue;
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i)
                        wait_for_completion(&rs_array[i].completion);
                destroy_rcu_head_on_stack(&rs_array[i].head);
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/*
 * init_rcu_head_on_stack - initialize an on-stack rcu_head for debugobjects
 *
 * Tell debugobjects about an on-stack rcu_head so that later use of it
 * with call_rcu() and friends is not flagged as an error.  Pair with
 * destroy_rcu_head_on_stack() before the enclosing function returns.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/*
 * destroy_rcu_head_on_stack - remove debugobjects state for an on-stack rcu_head
 *
 * Tell debugobjects that the on-stack rcu_head is about to go out of
 * scope.  Pairs with a preceding init_rcu_head_on_stack().
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Give rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        int ret;

        ret = sched_setaffinity(pid, in_mask);
        WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly;
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif

#ifdef CONFIG_TASKS_RCU
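
/*
 * Tasks RCU: a simple RCU variant whose quiescent states are voluntary
 * context switch, user-space execution, and the idle loop, so grace
 * periods can be quite long.  Callbacks are kept on a single global list
 * protected by rcu_tasks_cbs_lock and are processed by rcu_tasks_kthread().
 */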
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable if <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static struct task_struct *rcu_tasks_kthread_ptr;
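
/*
 * call_rcu_tasks - Queue an RCU-tasks callback
 *
 * Append @rhp to the global Tasks-RCU callback list; @func will be invoked
 * once all tasks that were running at queueing time have passed through a
 * voluntary context switch, user-mode execution, or the idle loop.  If the
 * list was previously empty, wake up rcu_tasks_kthread() (once it exists)
 * to start processing.
 */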
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
        unsigned long flags;
        bool needwake;

        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
        needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

        /* Avoid waking up a kthread that has not yet been spawned. */
        if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
                wake_up(&rcu_tasks_cbs_wq);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
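
/*
 * Illustrative usage sketch (not from this file; struct foo and
 * foo_release() are hypothetical): a caller typically embeds an rcu_head
 * in its own structure and frees that structure from the callback once a
 * Tasks-RCU grace period has elapsed.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_release(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	... after removing all globally visible references to fp ...
 *	call_rcu_tasks(&fp->rh, foo_release);
 */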
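
/*
 * synchronize_rcu_tasks - wait until an RCU-tasks grace period has elapsed
 *
 * Block until all tasks that were running at the time of the call have
 * passed through a voluntary context switch, user-mode execution, or the
 * idle loop.  Must not be called until after the scheduler has started,
 * as the RCU_LOCKDEP_WARN() below checks.
 */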
void synchronize_rcu_tasks(void)
{
        /* Complain if the scheduler has not started. */
        RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");

        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
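
/*
 * rcu_barrier_tasks - Wait for previously queued call_rcu_tasks() callbacks
 *
 * Because there is only one Tasks-RCU callback list and callbacks are
 * invoked in queueing order, waiting for a callback of our own (via
 * synchronize_rcu_tasks()) also waits for all earlier callbacks.
 */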
void rcu_barrier_tasks(void)
{
        synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
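
/*
 * Check whether the specified holdout task has passed through a quiescent
 * state since the scan; if so, remove it from the holdout list.  Otherwise
 * ask for an urgent quiescent state and, when a stall report is due, print
 * the task's state.
 */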
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        rcu_request_urgent_qs_task(t);
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}
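
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */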
static int __noreturn rcu_tasks_kthread(void *arg)
{
        unsigned long flags;
        struct task_struct *g, *t;
        unsigned long lastreport;
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);
        int fract;

        /* Run on housekeeping CPUs by default. */
        housekeeping_affine(current, HK_FLAG_RCU);

        /*
         * Each pass through the following loop picks up any newly queued
         * callbacks, waits for one RCU-tasks grace period, and then
         * invokes those callbacks.
         */
        for (;;) {

                /* Pick up any new callbacks. */
                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                list = rcu_tasks_cbs_head;
                rcu_tasks_cbs_head = NULL;
                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

                /* If there were none, wait a bit and start over. */
                if (!list) {
                        wait_event_interruptible(rcu_tasks_cbs_wq,
                                                 rcu_tasks_cbs_head);
                        if (!rcu_tasks_cbs_head) {
                                WARN_ON(signal_pending(current));
                                schedule_timeout_interruptible(HZ/10);
                        }
                        continue;
                }

                /*
                 * An ordinary RCU grace period waits for pre-existing
                 * updates to each task's ->on_rq and ->nvcsw to complete
                 * before those fields are sampled below.
                 */
                synchronize_rcu();

                /*
                 * There were callbacks, so an RCU-tasks grace period is
                 * needed.  Scan the task list, marking every non-idle
                 * runnable task other than the current one as a holdout.
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
                        if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
                }
                rcu_read_unlock();

                /* Also wait for tasks that are currently exiting. */
                synchronize_srcu(&tasks_rcu_exit_srcu);

                /*
                 * Each pass through the following loop checks the remaining
                 * holdout tasks, removing any that have since passed through
                 * a quiescent state and complaining about any that stall.
                 */
                lastreport = jiffies;

                /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
                fract = 10;

                for (;;) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;

                        if (list_empty(&rcu_tasks_holdouts))
                                break;

                        /* Slowly back off waiting for holdouts. */
                        schedule_timeout_interruptible(HZ/fract);

                        if (fract > 1)
                                fract--;

                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
                                lastreport = jiffies;
                        firstreport = true;
                        WARN_ON(signal_pending(current));
                        list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                 rcu_tasks_holdout_list) {
                                check_holdout_task(t, needreport, &firstreport);
                                cond_resched();
                        }
                }

                /*
                 * A second ordinary RCU grace period supplies the memory
                 * ordering needed before the callbacks below are invoked.
                 */
                synchronize_rcu();

                /* Invoke the callbacks. */
                while (list) {
                        next = list->next;
                        local_bh_disable();
                        list->func(list);
                        local_bh_enable();
                        list = next;
                        cond_resched();
                }

                /* Paranoid sleep to avoid a tight loop. */
                schedule_timeout_uninterruptible(HZ/10);
        }
}
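
/* Spawn rcu_tasks_kthread() at core_initcall() time. */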
static int __init rcu_spawn_tasks_kthread(void)
{
        struct task_struct *t;

        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
                return 0;
        smp_mb(); /* Order kthread creation before setting rcu_tasks_kthread_ptr. */
        WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        return 0;
}
core_initcall(rcu_spawn_tasks_kthread);
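
/*
 * Called from the task-exit path: enter the tasks_rcu_exit_srcu read-side
 * critical section so that rcu_tasks_kthread() waits for exiting tasks.
 */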
void exit_tasks_rcu_start(void)
{
        preempt_disable();
        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
        preempt_enable();
}

/* Exit-side counterpart: leave the tasks_rcu_exit_srcu read-side section. */
void exit_tasks_rcu_finish(void)
{
        preempt_disable();
        __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
        preempt_enable();
}

#endif

#ifndef CONFIG_TINY_RCU
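
/*
 * Report Tasks-RCU status and any non-default Tasks-RCU settings at boot.
 */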
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
        if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
                pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
        else
                pr_info("\tTasks RCU enabled.\n");
#endif
}

#endif

#ifdef CONFIG_PROVE_RCU
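
/* Early boot self-test parameters. */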
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;
        static struct rcu_head shead;

        call_rcu(&head, test_callback);
        if (IS_ENABLED(CONFIG_SRCU))
                call_srcu(&early_srcu, &shead, test_callback);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
                if (IS_ENABLED(CONFIG_SRCU)) {
                        early_boot_test_counter++;
                        srcu_barrier(&early_srcu);
                }
        }
        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif

#ifndef CONFIG_TINY_RCU
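
/*
 * Print any RCU-related boot-time settings that differ from the defaults,
 * then report Tasks-RCU status via rcu_tasks_bootup_oddness().
 */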
void __init rcupdate_announce_bootup_oddness(void)
{
        if (rcu_normal)
                pr_info("\tNo expedited grace period (rcu_normal).\n");
        else if (rcu_normal_after_boot)
                pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
        else if (rcu_expedited)
                pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
        if (rcu_cpu_stall_suppress)
                pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
        if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
                pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
        rcu_tasks_bootup_oddness();
}

#endif