This source file includes the following definitions.
- rcu_seq_ctr
- rcu_seq_state
- rcu_seq_set_state
- rcu_seq_start
- rcu_seq_endval
- rcu_seq_end
- rcu_seq_snap
- rcu_seq_current
- rcu_seq_started
- rcu_seq_done
- rcu_seq_completed_gp
- rcu_seq_new_gp
- rcu_seq_diff
- debug_rcu_head_queue
- debug_rcu_head_unqueue
- debug_rcu_head_queue
- debug_rcu_head_unqueue
- __rcu_reclaim
- rcu_init_levelspread
- srcu_init
- rcu_gp_is_normal
- rcu_gp_is_expedited
- rcu_expedite_gp
- rcu_unexpedite_gp
- rcu_request_urgent_qs_task
- rcutorture_get_gp_data
- rcutorture_record_progress
- srcutorture_get_gp_data
- rcu_get_gp_seq
- rcu_exp_batches_completed
- srcu_batches_completed
- rcu_force_quiescent_state
- show_rcu_gp_kthreads
- rcu_get_gp_kthreads_prio
- rcu_fwd_progress_check
- rcu_is_nocb_cpu
- rcu_bind_current_to_nocb
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 */
#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.
 *
 * The two low-order bits of each sequence number hold the bottom-level
 * grace-period state; the remaining bits count grace periods.
 */
#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
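
/*
 * Worked example (illustrative, not from the original source): with
 * RCU_SEQ_CTR_SHIFT == 2, a gp_seq value of 0x5 encodes counter 1 with
 * state 1, that is, a grace period in progress.  rcu_seq_snap() then
 * returns (0x5 + 7) & ~0x3 == 0xc, that is, counter 3 with state 0: the
 * in-progress grace period plus one full additional grace period must
 * complete before this snapshot is satisfied by rcu_seq_done().
 */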

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
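
/*
 * Illustrative sketch (an assumption, not code from this file): how a
 * grace-period state machine typically composes the rcu_seq_* helpers.
 * The variable name "my_gp_seq" is hypothetical.
 *
 *	unsigned long my_gp_seq = 0;
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&my_gp_seq);	(earliest value implying a full GP)
 *	rcu_seq_start(&my_gp_seq);	(state bits nonzero: GP in progress)
 *	 ...drive the grace period to completion...
 *	rcu_seq_end(&my_gp_seq);	(counter advances, state bits clear)
 *	WARN_ON(!rcu_seq_done(&my_gp_seq, s));	(snapshot now satisfied)
 */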

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  They are shared by all RCU implementations.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else	/* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		trace_rcu_invoke_kfree_callback(rn, head, offset);
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		trace_rcu_invoke_callback(rn, head);
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
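
/*
 * Illustrative sketch (an assumption, not code from this file): why the
 * offset test in __rcu_reclaim() works.  kfree_rcu() encodes the byte
 * offset of the rcu_head within its enclosing structure into ->func, so
 * sufficiently small ->func values mean "kfree() the enclosing object"
 * rather than "invoke a callback".  The struct name "foo" is hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	kfree_rcu(p, rh);	(records offsetof(struct foo, rh) in rh.func)
 *
 * After a grace period, __rcu_reclaim() sees the small offset and computes
 * (void *)&p->rh - offsetof(struct foo, rh) == p, then calls kfree(p).
 */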

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
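
/*
 * Illustrative usage (an assumption, not code from this file): a stall
 * warning path might invoke rcu_ftrace_dump(DUMP_ALL) so that the ftrace
 * buffer is captured exactly once even when several CPUs detect the same
 * stall concurrently.
 */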

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
extern bool rcu_fanout_exact;
extern int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
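
/*
 * Worked example (illustrative, not from the original source): with
 * nr_cpu_ids == 96 and a two-level tree of levelcnt == {1, 6} (one root
 * rcu_node above six leaves), the balanced branch walks leaf-to-root:
 * levelspread[1] = ceil(96 / 6) = 16 CPUs per leaf rcu_node, and then
 * levelspread[0] = ceil(6 / 1) = 6 leaf rcu_nodes under the root.
 */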

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp)->grplo, (mask)))
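
/*
 * Illustrative sketch (an assumption, not code from this file): scanning
 * every possible CPU below each leaf rcu_node, as quiescent-state forcing
 * typically does.
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp) {
 *		for_each_leaf_node_possible_cpu(rnp, cpu) {
 *			 ...inspect per-CPU state for "cpu" here...
 *		}
 *	}
 */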

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values, this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier;
 * and most importantly transitivity is lost.  In order to restore full
 * ordering between tree levels, augment the regular lock acquire functions
 * with smp_mb__after_unlock_lock().
 *
 * Because the rcu_node ->lock field is private, callers should use these
 * wrappers rather than referencing the lock directly.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
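
/*
 * Illustrative usage (an assumption, not code from this file): update-side
 * code typically pairs the _rcu_node acquire and release wrappers so that
 * the smp_mb__after_unlock_lock() ordering is preserved.  "rnp" is a
 * hypothetical rcu_node pointer.
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	 ...update grace-period state protected by the rcu_node lock...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */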

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else
static inline void srcu_init(void) { }
#endif

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#endif /* __LINUX_RCU_H */