root/kernel/rcu/rcu.h


DEFINITIONS

This source file includes the following definitions.
  1. rcu_seq_ctr
  2. rcu_seq_state
  3. rcu_seq_set_state
  4. rcu_seq_start
  5. rcu_seq_endval
  6. rcu_seq_end
  7. rcu_seq_snap
  8. rcu_seq_current
  9. rcu_seq_started
  10. rcu_seq_done
  11. rcu_seq_completed_gp
  12. rcu_seq_new_gp
  13. rcu_seq_diff
  14. debug_rcu_head_queue
  15. debug_rcu_head_unqueue
  16. debug_rcu_head_queue
  17. debug_rcu_head_unqueue
  18. __rcu_reclaim
  19. rcu_init_levelspread
  20. srcu_init
  21. rcu_gp_is_normal
  22. rcu_gp_is_expedited
  23. rcu_expedite_gp
  24. rcu_unexpedite_gp
  25. rcu_request_urgent_qs_task
  26. rcutorture_get_gp_data
  27. rcutorture_record_progress
  28. srcutorture_get_gp_data
  29. rcu_get_gp_seq
  30. rcu_exp_batches_completed
  31. srcu_batches_completed
  32. rcu_force_quiescent_state
  33. show_rcu_gp_kthreads
  34. rcu_get_gp_kthreads_prio
  35. rcu_fwd_progress_check
  36. rcu_is_nocb_cpu
  37. rcu_bind_current_to_nocb

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE     ((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}
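
/*
 * Editor's note: a worked example of the sequence-number layout, not in
 * the original source.  With RCU_SEQ_CTR_SHIFT == 2, the low two bits of
 * a gp_seq value hold state and the remaining bits count grace periods:
 *
 *      s == 0x9 (binary ...1001): rcu_seq_ctr(s) == 2, rcu_seq_state(s) == 1,
 *      that is, grace period number 2 is in progress (state 1 == started).
 */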

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
        return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
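
/*
 * Editor's note: an illustrative trace, not in the original source.  One
 * full grace period moves the counter from one idle value to the next.
 * Starting from *sp == 8 (counter 2, state 0):
 *
 *      rcu_seq_start(sp);  // *sp == 9:  counter 2, state 1 (GP in progress)
 *      rcu_seq_end(sp);    // *sp == 12: counter 3, state 0 (GP complete)
 */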

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}
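
/*
 * Editor's note: worked values, not in the original source.  With
 * RCU_SEQ_STATE_MASK == 3, rcu_seq_snap() computes (*sp + 7) & ~3:
 *
 *      *sp == 8 (idle):            snap == (8 + 7) & ~3 == 12, so one
 *              full grace period (9 -> 12) suffices.
 *      *sp == 9 (GP in progress):  snap == (9 + 7) & ~3 == 16, because
 *              the in-progress GP may predate the snapshot, so a second,
 *              fully subsequent GP (13 -> 16) must also complete.
 */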

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
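
/*
 * Editor's note: illustrative values, not in the original source.
 * Continuing the rcu_seq_snap() example with snapshot s == 12:
 *
 *      *sp == 8:  rcu_seq_started() == false, rcu_seq_done() == false
 *      *sp == 9:  rcu_seq_started() == true  (the awaited GP has begun)
 *      *sp == 12: rcu_seq_done() == true     (it has also completed)
 */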

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
                            new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
        unsigned long rnd_diff;

        if (old == new)
                return 0;
        /*
         * Compute the number of grace periods (still shifted up), plus
         * one if either of new and old is not an exact grace period.
         */
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
                return 1; /* Definitely no grace period has elapsed. */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
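
/*
 * Editor's note: worked values, not in the original source.
 *
 *      rcu_seq_diff(10, 8) -> rnd_diff == 1, returns 1 (no full GP yet)
 *      rcu_seq_diff(16, 9) -> rnd_diff == 5, returns 2 (roughly two GPs)
 */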

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcu.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY   0
# define STATE_RCU_HEAD_QUEUED  1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else   /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
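
/*
 * Editor's note: an illustrative (not original) sketch of the intended
 * lifecycle.  A queuing site pairs the two calls around the callback's
 * time on the list:
 *
 *      debug_rcu_head_queue(head);     // when head is enqueued by call_rcu()
 *      // ... grace period elapses ...
 *      debug_rcu_head_unqueue(head);   // just before invoking head->func
 *
 * Queuing the same rcu_head twice then triggers a debug-objects splat.
 */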

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
        rcu_callback_t f;
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kfree_rcu_offset(offset)) {
                trace_rcu_invoke_kfree_callback(rn, head, offset);
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        } else {
                trace_rcu_invoke_callback(rn, head);
                f = head->func;
                WRITE_ONCE(head->func, (rcu_callback_t)0L);
                f(head);
                rcu_lock_release(&rcu_callback_map);
                return false;
        }
}
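
/*
 * Editor's note: an illustrative (not original) example of the offset
 * encoding that __rcu_reclaim() relies on.  kfree_rcu(p, rh) passes
 * offsetof(typeof(*p), rh) in place of a function pointer; because no
 * real callback function lives in the first page of the address space,
 * __is_kfree_rcu_offset() can tell the two cases apart:
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rh;
 *      };
 *      // kfree_rcu(p, rh) effectively does:
 *      //      call_rcu(&p->rh, (rcu_callback_t)offsetof(struct foo, rh));
 *      // and __rcu_reclaim() later recovers p as (void *)head - offset.
 */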

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)
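
/*
 * Editor's note, not in the original source: the static ___rfd_beenhere
 * is per-expansion, so each callsite gets its own one-shot latch.  The
 * atomic_read() is a cheap fast path; atomic_xchg() makes the decision
 * race-free when two CPUs reach the same callsite at once, e.g.:
 *
 *      rcu_ftrace_dump(DUMP_ALL);      // dumps on first execution only
 */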

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}
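
/*
 * Editor's note: worked (not original) numbers for the balanced case.
 * With nr_cpu_ids == 96 and a two-level tree of levelcnt == {1, 6}
 * (one root, six leaves), the loop runs bottom-up:
 *
 *      i == 1: levelspread[1] = (96 + 6 - 1) / 6 == 16 CPUs per leaf
 *      i == 0: levelspread[0] = (6 + 1 - 1) / 1  == 6 leaves under root
 */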

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
        for ((rnp) = &(sp)->node[0]; \
             (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
        srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
        for ((rnp) = rcu_first_leaf_node(); \
             (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
             (cpu) <= rnp->grphi; \
             (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
        ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
        for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
             (cpu) <= rnp->grphi; \
             (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
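
/*
 * Editor's note: an illustrative (not original) walk through the mask
 * iterator.  For a leaf with grplo == 16, grphi == 31 and mask == 0x5
 * (bits 0 and 2 relative to grplo), the loop visits CPUs 16 and 18,
 * then terminates once find_next_bit() runs past the last set bit and
 * the computed CPU number exceeds grphi.
 */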

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p)                                       \
do {                                                                    \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)                                   \
do {                                                                    \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));                    \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)                                 \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)                        \
do {                                                                    \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);         \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)                   \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)                                    \
({                                                                      \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));    \
                                                                        \
        if (___locked)                                                  \
                smp_mb__after_unlock_lock();                            \
        ___locked;                                                      \
})

#define raw_lockdep_assert_held_rcu_node(p)                             \
        lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
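
/*
 * Editor's note: an illustrative (not original) usage sketch.  A typical
 * caller brackets a critical section on a leaf rcu_node like so:
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *      // ... update rnp fields; fully ordered against the previous
 *      // lock holder thanks to smp_mb__after_unlock_lock() ...
 *      raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */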

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE  0
#define RCU_SCHEDULER_INIT      1
#define RCU_SCHEDULER_RUNNING   2

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_TASKS_FLAVOR,
        RCU_TRIVIAL_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags, unsigned long *gp_seq)
{
        *flags = 0;
        *gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
                                           unsigned long *gp_seq)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#endif /* __LINUX_RCU_H */
