root/include/linux/rculist.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes following definitions.
  1. INIT_LIST_HEAD_RCU
  2. __list_add_rcu
  3. list_add_rcu
  4. list_add_tail_rcu
  5. list_del_rcu
  6. hlist_del_init_rcu
  7. list_replace_rcu
  8. __list_splice_init_rcu
  9. list_splice_init_rcu
  10. list_splice_tail_init_rcu
  11. hlist_del_rcu
  12. hlist_replace_rcu
  13. hlist_add_head_rcu
  14. hlist_add_tail_rcu
  15. hlist_add_before_rcu
  16. hlist_add_behind_rcu

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _LINUX_RCULIST_H
   3 #define _LINUX_RCULIST_H
   4 
   5 #ifdef __KERNEL__
   6 
   7 /*
   8  * RCU-protected list version
   9  */
  10 #include <linux/list.h>
  11 #include <linux/rcupdate.h>
  12 
  13 /*
  14  * Why is there no list_empty_rcu()?  Because list_empty() serves this
  15  * purpose.  The list_empty() function fetches the RCU-protected pointer
  16  * and compares it to the address of the list head, but neither dereferences
  17  * this pointer itself nor provides this pointer to the caller.  Therefore,
  18  * it is not necessary to use rcu_dereference(), so that list_empty() can
  19  * be used anywhere you would want to use a list_empty_rcu().
  20  */
  21 
  22 /*
  23  * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
  24  * @list: list to be initialized
  25  *
  26  * You should instead use INIT_LIST_HEAD() for normal initialization and
  27  * cleanup tasks, when readers have no access to the list being initialized.
  28  * However, if the list being initialized is visible to readers, you
  29  * need to keep the compiler from being too mischievous.
  30  */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	/*
	 * WRITE_ONCE() keeps the compiler from tearing or fusing these
	 * stores, which concurrent RCU readers might otherwise observe
	 * as garbage intermediate values.
	 */
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}
  36 
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly.  The __rcu-annotated
 * lvalue this yields must only be accessed through the
 * rcu_dereference()/rcu_assign_pointer() family.
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))
  42 
/*
 * Check during list traversal that we are within an RCU reader
 */

/* Expands to nothing; fails to compile if given more than one argument. */
#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
/*
 * Warn via lockdep when an RCU list is traversed without either the
 * caller-supplied @cond holding or some RCU read-side lock being held.
 * The variadic @extra only enforces the argument count at compile time.
 */
#define __list_check_rcu(dummy, cond, extra...)                         \
        ({                                                              \
        check_arg_count_one(extra);                                     \
        RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(),            \
                         "RCU-list traversed in non-reader section!");  \
         })
#else
/* Checking disabled: still validate the argument count at compile time. */
#define __list_check_rcu(dummy, cond, extra...)                         \
        ({ check_arg_count_one(extra); })
#endif
  60 
  61 /*
  62  * Insert a new entry between two known consecutive entries.
  63  *
  64  * This is only for internal list manipulation where we know
  65  * the prev/next entries already!
  66  */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	/*
	 * Fully initialize @new before publication so that RCU readers
	 * never observe a half-initialized entry.
	 */
	new->next = next;
	new->prev = prev;
	/* Publication point: rcu_assign_pointer() orders the stores above. */
	rcu_assign_pointer(list_next_rcu(prev), new);
	/* RCU readers do not follow ->prev, so a plain store suffices. */
	next->prev = new;
}
  78 
  79 /**
  80  * list_add_rcu - add a new entry to rcu-protected list
  81  * @new: new entry to be added
  82  * @head: list head to add it after
  83  *
  84  * Insert a new entry after the specified head.
  85  * This is good for implementing stacks.
  86  *
  87  * The caller must take whatever precautions are necessary
  88  * (such as holding appropriate locks) to avoid racing
  89  * with another list-mutation primitive, such as list_add_rcu()
  90  * or list_del_rcu(), running on this same list.
  91  * However, it is perfectly legal to run concurrently with
  92  * the _rcu list-traversal primitives, such as
  93  * list_for_each_entry_rcu().
  94  */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	/* Insert between the head and its current first entry. */
	__list_add_rcu(new, head, head->next);
}
  99 
 100 /**
 101  * list_add_tail_rcu - add a new entry to rcu-protected list
 102  * @new: new entry to be added
 103  * @head: list head to add it before
 104  *
 105  * Insert a new entry before the specified head.
 106  * This is useful for implementing queues.
 107  *
 108  * The caller must take whatever precautions are necessary
 109  * (such as holding appropriate locks) to avoid racing
 110  * with another list-mutation primitive, such as list_add_tail_rcu()
 111  * or list_del_rcu(), running on this same list.
 112  * However, it is perfectly legal to run concurrently with
 113  * the _rcu list-traversal primitives, such as
 114  * list_for_each_entry_rcu().
 115  */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	/* Insert between the current last entry and the head. */
	__list_add_rcu(new, head->prev, head);
}
 121 
 122 /**
 123  * list_del_rcu - deletes entry from list without re-initialization
 124  * @entry: the element to delete from the list.
 125  *
 126  * Note: list_empty() on entry does not return true after this,
 127  * the entry is in an undefined state. It is useful for RCU based
 128  * lockfree traversal.
 129  *
 130  * In particular, it means that we can not poison the forward
 131  * pointers that may still be used for walking the list.
 132  *
 133  * The caller must take whatever precautions are necessary
 134  * (such as holding appropriate locks) to avoid racing
 135  * with another list-mutation primitive, such as list_del_rcu()
 136  * or list_add_rcu(), running on this same list.
 137  * However, it is perfectly legal to run concurrently with
 138  * the _rcu list-traversal primitives, such as
 139  * list_for_each_entry_rcu().
 140  *
 141  * Note that the caller is not permitted to immediately free
 142  * the newly deleted entry.  Instead, either synchronize_rcu()
 143  * or call_rcu() must be used to defer freeing until an RCU
 144  * grace period has elapsed.
 145  */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/*
	 * Poison only ->prev: concurrent RCU readers may still be
	 * walking this entry via ->next, which must remain valid.
	 */
	entry->prev = LIST_POISON2;
}
 151 
 152 /**
 153  * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 154  * @n: the element to delete from the hash list.
 155  *
 * Note: hlist_unhashed() on the node returns true after this. It is
 157  * useful for RCU based read lockfree traversal if the writer side
 158  * must know if the list entry is still hashed or already unhashed.
 159  *
 160  * In particular, it means that we can not poison the forward pointers
 161  * that may still be used for walking the hash list and we can only
 162  * zero the pprev pointer so list_unhashed() will return true after
 163  * this.
 164  *
 165  * The caller must take whatever precautions are necessary (such as
 166  * holding appropriate locks) to avoid racing with another
 167  * list-mutation primitive, such as hlist_add_head_rcu() or
 168  * hlist_del_rcu(), running on this same list.  However, it is
 169  * perfectly legal to run concurrently with the _rcu list-traversal
 170  * primitives, such as hlist_for_each_entry_rcu().
 171  */
 172 static inline void hlist_del_init_rcu(struct hlist_node *n)
 173 {
 174         if (!hlist_unhashed(n)) {
 175                 __hlist_del(n);
 176                 n->pprev = NULL;
 177         }
 178 }
 179 
 180 /**
 181  * list_replace_rcu - replace old entry by new one
 182  * @old : the element to be replaced
 183  * @new : the new element to insert
 184  *
 185  * The @old entry will be replaced with the @new entry atomically.
 186  * Note: @old should not be empty.
 187  */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	/* Fully initialize @new before making it reachable by readers. */
	new->next = old->next;
	new->prev = old->prev;
	/* Publication point: readers now reach @new instead of @old. */
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/*
	 * Readers may still hold a reference to @old and follow its
	 * ->next pointer, so only ->prev may be poisoned here.
	 */
	old->prev = LIST_POISON2;
}
 197 
 198 /**
 199  * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 200  * @list:       the RCU-protected list to splice
 201  * @prev:       points to the last element of the existing list
 202  * @next:       points to the first element of the existing list
 203  * @sync:       synchronize_rcu, synchronize_rcu_expedited, ...
 204  *
 205  * The list pointed to by @prev and @next can be RCU-read traversed
 206  * concurrently with this function.
 207  *
 208  * Note that this function blocks.
 209  *
 210  * Important note: the caller must take whatever action is necessary to prevent
 211  * any other updates to the existing list.  In principle, it is possible to
 212  * modify the list as soon as sync() begins execution. If this sort of thing
 213  * becomes necessary, an alternative version based on call_rcu() could be
 214  * created.  But only if -really- needed -- there is no shortage of RCU API
 215  * members.
 216  */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" tracking list, so initialize it.  RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;	/* Plain store: spliced body not yet reachable. */
	rcu_assign_pointer(list_next_rcu(prev), first);	/* Publication point. */
	first->prev = prev;
	next->prev = last;
}
 255 
 256 /**
 257  * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 258  *                        designed for stacks.
 259  * @list:       the RCU-protected list to splice
 260  * @head:       the place in the existing list to splice the first list into
 261  * @sync:       synchronize_rcu, synchronize_rcu_expedited, ...
 262  */
 263 static inline void list_splice_init_rcu(struct list_head *list,
 264                                         struct list_head *head,
 265                                         void (*sync)(void))
 266 {
 267         if (!list_empty(list))
 268                 __list_splice_init_rcu(list, head, head->next, sync);
 269 }
 270 
 271 /**
 272  * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 273  *                             list, designed for queues.
 274  * @list:       the RCU-protected list to splice
 275  * @head:       the place in the existing list to splice the first list into
 276  * @sync:       synchronize_rcu, synchronize_rcu_expedited, ...
 277  */
 278 static inline void list_splice_tail_init_rcu(struct list_head *list,
 279                                              struct list_head *head,
 280                                              void (*sync)(void))
 281 {
 282         if (!list_empty(list))
 283                 __list_splice_init_rcu(list, head->prev, head, sync);
 284 }
 285 
 286 /**
 287  * list_entry_rcu - get the struct for this entry
 288  * @ptr:        the &struct list_head pointer.
 289  * @type:       the type of the struct this is embedded in.
 290  * @member:     the name of the list_head within the struct.
 291  *
 292  * This primitive may safely run concurrently with the _rcu list-mutation
 293  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 294  */
/*
 * NOTE: READ_ONCE() (rather than rcu_dereference()) is used here;
 * lockdep checking of the traversal, if any, is handled by the
 * calling iterators such as list_for_each_entry_rcu().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)
 297 
 298 /*
 299  * Where are list_empty_rcu() and list_first_entry_rcu()?
 300  *
 301  * Implementing those functions following their counterparts list_empty() and
 302  * list_first_entry() is not advisable because they lead to subtle race
 303  * conditions as the following snippet shows:
 304  *
 305  * if (!list_empty_rcu(mylist)) {
 306  *      struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 307  *      do_something(bar);
 308  * }
 309  *
 310  * The list may not be empty when list_empty_rcu checks it, but it may be when
 311  * list_first_entry_rcu rereads the ->next pointer.
 312  *
 313  * Rereading the ->next pointer is not a problem for list_empty() and
 314  * list_first_entry() because they would be protected by a lock that blocks
 315  * writers.
 316  *
 317  * See list_first_or_null_rcu for an alternative.
 318  */
 319 
 320 /**
 321  * list_first_or_null_rcu - get the first element from a list
 322  * @ptr:        the list head to take the element from.
 323  * @type:       the type of the struct this is embedded in.
 324  * @member:     the name of the list_head within the struct.
 325  *
 326  * Note that if the list is empty, it returns NULL.
 327  *
 328  * This primitive may safely run concurrently with the _rcu list-mutation
 329  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 330  */
/*
 * ->next is read exactly once, so the emptiness check and the entry
 * returned are consistent with each other even if the list is being
 * updated concurrently (see the race discussion above).
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
 337 
 338 /**
 * list_next_or_null_rcu - get the next element from a list
 340  * @head:       the head for the list.
 341  * @ptr:        the list head to take the next element from.
 342  * @type:       the type of the struct this is embedded in.
 343  * @member:     the name of the list_head within the struct.
 344  *
 345  * Note that if the ptr is at the end of the list, NULL is returned.
 346  *
 347  * This primitive may safely run concurrently with the _rcu list-mutation
 348  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 349  */
/* Reads ->next exactly once; yields NULL when @ptr is the last entry. */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})
 358 
 359 /**
 360  * list_for_each_entry_rcu      -       iterate over rcu list of given type
 361  * @pos:        the type * to use as a loop cursor.
 362  * @head:       the head for your list.
 363  * @member:     the name of the list_head within the struct.
 364  * @cond:       optional lockdep expression if called from non-RCU protection.
 365  *
 366  * This list-traversal primitive may safely run concurrently with
 367  * the _rcu list-mutation primitives such as list_add_rcu()
 368  * as long as the traversal is guarded by rcu_read_lock().
 369  */
/*
 * __list_check_rcu() runs once, in the for-initializer via the comma
 * operator.  When @cond is omitted, the trailing 0 becomes the
 * condition, so only the "inside an RCU reader" check applies.
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)             \
	for (__list_check_rcu(dummy, ## cond, 0),                       \
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);  \
		&pos->member != (head);                                 \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 375 
 376 /**
 377  * list_entry_lockless - get the struct for this entry
 378  * @ptr:        the &struct list_head pointer.
 379  * @type:       the type of the struct this is embedded in.
 380  * @member:     the name of the list_head within the struct.
 381  *
 382  * This primitive may safely run concurrently with the _rcu
 383  * list-mutation primitives such as list_add_rcu(), but requires some
 384  * implicit RCU read-side guarding.  One example is running within a special
 385  * exception-time environment where preemption is disabled and where lockdep
 386  * cannot be invoked.  Another example is when items are added to the list,
 387  * but never deleted.
 388  */
/* Like list_entry_rcu(), for contexts where lockdep cannot be invoked. */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 391 
 392 /**
 393  * list_for_each_entry_lockless - iterate over rcu list of given type
 394  * @pos:        the type * to use as a loop cursor.
 395  * @head:       the head for your list.
 396  * @member:     the name of the list_struct within the struct.
 397  *
 398  * This primitive may safely run concurrently with the _rcu
 399  * list-mutation primitives such as list_add_rcu(), but requires some
 400  * implicit RCU read-side guarding.  One example is running within a special
 401  * exception-time environment where preemption is disabled and where lockdep
 402  * cannot be invoked.  Another example is when items are added to the list,
 403  * but never deleted.
 404  */
/* Lockless counterpart of list_for_each_entry_rcu(): no lockdep checks. */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
 409 
 410 /**
 411  * list_for_each_entry_continue_rcu - continue iteration over list of given type
 412  * @pos:        the type * to use as a loop cursor.
 413  * @head:       the head for your list.
 414  * @member:     the name of the list_head within the struct.
 415  *
 416  * Continue to iterate over list of given type, continuing after
 417  * the current position which must have been in the list when the RCU read
 418  * lock was taken.
 419  * This would typically require either that you obtained the node from a
 420  * previous walk of the list in the same RCU read-side critical section, or
 421  * that you held some sort of non-RCU reference (such as a reference count)
 422  * to keep the node alive *and* in the list.
 423  *
 424  * This iterator is similar to list_for_each_entry_from_rcu() except
 425  * this starts after the given position and that one starts at the given
 426  * position.
 427  */
/* Starts at the entry AFTER @pos; @pos itself must be a valid entry. */
#define list_for_each_entry_continue_rcu(pos, head, member)             \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);    \
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 432 
 433 /**
 434  * list_for_each_entry_from_rcu - iterate over a list from current point
 435  * @pos:        the type * to use as a loop cursor.
 436  * @head:       the head for your list.
 437  * @member:     the name of the list_node within the struct.
 438  *
 439  * Iterate over the tail of a list starting from a given position,
 440  * which must have been in the list when the RCU read lock was taken.
 441  * This would typically require either that you obtained the node from a
 442  * previous walk of the list in the same RCU read-side critical section, or
 443  * that you held some sort of non-RCU reference (such as a reference count)
 444  * to keep the node alive *and* in the list.
 445  *
 446  * This iterator is similar to list_for_each_entry_continue_rcu() except
 447  * this starts from the given position and that one starts from the position
 448  * after the given position.
 449  */
/* Starts at @pos itself, hence the empty for-initializer. */
#define list_for_each_entry_from_rcu(pos, head, member)                 \
	for (; &(pos)->member != (head);                                        \
		pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
 453 
 454 /**
 455  * hlist_del_rcu - deletes entry from hash list without re-initialization
 456  * @n: the element to delete from the hash list.
 457  *
 * Note: hlist_unhashed() on entry does not return true after this,
 459  * the entry is in an undefined state. It is useful for RCU based
 460  * lockfree traversal.
 461  *
 462  * In particular, it means that we can not poison the forward
 463  * pointers that may still be used for walking the hash list.
 464  *
 465  * The caller must take whatever precautions are necessary
 466  * (such as holding appropriate locks) to avoid racing
 467  * with another list-mutation primitive, such as hlist_add_head_rcu()
 468  * or hlist_del_rcu(), running on this same list.
 469  * However, it is perfectly legal to run concurrently with
 470  * the _rcu list-traversal primitives, such as
 471  * hlist_for_each_entry().
 472  */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/*
	 * Poison only ->pprev: concurrent RCU readers may still be
	 * walking this node via ->next, which must remain valid.
	 */
	n->pprev = LIST_POISON2;
}
 478 
 479 /**
 480  * hlist_replace_rcu - replace old entry by new one
 481  * @old : the element to be replaced
 482  * @new : the new element to insert
 483  *
 484  * The @old entry will be replaced with the @new entry atomically.
 485  */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	/* Fully initialize @new before making it reachable by readers. */
	new->next = next;
	new->pprev = old->pprev;
	/* Publication point: readers now reach @new instead of @old. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	/* Readers may still follow @old's ->next, so poison only ->pprev. */
	old->pprev = LIST_POISON2;
}
 498 
/*
 * return the first or the next element in an RCU protected hlist.
 * Each yields an __rcu-annotated lvalue: the list head's ->first
 * slot, a node's ->next slot, or the slot that a node's ->pprev
 * points back to, for use with rcu_dereference()/rcu_assign_pointer().
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))
 505 
 506 /**
 507  * hlist_add_head_rcu
 508  * @n: the element to add to the hash list.
 509  * @h: the list to add to.
 510  *
 511  * Description:
 512  * Adds the specified element to the specified hlist,
 513  * while permitting racing traversals.
 514  *
 515  * The caller must take whatever precautions are necessary
 516  * (such as holding appropriate locks) to avoid racing
 517  * with another list-mutation primitive, such as hlist_add_head_rcu()
 518  * or hlist_del_rcu(), running on this same list.
 519  * However, it is perfectly legal to run concurrently with
 520  * the _rcu list-traversal primitives, such as
 521  * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 522  * problems on Alpha CPUs.  Regardless of the type of CPU, the
 523  * list-traversal primitive must be guarded by rcu_read_lock().
 524  */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	/* Fully initialize @n before it becomes reachable by readers. */
	n->next = first;
	n->pprev = &h->first;
	/* Publication point: readers now see @n as the first node. */
	rcu_assign_pointer(hlist_first_rcu(h), n);
	/* RCU readers do not follow ->pprev, so a plain store suffices. */
	if (first)
		first->pprev = &n->next;
}
 536 
 537 /**
 538  * hlist_add_tail_rcu
 539  * @n: the element to add to the hash list.
 540  * @h: the list to add to.
 541  *
 542  * Description:
 543  * Adds the specified element to the specified hlist,
 544  * while permitting racing traversals.
 545  *
 546  * The caller must take whatever precautions are necessary
 547  * (such as holding appropriate locks) to avoid racing
 548  * with another list-mutation primitive, such as hlist_add_head_rcu()
 549  * or hlist_del_rcu(), running on this same list.
 550  * However, it is perfectly legal to run concurrently with
 551  * the _rcu list-traversal primitives, such as
 552  * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 553  * problems on Alpha CPUs.  Regardless of the type of CPU, the
 554  * list-traversal primitive must be guarded by rcu_read_lock().
 555  */
 556 static inline void hlist_add_tail_rcu(struct hlist_node *n,
 557                                       struct hlist_head *h)
 558 {
 559         struct hlist_node *i, *last = NULL;
 560 
 561         /* Note: write side code, so rcu accessors are not needed. */
 562         for (i = h->first; i; i = i->next)
 563                 last = i;
 564 
 565         if (last) {
 566                 n->next = last->next;
 567                 n->pprev = &last->next;
 568                 rcu_assign_pointer(hlist_next_rcu(last), n);
 569         } else {
 570                 hlist_add_head_rcu(n, h);
 571         }
 572 }
 573 
 574 /**
 575  * hlist_add_before_rcu
 576  * @n: the new element to add to the hash list.
 577  * @next: the existing element to add the new element before.
 578  *
 579  * Description:
 580  * Adds the specified element to the specified hlist
 581  * before the specified node while permitting racing traversals.
 582  *
 583  * The caller must take whatever precautions are necessary
 584  * (such as holding appropriate locks) to avoid racing
 585  * with another list-mutation primitive, such as hlist_add_head_rcu()
 586  * or hlist_del_rcu(), running on this same list.
 587  * However, it is perfectly legal to run concurrently with
 588  * the _rcu list-traversal primitives, such as
 589  * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 590  * problems on Alpha CPUs.
 591  */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Fully initialize @n before it becomes reachable by readers. */
	n->pprev = next->pprev;
	n->next = next;
	/* Publication point: the predecessor's forward slot now points at @n. */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	/* RCU readers do not follow ->pprev, so a plain store suffices. */
	next->pprev = &n->next;
}
 600 
 601 /**
 602  * hlist_add_behind_rcu
 603  * @n: the new element to add to the hash list.
 604  * @prev: the existing element to add the new element after.
 605  *
 606  * Description:
 607  * Adds the specified element to the specified hlist
 608  * after the specified node while permitting racing traversals.
 609  *
 610  * The caller must take whatever precautions are necessary
 611  * (such as holding appropriate locks) to avoid racing
 612  * with another list-mutation primitive, such as hlist_add_head_rcu()
 613  * or hlist_del_rcu(), running on this same list.
 614  * However, it is perfectly legal to run concurrently with
 615  * the _rcu list-traversal primitives, such as
 616  * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 617  * problems on Alpha CPUs.
 618  */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	/* Fully initialize @n before it becomes reachable by readers. */
	n->next = prev->next;
	n->pprev = &prev->next;
	/* Publication point: @prev's forward slot now points at @n. */
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	/* RCU readers do not follow ->pprev, so a plain store suffices. */
	if (n->next)
		n->next->pprev = &n->next;
}
 628 
/* Bare hlist_node traversal under RCU; the caller does the entry lookup. */
#define __hlist_for_each_rcu(pos, head)                         \
	for (pos = rcu_dereference(hlist_first_rcu(head));      \
	     pos;                                               \
	     pos = rcu_dereference(hlist_next_rcu(pos)))
 633 
 634 /**
 635  * hlist_for_each_entry_rcu - iterate over rcu list of given type
 636  * @pos:        the type * to use as a loop cursor.
 637  * @head:       the head for your list.
 638  * @member:     the name of the hlist_node within the struct.
 639  * @cond:       optional lockdep expression if called from non-RCU protection.
 640  *
 641  * This list-traversal primitive may safely run concurrently with
 642  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 643  * as long as the traversal is guarded by rcu_read_lock().
 644  */
/*
 * __list_check_rcu() runs once, in the for-initializer via the comma
 * operator; an omitted @cond defaults to 0 so only the RCU-reader
 * check applies.  hlist_entry_safe() maps a NULL node to a NULL entry.
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)            \
	for (__list_check_rcu(dummy, ## cond, 0),                       \
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);                        \
		pos;                                                    \
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
 652 
 653 /**
 654  * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 655  * @pos:        the type * to use as a loop cursor.
 656  * @head:       the head for your list.
 657  * @member:     the name of the hlist_node within the struct.
 658  *
 659  * This list-traversal primitive may safely run concurrently with
 660  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 661  * as long as the traversal is guarded by rcu_read_lock().
 662  *
 663  * This is the same as hlist_for_each_entry_rcu() except that it does
 664  * not do any RCU debugging or tracing.
 665  */
/*
 * rcu_dereference_raw_check() performs no RCU debugging or tracing,
 * making this variant safe to use from the tracing code itself.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)                     \
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);                        \
		pos;                                                    \
		pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
 672 
 673 /**
 674  * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 675  * @pos:        the type * to use as a loop cursor.
 676  * @head:       the head for your list.
 677  * @member:     the name of the hlist_node within the struct.
 678  *
 679  * This list-traversal primitive may safely run concurrently with
 680  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 681  * as long as the traversal is guarded by rcu_read_lock().
 682  */
/* Uses the _bh flavor of rcu_dereference() for each link load. */
#define hlist_for_each_entry_rcu_bh(pos, head, member)                  \
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);                        \
		pos;                                                    \
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
 689 
 690 /**
 691  * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 692  * @pos:        the type * to use as a loop cursor.
 693  * @member:     the name of the hlist_node within the struct.
 694  */
/* Starts at the node AFTER @pos; @pos itself must be a valid entry. */
#define hlist_for_each_entry_continue_rcu(pos, member)                  \
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);      \
	     pos;                                                       \
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))
 701 
 702 /**
 703  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 704  * @pos:        the type * to use as a loop cursor.
 705  * @member:     the name of the hlist_node within the struct.
 706  */
/* _bh flavor of hlist_for_each_entry_continue_rcu(). */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)               \
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
			&(pos)->member)), typeof(*(pos)), member);      \
	     pos;                                                       \
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
			&(pos)->member)), typeof(*(pos)), member))
 713 
 714 /**
 715  * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 716  * @pos:        the type * to use as a loop cursor.
 717  * @member:     the name of the hlist_node within the struct.
 718  */
/* Starts at @pos itself, hence the empty for-initializer. */
#define hlist_for_each_entry_from_rcu(pos, member)                      \
	for (; pos;                                                     \
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member))
 723 
 724 #endif  /* __KERNEL__ */
 725 #endif

/* [<][>][^][v][top][bottom][index][help] */