root/lib/xarray.c


DEFINITIONS

This source file includes the following definitions.
  1. xa_lock_type
  2. xas_lock_type
  3. xas_unlock_type
  4. xa_track_free
  5. xa_zero_busy
  6. xa_mark_set
  7. xa_mark_clear
  8. node_marks
  9. node_get_mark
  10. node_set_mark
  11. node_clear_mark
  12. node_any_mark
  13. node_mark_all
  14. xas_squash_marks
  15. get_offset
  16. xas_set_offset
  17. xas_move_index
  18. xas_advance
  19. set_bounds
  20. xas_start
  21. xas_descend
  22. xas_load
  23. xas_destroy
  24. xas_nomem
  25. __xas_nomem
  26. xas_update
  27. xas_alloc
  28. xas_size
  29. xas_max
  30. max_index
  31. xas_shrink
  32. xas_delete_node
  33. xas_free_nodes
  34. xas_expand
  35. xas_create
  36. xas_create_range
  37. update_node
  38. xas_store
  39. xas_get_mark
  40. xas_set_mark
  41. xas_clear_mark
  42. xas_init_marks
  43. xas_pause
  44. __xas_prev
  45. __xas_next
  46. xas_find
  47. xas_find_marked
  48. xas_find_conflict
  49. xa_load
  50. xas_result
  51. __xa_erase
  52. xa_erase
  53. __xa_store
  54. xa_store
  55. __xa_cmpxchg
  56. __xa_insert
  57. xas_set_range
  58. xa_store_range
  59. __xa_alloc
  60. __xa_alloc_cyclic
  61. __xa_set_mark
  62. __xa_clear_mark
  63. xa_get_mark
  64. xa_set_mark
  65. xa_clear_mark
  66. xa_find
  67. xas_sibling
  68. xa_find_after
  69. xas_extract_present
  70. xas_extract_marked
  71. xa_extract
  72. xa_destroy
  73. xa_dump_node
  74. xa_dump_index
  75. xa_dump_entry
  76. xa_dump

// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
        return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_lock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_lock_bh(xas);
        else
                xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_unlock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_unlock_bh(xas);
        else
                xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
        if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
                xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
        if (xa->xa_flags & XA_FLAGS_MARK(mark))
                xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
        return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
                unsigned int offset, xa_mark_t mark)
{
        return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was already set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
        return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
        bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
        mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
        unsigned int mark = 0;
        unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

        if (!xas->xa_sibs)
                return;

        do {
                unsigned long *marks = xas->xa_node->marks[mark];
                if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
                        continue;
                __set_bit(xas->xa_offset, marks);
                bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
        } while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
        return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
        xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
        unsigned int shift = xas->xa_node->shift;
        xas->xa_index &= ~XA_CHUNK_MASK << shift;
        xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
        xas->xa_offset++;
        xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
        xas->xa_node = XAS_BOUNDS;
        return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
        void *entry;

        if (xas_valid(xas))
                return xas_reload(xas);
        if (xas_error(xas))
                return NULL;

        entry = xa_head(xas->xa);
        if (!xa_is_node(entry)) {
                if (xas->xa_index)
                        return set_bounds(xas);
        } else {
                if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
                        return set_bounds(xas);
        }

        xas->xa_node = NULL;
        return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
        unsigned int offset = get_offset(xas->xa_index, node);
        void *entry = xa_entry(xas->xa, node, offset);

        xas->xa_node = node;
        if (xa_is_sibling(entry)) {
                offset = xa_to_sibling(entry);
                entry = xa_entry(xas->xa, node, offset);
        }

        xas->xa_offset = offset;
        return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
        void *entry = xas_start(xas);

        while (xa_is_node(entry)) {
                struct xa_node *node = xa_to_node(entry);

                if (xas->xa_shift > node->shift)
                        break;
                entry = xas_descend(xas, node);
                if (node->shift == 0)
                        break;
        }
        return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
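
/*
 * Example: a minimal read-side lookup with the advanced API.  This is
 * an illustrative sketch; 'my_xa' and 'my_index' are placeholders.
 *
 *      XA_STATE(xas, &my_xa, my_index);
 *      void *entry;
 *
 *      rcu_read_lock();
 *      do {
 *              entry = xas_load(&xas);
 *      } while (xas_retry(&xas, entry));
 *      rcu_read_unlock();
 */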

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE     ((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->array = XA_RCU_FREE;
        call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_alloc;

        if (!node)
                return;
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        kmem_cache_free(radix_tree_node_cachep, node);
        xas->xa_alloc = NULL;
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
        xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
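
/*
 * Example: the retry loop described above, allocating with GFP_KERNEL
 * once the lock has been dropped.  Illustrative sketch; 'my_xa',
 * 'my_index' and 'item' are placeholders.
 *
 *      XA_STATE(xas, &my_xa, my_index);
 *
 *      do {
 *              xas_lock(&xas);
 *              xas_store(&xas, item);
 *              xas_unlock(&xas);
 *      } while (xas_nomem(&xas, GFP_KERNEL));
 */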

/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
        __must_hold(xas->xa->xa_lock)
{
        unsigned int lock_type = xa_lock_type(xas->xa);

        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
        if (gfpflags_allow_blocking(gfp)) {
                xas_unlock_type(xas, lock_type);
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
                xas_lock_type(xas, lock_type);
        } else {
                xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
        }
        if (!xas->xa_alloc)
                return false;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
        if (xas->xa_update)
                xas->xa_update(node);
        else
                XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
        struct xa_node *parent = xas->xa_node;
        struct xa_node *node = xas->xa_alloc;

        if (xas_invalid(xas))
                return NULL;

        if (node) {
                xas->xa_alloc = NULL;
        } else {
                gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

                if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                        gfp |= __GFP_ACCOUNT;

                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
                if (!node) {
                        xas_set_err(xas, -ENOMEM);
                        return NULL;
                }
        }

        if (parent) {
                node->offset = xas->xa_offset;
                parent->count++;
                XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
                xas_update(xas, parent);
        }
        XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->shift = shift;
        node->count = 0;
        node->nr_values = 0;
        RCU_INIT_POINTER(node->parent, xas->xa_node);
        node->array = xas->xa;

        return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
        return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
        unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
        if (xas->xa_shift || xas->xa_sibs) {
                unsigned long mask = xas_size(xas) - 1;
                max |= mask;
                if (mask == max)
                        max++;
        }
#endif

        return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
        if (!xa_is_node(entry))
                return 0;
        return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = xas->xa_node;

        for (;;) {
                void *entry;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count != 1)
                        break;
                entry = xa_entry_locked(xa, node, 0);
                if (!entry)
                        break;
                if (!xa_is_node(entry) && node->shift)
                        break;
                if (xa_is_zero(entry) && xa_zero_busy(xa))
                        entry = NULL;
                xas->xa_node = XAS_BOUNDS;

                RCU_INIT_POINTER(xa->xa_head, entry);
                if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
                        xa_mark_clear(xa, XA_FREE_MARK);

                node->count = 0;
                node->nr_values = 0;
                if (!xa_is_node(entry))
                        RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
                xas_update(xas, node);
                xa_node_free(node);
                if (!xa_is_node(entry))
                        break;
                node = xa_to_node(entry);
                node->parent = NULL;
        }
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if @xas->xa_node
 * has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        for (;;) {
                struct xa_node *parent;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count)
                        break;

                parent = xa_parent_locked(xas->xa, node);
                xas->xa_node = parent;
                xas->xa_offset = node->offset;
                xa_node_free(node);

                if (!parent) {
                        xas->xa->xa_head = NULL;
                        xas->xa_node = XAS_BOUNDS;
                        return;
                }

                parent->slots[xas->xa_offset] = NULL;
                parent->count--;
                XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
                node = parent;
                xas_update(xas, node);
        }

        if (!node->parent)
                xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
        unsigned int offset = 0;
        struct xa_node *node = top;

        for (;;) {
                void *entry = xa_entry_locked(xas->xa, node, offset);

                if (node->shift && xa_is_node(entry)) {
                        node = xa_to_node(entry);
                        offset = 0;
                        continue;
                }
                if (entry)
                        RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
                offset++;
                while (offset == XA_CHUNK_SIZE) {
                        struct xa_node *parent;

                        parent = xa_parent_locked(xas->xa, node);
                        offset = node->offset + 1;
                        node->count = 0;
                        node->nr_values = 0;
                        xas_update(xas, node);
                        xa_node_free(node);
                        if (node == top)
                                return;
                        node = parent;
                }
        }
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = NULL;
        unsigned int shift = 0;
        unsigned long max = xas_max(xas);

        if (!head) {
                if (max == 0)
                        return 0;
                while ((max >> shift) >= XA_CHUNK_SIZE)
                        shift += XA_CHUNK_SHIFT;
                return shift + XA_CHUNK_SHIFT;
        } else if (xa_is_node(head)) {
                node = xa_to_node(head);
                shift = node->shift + XA_CHUNK_SHIFT;
        }
        xas->xa_node = NULL;

        while (max > max_index(head)) {
                xa_mark_t mark = 0;

                XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
                node = xas_alloc(xas, shift);
                if (!node)
                        return -ENOMEM;

                node->count = 1;
                if (xa_is_value(head))
                        node->nr_values = 1;
                RCU_INIT_POINTER(node->slots[0], head);

                /* Propagate the aggregated mark info to the new child */
                for (;;) {
                        if (xa_track_free(xa) && mark == XA_FREE_MARK) {
                                node_mark_all(node, XA_FREE_MARK);
                                if (!xa_marked(xa, XA_FREE_MARK)) {
                                        node_clear_mark(node, 0, XA_FREE_MARK);
                                        xa_mark_set(xa, XA_FREE_MARK);
                                }
                        } else if (xa_marked(xa, mark)) {
                                node_set_mark(node, 0, mark);
                        }
                        if (mark == XA_MARK_MAX)
                                break;
                        mark_inc(mark);
                }

                /*
                 * Now that the new node is fully initialised, we can add
                 * it to the tree
                 */
                if (xa_is_node(head)) {
                        xa_to_node(head)->offset = 0;
                        rcu_assign_pointer(xa_to_node(head)->parent, node);
                }
                head = xa_mk_node(node);
                rcu_assign_pointer(xa->xa_head, head);
                xas_update(xas, node);

                shift += XA_CHUNK_SHIFT;
        }

        xas->xa_node = node;
        return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
        struct xarray *xa = xas->xa;
        void *entry;
        void __rcu **slot;
        struct xa_node *node = xas->xa_node;
        int shift;
        unsigned int order = xas->xa_shift;

        if (xas_top(node)) {
                entry = xa_head_locked(xa);
                xas->xa_node = NULL;
                if (!entry && xa_zero_busy(xa))
                        entry = XA_ZERO_ENTRY;
                shift = xas_expand(xas, entry);
                if (shift < 0)
                        return NULL;
                if (!shift && !allow_root)
                        shift = XA_CHUNK_SHIFT;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        } else if (xas_error(xas)) {
                return NULL;
        } else if (node) {
                unsigned int offset = xas->xa_offset;

                shift = node->shift;
                entry = xa_entry_locked(xa, node, offset);
                slot = &node->slots[offset];
        } else {
                shift = 0;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        }

        while (shift > order) {
                shift -= XA_CHUNK_SHIFT;
                if (!entry) {
                        node = xas_alloc(xas, shift);
                        if (!node)
                                break;
                        if (xa_track_free(xa))
                                node_mark_all(node, XA_FREE_MARK);
                        rcu_assign_pointer(*slot, xa_mk_node(node));
                } else if (xa_is_node(entry)) {
                        node = xa_to_node(entry);
                } else {
                        break;
                }
                entry = xas_descend(xas, node);
                slot = &node->slots[xas->xa_offset];
        }

        return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
        unsigned long index = xas->xa_index;
        unsigned char shift = xas->xa_shift;
        unsigned char sibs = xas->xa_sibs;

        xas->xa_index |= ((sibs + 1UL) << shift) - 1;
        if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
                xas->xa_offset |= sibs;
        xas->xa_shift = 0;
        xas->xa_sibs = 0;

        for (;;) {
                xas_create(xas, true);
                if (xas_error(xas))
                        goto restore;
                if (xas->xa_index <= (index | XA_CHUNK_MASK))
                        goto success;
                xas->xa_index -= XA_CHUNK_SIZE;

                for (;;) {
                        struct xa_node *node = xas->xa_node;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
                                break;
                }
        }

restore:
        xas->xa_shift = shift;
        xas->xa_sibs = sibs;
        xas->xa_index = index;
        return;
success:
        xas->xa_index = index;
        if (xas->xa_node)
                xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
                int count, int values)
{
        if (!node || (!count && !values))
                return;

        node->count += count;
        node->nr_values += values;
        XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
        XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
        xas_update(xas, node);
        if (count < 0)
                xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
        struct xa_node *node;
        void __rcu **slot = &xas->xa->xa_head;
        unsigned int offset, max;
        int count = 0;
        int values = 0;
        void *first, *next;
        bool value = xa_is_value(entry);

        if (entry) {
                bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
                first = xas_create(xas, allow_root);
        } else {
                first = xas_load(xas);
        }

        if (xas_invalid(xas))
                return first;
        node = xas->xa_node;
        if (node && (xas->xa_shift < node->shift))
                xas->xa_sibs = 0;
        if ((first == entry) && !xas->xa_sibs)
                return first;

        next = first;
        offset = xas->xa_offset;
        max = xas->xa_offset + xas->xa_sibs;
        if (node) {
                slot = &node->slots[offset];
                if (xas->xa_sibs)
                        xas_squash_marks(xas);
        }
        if (!entry)
                xas_init_marks(xas);

        for (;;) {
                /*
                 * Must clear the marks before setting the entry to NULL,
                 * otherwise xas_for_each_marked may find a NULL entry and
                 * stop early.  rcu_assign_pointer contains a release barrier
                 * so the mark clearing will appear to happen before the
                 * entry is set to NULL.
                 */
                rcu_assign_pointer(*slot, entry);
                if (xa_is_node(next) && (!node || node->shift))
                        xas_free_nodes(xas, xa_to_node(next));
                if (!node)
                        break;
                count += !next - !entry;
                values += !xa_is_value(first) - !value;
                if (entry) {
                        if (offset == max)
                                break;
                        if (!xa_is_sibling(entry))
                                entry = xa_mk_sibling(xas->xa_offset);
                } else {
                        if (offset == XA_CHUNK_MASK)
                                break;
                }
                next = xa_entry_locked(xas->xa, node, ++offset);
                if (!xa_is_sibling(next)) {
                        if (!entry && (offset > max))
                                break;
                        first = next;
                }
                slot++;
        }

        update_node(xas, node, count, values);
        return first;
}
EXPORT_SYMBOL_GPL(xas_store);
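
/*
 * Example: erasing an entry with the advanced API by storing NULL,
 * which also reinitialises the marks; this is what __xa_erase() does.
 * Illustrative sketch; 'my_xa' and 'my_index' are placeholders.
 *
 *      XA_STATE(xas, &my_xa, my_index);
 *      void *old;
 *
 *      xas_lock(&xas);
 *      old = xas_store(&xas, NULL);
 *      xas_unlock(&xas);
 */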

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
        if (xas_invalid(xas))
                return false;
        if (!xas->xa_node)
                return xa_marked(xas->xa, mark);
        return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (node_set_mark(node, offset, mark))
                        return;
                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (!xa_marked(xas->xa, mark))
                xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (!node_clear_mark(node, offset, mark))
                        return;
                if (node_any_mark(node, mark))
                        return;

                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (xa_marked(xas->xa, mark))
                xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
        xa_mark_t mark = 0;

        for (;;) {
                if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
                        xas_set_mark(xas, mark);
                else
                        xas_clear_mark(xas, mark);
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }
}
EXPORT_SYMBOL_GPL(xas_init_marks);

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        if (xas_invalid(xas))
                return;

        xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned long offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
                        if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
                if (xas->xa_index == 0)
                        xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
}
EXPORT_SYMBOL_GPL(xas_pause);
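
/*
 * Example: pausing an iteration to give up the lock, as described
 * above.  Illustrative sketch; 'my_xa' and process() are placeholders.
 *
 *      XA_STATE(xas, &my_xa, 0);
 *      void *entry;
 *
 *      xas_lock(&xas);
 *      xas_for_each(&xas, entry, ULONG_MAX) {
 *              process(entry);
 *              if (need_resched()) {
 *                      xas_pause(&xas);
 *                      xas_unlock(&xas);
 *                      cond_resched();
 *                      xas_lock(&xas);
 *              }
 *      }
 *      xas_unlock(&xas);
 */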

/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index--;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset--;

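        /* xa_offset is an unsigned char: 255 means we stepped back past slot 0 */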
        while (xas->xa_offset == 255) {
                xas->xa_offset = xas->xa_node->offset - 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index++;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset++;

        while (xas->xa_offset == XA_CHUNK_SIZE) {
                xas->xa_offset = xas->xa_node->offset + 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
        void *entry;

        if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
        if (xas->xa_index > max)
                return set_bounds(xas);

        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
        } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
        } else if (!xas->xa_node->shift &&
                    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
        }

        xas_advance(xas);

        while (xas->xa_node && (xas->xa_index <= max)) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (xa_is_node(entry)) {
                        xas->xa_node = xa_to_node(entry);
                        xas->xa_offset = 0;
                        continue;
                }
                if (entry && !xa_is_sibling(entry))
                        return entry;

                xas_advance(xas);
        }

        if (!xas->xa_node)
                xas->xa_node = XAS_BOUNDS;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
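
/*
 * Example: xas_find() is the engine behind the xas_for_each() iterator.
 * Illustrative sketch; 'my_xa' and process() are placeholders.
 *
 *      XA_STATE(xas, &my_xa, 0);
 *      void *entry;
 *
 *      rcu_read_lock();
 *      xas_for_each(&xas, entry, ULONG_MAX) {
 *              if (xas_retry(&xas, entry))
 *                      continue;
 *              process(entry);
 *      }
 *      rcu_read_unlock();
 */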

/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
        bool advance = true;
        unsigned int offset;
        void *entry;

        if (xas_error(xas))
                return NULL;
        if (xas->xa_index > max)
                goto max;

        if (!xas->xa_node) {
                xas->xa_index = 1;
                goto out;
        } else if (xas_top(xas->xa_node)) {
                advance = false;
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
                        goto out;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
                        xas->xa_index = 1;
                        goto out;
                }
                xas->xa_node = xa_to_node(entry);
                xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
        }

        while (xas->xa_index <= max) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        advance = false;
                        continue;
                }

                if (!advance) {
                        entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                        if (xa_is_sibling(entry)) {
                                xas->xa_offset = xa_to_sibling(entry);
                                xas_move_index(xas, xas->xa_offset);
                        }
                }

                offset = xas_find_chunk(xas, advance, mark);
                if (offset > xas->xa_offset) {
                        advance = false;
                        xas_move_index(xas, offset);
                        /* Mind the wrap */
                        if ((xas->xa_index - 1) >= max)
                                goto max;
                        xas->xa_offset = offset;
                        if (offset == XA_CHUNK_SIZE)
                                continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
                        continue;
                if (!xa_is_node(entry))
                        return entry;
                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }

out:
        if (xas->xa_index > max)
                goto max;
        return set_bounds(xas);
max:
        xas->xa_node = XAS_RESTART;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
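
/*
 * Example: walking only marked entries, as the xas_for_each_marked()
 * iterator does.  Illustrative sketch; 'my_xa' and process() are
 * placeholders.
 *
 *      XA_STATE(xas, &my_xa, 0);
 *      void *entry;
 *
 *      rcu_read_lock();
 *      xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 *              process(entry);
 *      rcu_read_unlock();
 */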

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
        void *curr;

        if (xas_error(xas))
                return NULL;

        if (!xas->xa_node)
                return NULL;

        if (xas_top(xas->xa_node)) {
                curr = xas_start(xas);
                if (!curr)
                        return NULL;
                while (xa_is_node(curr)) {
                        struct xa_node *node = xa_to_node(curr);
                        curr = xas_descend(xas, node);
                }
                if (curr)
                        return curr;
        }

        if (xas->xa_node->shift > xas->xa_shift)
                return NULL;

        for (;;) {
                if (xas->xa_node->shift == xas->xa_shift) {
                        if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
                                break;
                } else if (xas->xa_offset == XA_CHUNK_MASK) {
                        xas->xa_offset = xas->xa_node->offset;
                        xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        continue;
                }
                curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
                if (xa_is_sibling(curr))
                        continue;
                while (xa_is_node(curr)) {
                        xas->xa_node = xa_to_node(curr);
                        xas->xa_offset = 0;
                        curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
                }
                if (curr)
                        return curr;
        }
        xas->xa_offset -= xas->xa_sibs;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        void *entry;

        rcu_read_lock();
        do {
                entry = xas_load(&xas);
                if (xa_is_zero(entry))
                        entry = NULL;
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        return entry;
}
EXPORT_SYMBOL(xa_load);
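
/*
 * Example: the normal API requires no locking from the caller.
 * Illustrative sketch; 'my_xa' is a placeholder.
 *
 *      void *entry = xa_load(&my_xa, 42);
 */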

static void *xas_result(struct xa_state *xas, void *curr)
{
        if (xa_is_zero(curr))
                return NULL;
        if (xas_error(xas))
                curr = xas->xa_node;
        return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
        void *entry;

        xa_lock(xa);
        entry = __xa_erase(xa, index);
        xa_unlock(xa);

        return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);
        if (xa_track_free(xa) && !entry)
                entry = XA_ZERO_ENTRY;

        do {
                curr = xas_store(&xas, entry);
                if (xa_track_free(xa))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        void *curr;

        xa_lock(xa);
        curr = __xa_store(xa, index, entry, gfp);
        xa_unlock(xa);

        return curr;
}
EXPORT_SYMBOL(xa_store);
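
/*
 * Example: storing with the normal API and checking for failure.
 * Illustrative sketch; 'my_xa' and 'item' are placeholders.
 *
 *      void *old = xa_store(&my_xa, 17, item, GFP_KERNEL);
 *
 *      if (xa_is_err(old))
 *              return xa_err(old);
 */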

/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
                        void *old, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);

        do {
                curr = xas_load(&xas);
                if (curr == old) {
                        xas_store(&xas, entry);
                        if (xa_track_free(xa) && entry && !curr)
                                xas_clear_mark(&xas, XA_FREE_MARK);
                }
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
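
/*
 * Example: most callers use the xa_cmpxchg() wrapper, which takes the
 * lock itself.  Illustrative sketch; 'my_xa', 'idx', 'old_item' and
 * 'new_item' are placeholders.
 *
 *      void *curr = xa_cmpxchg(&my_xa, idx, old_item, new_item, GFP_KERNEL);
 *
 *      if (xa_is_err(curr))
 *              return xa_err(curr);
 *
 * If curr != old_item, another thread modified the entry first.
 */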
1468 
1469 /**
1470  * __xa_insert() - Store this entry in the XArray if no entry is present.
1471  * @xa: XArray.
1472  * @index: Index into array.
1473  * @entry: New entry.
1474  * @gfp: Memory allocation flags.
1475  *
1476  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1477  * if no entry is present.  Inserting will fail if a reserved entry is
1478  * present, even though loading from this index will return NULL.
1479  *
1480  * Context: Any context.  Expects xa_lock to be held on entry.  May
1481  * release and reacquire xa_lock if @gfp flags permit.
1482  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
1483  * -ENOMEM if memory could not be allocated.
1484  */
1485 int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1486 {
1487         XA_STATE(xas, xa, index);
1488         void *curr;
1489 
1490         if (WARN_ON_ONCE(xa_is_advanced(entry)))
1491                 return -EINVAL;
1492         if (!entry)
1493                 entry = XA_ZERO_ENTRY;
1494 
1495         do {
1496                 curr = xas_load(&xas);
1497                 if (!curr) {
1498                         xas_store(&xas, entry);
1499                         if (xa_track_free(xa))
1500                                 xas_clear_mark(&xas, XA_FREE_MARK);
1501                 } else {
1502                         xas_set_err(&xas, -EBUSY);
1503                 }
1504         } while (__xas_nomem(&xas, gfp));
1505 
1506         return xas_error(&xas);
1507 }
1508 EXPORT_SYMBOL(__xa_insert);
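
/*
 * Editorial example: xa_insert() (the locked wrapper around __xa_insert())
 * succeeds only if the index is vacant, so it can be used to claim an index
 * atomically.  Sketch only; the function name is illustrative.
 */
static int example_claim(struct xarray *xa, unsigned long index, void *obj)
{
	/* 0 on success; -EBUSY if the index was taken; -ENOMEM on failure */
	return xa_insert(xa, index, obj, GFP_KERNEL);
}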
1509 
1510 #ifdef CONFIG_XARRAY_MULTI
1511 static void xas_set_range(struct xa_state *xas, unsigned long first,
1512                 unsigned long last)
1513 {
1514         unsigned int shift = 0;
1515         unsigned long sibs = last - first;
1516         unsigned int offset = XA_CHUNK_MASK;
1517 
1518         xas_set(xas, first);
1519 
1520         while ((first & XA_CHUNK_MASK) == 0) {
1521                 if (sibs < XA_CHUNK_MASK)
1522                         break;
1523                 if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1524                         break;
1525                 shift += XA_CHUNK_SHIFT;
1526                 if (offset == XA_CHUNK_MASK)
1527                         offset = sibs & XA_CHUNK_MASK;
1528                 sibs >>= XA_CHUNK_SHIFT;
1529                 first >>= XA_CHUNK_SHIFT;
1530         }
1531 
1532         offset = first & XA_CHUNK_MASK;
1533         if (offset + sibs > XA_CHUNK_MASK)
1534                 sibs = XA_CHUNK_MASK - offset;
1535         if ((((first + sibs + 1) << shift) - 1) > last)
1536                 sibs -= 1;
1537 
1538         xas->xa_shift = shift;
1539         xas->xa_sibs = sibs;
1540 }
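
/*
 * Editorial worked example, assuming XA_CHUNK_SHIFT == 6 (64 slots per
 * node): for first == 64 and last == 191, the loop above raises shift to 6
 * and leaves sibs == 1, so the store covers the whole range with one
 * order-6 slot plus one sibling entry in a single pass.
 */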
1541 
1542 /**
1543  * xa_store_range() - Store this entry at a range of indices in the XArray.
1544  * @xa: XArray.
1545  * @first: First index to affect.
1546  * @last: Last index to affect.
1547  * @entry: New entry.
1548  * @gfp: Memory allocation flags.
1549  *
1550  * After this function returns, loads from any index between @first and @last,
1551  * inclusive, will return @entry.
1552  * Storing into an existing multislot entry updates the entry of every index.
1553  * The marks associated with these indices are unaffected unless @entry is %NULL.
1554  *
1555  * Context: Process context.  Takes and releases the xa_lock.  May sleep
1556  * if the @gfp flags permit.
1557  * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1558  * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1559  */
1560 void *xa_store_range(struct xarray *xa, unsigned long first,
1561                 unsigned long last, void *entry, gfp_t gfp)
1562 {
1563         XA_STATE(xas, xa, 0);
1564 
1565         if (WARN_ON_ONCE(xa_is_internal(entry)))
1566                 return XA_ERROR(-EINVAL);
1567         if (last < first)
1568                 return XA_ERROR(-EINVAL);
1569 
1570         do {
1571                 xas_lock(&xas);
1572                 if (entry) {
1573                         unsigned int order = BITS_PER_LONG;
1574                         if (last + 1)
1575                                 order = __ffs(last + 1);
1576                         xas_set_order(&xas, last, order);
1577                         xas_create(&xas, true);
1578                         if (xas_error(&xas))
1579                                 goto unlock;
1580                 }
1581                 do {
1582                         xas_set_range(&xas, first, last);
1583                         xas_store(&xas, entry);
1584                         if (xas_error(&xas))
1585                                 goto unlock;
1586                         first += xas_size(&xas);
1587                 } while (first <= last);
1588 unlock:
1589                 xas_unlock(&xas);
1590         } while (xas_nomem(&xas, gfp));
1591 
1592         return xas_result(&xas, NULL);
1593 }
1594 EXPORT_SYMBOL(xa_store_range);
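
/*
 * Editorial example (requires CONFIG_XARRAY_MULTI): tagging every index in
 * a range with the same value entry, so no object lifetime is involved.
 * Sketch only; names are illustrative.
 */
static int example_fill_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
	void *ret = xa_store_range(xa, first, last, xa_mk_value(1), GFP_KERNEL);

	if (xa_is_err(ret))
		return xa_err(ret);
	/* xa_load() now returns xa_mk_value(1) for every index in the range */
	return 0;
}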
1595 #endif /* CONFIG_XARRAY_MULTI */
1596 
1597 /**
1598  * __xa_alloc() - Find somewhere to store this entry in the XArray.
1599  * @xa: XArray.
1600  * @id: Pointer to ID.
1601  * @entry: New entry.
1602  * @limit: Range for allocated ID.
1603  * @gfp: Memory allocation flags.
1604  *
1605  * Finds an empty entry in @xa between @limit.min and @limit.max,
1606  * stores the index into the @id pointer, then stores the entry at
1607  * that index.  A concurrent lookup will not see an uninitialised @id.
1608  *
1609  * Context: Any context.  Expects xa_lock to be held on entry.  May
1610  * release and reacquire xa_lock if @gfp flags permit.
1611  * Return: 0 on success, -ENOMEM if memory could not be allocated or
1612  * -EBUSY if there are no free entries in @limit.
1613  */
1614 int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1615                 struct xa_limit limit, gfp_t gfp)
1616 {
1617         XA_STATE(xas, xa, 0);
1618 
1619         if (WARN_ON_ONCE(xa_is_advanced(entry)))
1620                 return -EINVAL;
1621         if (WARN_ON_ONCE(!xa_track_free(xa)))
1622                 return -EINVAL;
1623 
1624         if (!entry)
1625                 entry = XA_ZERO_ENTRY;
1626 
1627         do {
1628                 xas.xa_index = limit.min;
1629                 xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1630                 if (xas.xa_node == XAS_RESTART)
1631                         xas_set_err(&xas, -EBUSY);
1632                 else
1633                         *id = xas.xa_index;
1634                 xas_store(&xas, entry);
1635                 xas_clear_mark(&xas, XA_FREE_MARK);
1636         } while (__xas_nomem(&xas, gfp));
1637 
1638         return xas_error(&xas);
1639 }
1640 EXPORT_SYMBOL(__xa_alloc);
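
/*
 * Editorial example: allocating an ID with the unlocked xa_alloc() wrapper.
 * The array must have been initialised with XA_FLAGS_ALLOC, e.g. via
 * DEFINE_XARRAY_ALLOC(), or the allocation fails with -EINVAL.  Sketch
 * only; names are illustrative.
 */
static DEFINE_XARRAY_ALLOC(example_ids);

static int example_alloc_id(void *obj, u32 *id)
{
	return xa_alloc(&example_ids, id, obj, xa_limit_32b, GFP_KERNEL);
}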
1641 
1642 /**
1643  * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1644  * @xa: XArray.
1645  * @id: Pointer to ID.
1646  * @entry: New entry.
1647  * @limit: Range of allocated ID.
1648  * @next: Pointer to next ID to allocate.
1649  * @gfp: Memory allocation flags.
1650  *
1651  * Finds an empty entry in @xa between @limit.min and @limit.max,
1652  * stores the index into the @id pointer, then stores the entry at
1653  * that index.  A concurrent lookup will not see an uninitialised @id.
1654  * The search for an empty entry will start at @next and will wrap
1655  * around if necessary.
1656  *
1657  * Context: Any context.  Expects xa_lock to be held on entry.  May
1658  * release and reacquire xa_lock if @gfp flags permit.
1659  * Return: 0 if the allocation succeeded without wrapping.  1 if the
1660  * allocation succeeded after wrapping, -ENOMEM if memory could not be
1661  * allocated or -EBUSY if there are no free entries in @limit.
1662  */
1663 int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1664                 struct xa_limit limit, u32 *next, gfp_t gfp)
1665 {
1666         u32 min = limit.min;
1667         int ret;
1668 
1669         limit.min = max(min, *next);
1670         ret = __xa_alloc(xa, id, entry, limit, gfp);
1671         if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1672                 xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1673                 ret = 1;
1674         }
1675 
1676         if (ret < 0 && limit.min > min) {
1677                 limit.min = min;
1678                 ret = __xa_alloc(xa, id, entry, limit, gfp);
1679                 if (ret == 0)
1680                         ret = 1;
1681         }
1682 
1683         if (ret >= 0) {
1684                 *next = *id + 1;
1685                 if (*next == 0)
1686                         xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1687         }
1688         return ret;
1689 }
1690 EXPORT_SYMBOL(__xa_alloc_cyclic);
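
/*
 * Editorial example: cyclic allocation hands out increasing IDs and wraps
 * back to limit.min once the space is exhausted.  "example_next" must
 * persist between calls; it is an illustrative name.  Sketch only.
 */
static u32 example_next;

static int example_alloc_cyclic(struct xarray *xa, void *obj, u32 *id)
{
	int ret = xa_alloc_cyclic(xa, id, obj, xa_limit_31b, &example_next,
			GFP_KERNEL);

	return ret < 0 ? ret : 0;	/* ret == 1 just means "wrapped" */
}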
1691 
1692 /**
1693  * __xa_set_mark() - Set this mark on this entry while locked.
1694  * @xa: XArray.
1695  * @index: Index of entry.
1696  * @mark: Mark number.
1697  *
1698  * Attempting to set a mark on a %NULL entry does not succeed.
1699  *
1700  * Context: Any context.  Expects xa_lock to be held on entry.
1701  */
1702 void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1703 {
1704         XA_STATE(xas, xa, index);
1705         void *entry = xas_load(&xas);
1706 
1707         if (entry)
1708                 xas_set_mark(&xas, mark);
1709 }
1710 EXPORT_SYMBOL(__xa_set_mark);
1711 
1712 /**
1713  * __xa_clear_mark() - Clear this mark on this entry while locked.
1714  * @xa: XArray.
1715  * @index: Index of entry.
1716  * @mark: Mark number.
1717  *
1718  * Context: Any context.  Expects xa_lock to be held on entry.
1719  */
1720 void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1721 {
1722         XA_STATE(xas, xa, index);
1723         void *entry = xas_load(&xas);
1724 
1725         if (entry)
1726                 xas_clear_mark(&xas, mark);
1727 }
1728 EXPORT_SYMBOL(__xa_clear_mark);
1729 
1730 /**
1731  * xa_get_mark() - Inquire whether this mark is set on this entry.
1732  * @xa: XArray.
1733  * @index: Index of entry.
1734  * @mark: Mark number.
1735  *
1736  * This function uses the RCU read lock, so the result may be out of date
1737  * by the time it returns.  If you need the result to be stable, use a lock.
1738  *
1739  * Context: Any context.  Takes and releases the RCU lock.
1740  * Return: True if the entry at @index has this mark set, false if it doesn't.
1741  */
1742 bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1743 {
1744         XA_STATE(xas, xa, index);
1745         void *entry;
1746 
1747         rcu_read_lock();
1748         entry = xas_start(&xas);
1749         while (xas_get_mark(&xas, mark)) {
1750                 if (!xa_is_node(entry))
1751                         goto found;
1752                 entry = xas_descend(&xas, xa_to_node(entry));
1753         }
1754         rcu_read_unlock();
1755         return false;
1756  found:
1757         rcu_read_unlock();
1758         return true;
1759 }
1760 EXPORT_SYMBOL(xa_get_mark);
1761 
1762 /**
1763  * xa_set_mark() - Set this mark on this entry.
1764  * @xa: XArray.
1765  * @index: Index of entry.
1766  * @mark: Mark number.
1767  *
1768  * Attempting to set a mark on a %NULL entry does not succeed.
1769  *
1770  * Context: Process context.  Takes and releases the xa_lock.
1771  */
1772 void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1773 {
1774         xa_lock(xa);
1775         __xa_set_mark(xa, index, mark);
1776         xa_unlock(xa);
1777 }
1778 EXPORT_SYMBOL(xa_set_mark);
1779 
1780 /**
1781  * xa_clear_mark() - Clear this mark on this entry.
1782  * @xa: XArray.
1783  * @index: Index of entry.
1784  * @mark: Mark number.
1785  *
1786  * Clearing a mark always succeeds.
1787  *
1788  * Context: Process context.  Takes and releases the xa_lock.
1789  */
1790 void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1791 {
1792         xa_lock(xa);
1793         __xa_clear_mark(xa, index, mark);
1794         xa_unlock(xa);
1795 }
1796 EXPORT_SYMBOL(xa_clear_mark);
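
/*
 * Editorial example: using XA_MARK_0 as a "dirty" tag.  Marks can only be
 * set on present entries; clearing always succeeds.  Sketch only; without
 * holding the xa_lock the test-and-clear below is not atomic.
 */
static void example_mark_dirty(struct xarray *xa, unsigned long index)
{
	xa_set_mark(xa, index, XA_MARK_0);	/* no-op if the slot is empty */
}

static bool example_test_and_clean(struct xarray *xa, unsigned long index)
{
	bool dirty = xa_get_mark(xa, index, XA_MARK_0);

	if (dirty)
		xa_clear_mark(xa, index, XA_MARK_0);
	return dirty;
}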
1797 
1798 /**
1799  * xa_find() - Search the XArray for an entry.
1800  * @xa: XArray.
1801  * @indexp: Pointer to an index.
1802  * @max: Maximum index to search to.
1803  * @filter: Selection criterion.
1804  *
1805  * Finds the entry in @xa which matches the @filter, and has the lowest
1806  * index that is at least @indexp and no more than @max.
1807  * If an entry is found, @indexp is updated to be the index of the entry.
1808  * This function is protected by the RCU read lock, so it may not find
1809  * entries which are being simultaneously added.  It will not return an
1810  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1811  *
1812  * Context: Any context.  Takes and releases the RCU lock.
1813  * Return: The entry, if found, otherwise %NULL.
1814  */
1815 void *xa_find(struct xarray *xa, unsigned long *indexp,
1816                         unsigned long max, xa_mark_t filter)
1817 {
1818         XA_STATE(xas, xa, *indexp);
1819         void *entry;
1820 
1821         rcu_read_lock();
1822         do {
1823                 if ((__force unsigned int)filter < XA_MAX_MARKS)
1824                         entry = xas_find_marked(&xas, max, filter);
1825                 else
1826                         entry = xas_find(&xas, max);
1827         } while (xas_retry(&xas, entry));
1828         rcu_read_unlock();
1829 
1830         if (entry)
1831                 *indexp = xas.xa_index;
1832         return entry;
1833 }
1834 EXPORT_SYMBOL(xa_find);
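
/*
 * Editorial example: passing a mark as @filter restricts the search to
 * marked entries, while %XA_PRESENT matches any non-NULL entry.  Sketch
 * only; the name is illustrative.
 */
static void *example_first_dirty(struct xarray *xa, unsigned long *index)
{
	*index = 0;
	return xa_find(xa, index, ULONG_MAX, XA_MARK_0);
}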
1835 
1836 static bool xas_sibling(struct xa_state *xas)
1837 {
1838         struct xa_node *node = xas->xa_node;
1839         unsigned long mask;
1840 
1841         if (!node)
1842                 return false;
1843         mask = (XA_CHUNK_SIZE << node->shift) - 1;
1844         return (xas->xa_index & mask) >
1845                 ((unsigned long)xas->xa_offset << node->shift);
1846 }
1847 
1848 /**
1849  * xa_find_after() - Search the XArray for a present entry.
1850  * @xa: XArray.
1851  * @indexp: Pointer to an index.
1852  * @max: Maximum index to search to.
1853  * @filter: Selection criterion.
1854  *
1855  * Finds the entry in @xa which matches the @filter and has the lowest
1856  * index that is above @indexp and no more than @max.
1857  * If an entry is found, @indexp is updated to be the index of the entry.
1858  * This function is protected by the RCU read lock, so it may miss entries
1859  * which are being simultaneously added.  It will not return an
1860  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1861  *
1862  * Context: Any context.  Takes and releases the RCU lock.
1863  * Return: The pointer, if found, otherwise %NULL.
1864  */
1865 void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1866                         unsigned long max, xa_mark_t filter)
1867 {
1868         XA_STATE(xas, xa, *indexp + 1);
1869         void *entry;
1870 
1871         if (xas.xa_index == 0)
1872                 return NULL;
1873 
1874         rcu_read_lock();
1875         for (;;) {
1876                 if ((__force unsigned int)filter < XA_MAX_MARKS)
1877                         entry = xas_find_marked(&xas, max, filter);
1878                 else
1879                         entry = xas_find(&xas, max);
1880 
1881                 if (xas_invalid(&xas))
1882                         break;
1883                 if (xas_sibling(&xas))
1884                         continue;
1885                 if (!xas_retry(&xas, entry))
1886                         break;
1887         }
1888         rcu_read_unlock();
1889 
1890         if (entry)
1891                 *indexp = xas.xa_index;
1892         return entry;
1893 }
1894 EXPORT_SYMBOL(xa_find_after);
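
/*
 * Editorial example: xa_find() plus xa_find_after() form a walk that can
 * be paused and resumed by index, which is how the xa_for_each() iterators
 * are built.  Sketch only.
 */
static void example_walk(struct xarray *xa, unsigned long max)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, max, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, max, XA_PRESENT)) {
		/* process @entry at @index */
	}
}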
1895 
1896 static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
1897                         unsigned long max, unsigned int n)
1898 {
1899         void *entry;
1900         unsigned int i = 0;
1901 
1902         rcu_read_lock();
1903         xas_for_each(xas, entry, max) {
1904                 if (xas_retry(xas, entry))
1905                         continue;
1906                 dst[i++] = entry;
1907                 if (i == n)
1908                         break;
1909         }
1910         rcu_read_unlock();
1911 
1912         return i;
1913 }
1914 
1915 static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
1916                         unsigned long max, unsigned int n, xa_mark_t mark)
1917 {
1918         void *entry;
1919         unsigned int i = 0;
1920 
1921         rcu_read_lock();
1922         xas_for_each_marked(xas, entry, max, mark) {
1923                 if (xas_retry(xas, entry))
1924                         continue;
1925                 dst[i++] = entry;
1926                 if (i == n)
1927                         break;
1928         }
1929         rcu_read_unlock();
1930 
1931         return i;
1932 }
1933 
1934 /**
1935  * xa_extract() - Copy selected entries from the XArray into a normal array.
1936  * @xa: The source XArray to copy from.
1937  * @dst: The buffer to copy entries into.
1938  * @start: The first index in the XArray eligible to be selected.
1939  * @max: The last index in the XArray eligible to be selected.
1940  * @n: The maximum number of entries to copy.
1941  * @filter: Selection criterion.
1942  *
1943  * Copies up to @n entries that match @filter from the XArray.  The
1944  * copied entries will have indices between @start and @max, inclusive.
1945  *
1946  * The @filter may be an XArray mark value, in which case entries which are
1947  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
1948  * which case all entries which are not %NULL will be copied.
1949  *
1950  * The entries returned may not represent a snapshot of the XArray at a
1951  * moment in time.  For example, if another thread stores to index 5, then
1952  * index 10, calling xa_extract() may return the old contents of index 5
1953  * and the new contents of index 10.  Indices not modified while this
1954  * function is running will not be skipped.
1955  *
1956  * If you need stronger guarantees, holding the xa_lock across calls to this
1957  * function will prevent concurrent modification.
1958  *
1959  * Context: Any context.  Takes and releases the RCU lock.
1960  * Return: The number of entries copied.
1961  */
1962 unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
1963                         unsigned long max, unsigned int n, xa_mark_t filter)
1964 {
1965         XA_STATE(xas, xa, start);
1966 
1967         if (!n)
1968                 return 0;
1969 
1970         if ((__force unsigned int)filter < XA_MAX_MARKS)
1971                 return xas_extract_marked(&xas, dst, max, n, filter);
1972         return xas_extract_present(&xas, dst, max, n);
1973 }
1974 EXPORT_SYMBOL(xa_extract);
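
/*
 * Editorial example: snapshotting up to 16 present entries into a plain
 * array so they can be processed without holding the RCU read lock.
 * Sketch only; names are illustrative.
 */
static void example_batch(struct xarray *xa)
{
	void *batch[16];
	unsigned int i, n;

	n = xa_extract(xa, batch, 0, ULONG_MAX, ARRAY_SIZE(batch), XA_PRESENT);
	for (i = 0; i < n; i++) {
		/* process batch[i]; see the caveats in the comment above */
	}
}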
1975 
1976 /**
1977  * xa_destroy() - Free all internal data structures.
1978  * @xa: XArray.
1979  *
1980  * After calling this function, the XArray is empty and has freed all memory
1981  * allocated for its internal data structures.  You are responsible for
1982  * freeing the objects referenced by the XArray.
1983  *
1984  * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
1985  */
1986 void xa_destroy(struct xarray *xa)
1987 {
1988         XA_STATE(xas, xa, 0);
1989         unsigned long flags;
1990         void *entry;
1991 
1992         xas.xa_node = NULL;
1993         xas_lock_irqsave(&xas, flags);
1994         entry = xa_head_locked(xa);
1995         RCU_INIT_POINTER(xa->xa_head, NULL);
1996         xas_init_marks(&xas);
1997         if (xa_zero_busy(xa))
1998                 xa_mark_clear(xa, XA_FREE_MARK);
1999         /* lockdep checks we're still holding the lock in xas_free_nodes() */
2000         if (xa_is_node(entry))
2001                 xas_free_nodes(&xas, xa_to_node(entry));
2002         xas_unlock_irqrestore(&xas, flags);
2003 }
2004 EXPORT_SYMBOL(xa_destroy);
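
/*
 * Editorial example: xa_destroy() only frees the XArray's internal nodes,
 * so a typical teardown erases and frees the stored objects first.  Sketch
 * only; assumes the entries were kmalloc'd.
 */
static void example_teardown(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		kfree(xa_erase(xa, index));
	xa_destroy(xa);
}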
2005 
2006 #ifdef XA_DEBUG
2007 void xa_dump_node(const struct xa_node *node)
2008 {
2009         unsigned int i, j;
2010 
2011         if (!node)
2012                 return;
2013         if ((unsigned long)node & 3) {
2014                 pr_cont("node %px\n", node);
2015                 return;
2016         }
2017 
2018         pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2019                 "array %px list %px %px marks",
2020                 node, node->parent ? "offset" : "max", node->offset,
2021                 node->parent, node->shift, node->count, node->nr_values,
2022                 node->array, node->private_list.prev, node->private_list.next);
2023         for (i = 0; i < XA_MAX_MARKS; i++)
2024                 for (j = 0; j < XA_MARK_LONGS; j++)
2025                         pr_cont(" %lx", node->marks[i][j]);
2026         pr_cont("\n");
2027 }
2028 
2029 void xa_dump_index(unsigned long index, unsigned int shift)
2030 {
2031         if (!shift)
2032                 pr_info("%lu: ", index);
2033         else if (shift >= BITS_PER_LONG)
2034                 pr_info("0-%lu: ", ~0UL);
2035         else
2036                 pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2037 }
2038 
2039 void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2040 {
2041         if (!entry)
2042                 return;
2043 
2044         xa_dump_index(index, shift);
2045 
2046         if (xa_is_node(entry)) {
2047                 if (shift == 0) {
2048                         pr_cont("%px\n", entry);
2049                 } else {
2050                         unsigned long i;
2051                         struct xa_node *node = xa_to_node(entry);
2052                         xa_dump_node(node);
2053                         for (i = 0; i < XA_CHUNK_SIZE; i++)
2054                                 xa_dump_entry(node->slots[i],
2055                                       index + (i << node->shift), node->shift);
2056                 }
2057         } else if (xa_is_value(entry))
2058                 pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2059                                                 xa_to_value(entry), entry);
2060         else if (!xa_is_internal(entry))
2061                 pr_cont("%px\n", entry);
2062         else if (xa_is_retry(entry))
2063                 pr_cont("retry (%ld)\n", xa_to_internal(entry));
2064         else if (xa_is_sibling(entry))
2065                 pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
2066         else if (xa_is_zero(entry))
2067                 pr_cont("zero (%ld)\n", xa_to_internal(entry));
2068         else
2069                 pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2070 }
2071 
2072 void xa_dump(const struct xarray *xa)
2073 {
2074         void *entry = xa->xa_head;
2075         unsigned int shift = 0;
2076 
2077         pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
2078                         xa->xa_flags, xa_marked(xa, XA_MARK_0),
2079                         xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2080         if (xa_is_node(entry))
2081                 shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2082         xa_dump_entry(entry, 0, shift);
2083 }
2084 #endif
