lib/test_xarray.c


DEFINITIONS

This source file includes the following definitions.
  1. IS_ENABLED
  2. xa_mk_index
  3. xa_store_index
  4. xa_insert_index
  5. xa_alloc_index
  6. xa_erase_index
  7. xa_store_order
  8. check_xa_err
  9. check_xas_retry
  10. check_xa_load
  11. check_xa_mark_1
  12. check_xa_mark_2
  13. check_xa_mark
  14. check_xa_shrink
  15. check_insert
  16. check_cmpxchg
  17. check_reserve
  18. check_xas_erase
  19. check_multi_store_1
  20. check_multi_store_2
  21. check_multi_store_3
  22. check_multi_store
  23. check_xa_alloc_1
  24. check_xa_alloc_2
  25. check_xa_alloc_3
  26. check_xa_alloc
  27. __check_store_iter
  28. check_store_iter
  29. check_multi_find_1
  30. check_multi_find_2
  31. check_multi_find_3
  32. check_find_1
  33. check_find_2
  34. check_find_3
  35. check_find_4
  36. check_find
  37. xa_find_entry
  38. check_find_entry
  39. check_pause
  40. check_move_tiny
  41. check_move_max
  42. check_move_small
  43. check_move
  44. xa_store_many_order
  45. check_create_range_1
  46. check_create_range_2
  47. check_create_range_3
  48. check_create_range_4
  49. check_create_range
  50. __check_store_range
  51. check_store_range
  52. check_align_1
  53. check_align_2
  54. check_align
  55. test_update_node
  56. shadow_remove
  57. check_workingset
  58. check_account
  59. check_destroy
  60. xarray_checks
  61. xarray_exit

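The tests below exercise the XArray through both the normal API (xa_load(), xa_store(), xa_erase(), xa_alloc()) and the advanced API (XA_STATE and the xas_* operations). For orientation, here is a minimal sketch of the normal-API pattern the helpers in this file are built around; it uses only calls that appear below, and the array name and the example() wrapper are purely illustrative:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(array);		/* illustrative array */

	static void example(void)
	{
		/* Store the value entry for index 5, then read it back. */
		BUG_ON(xa_err(xa_store(&array, 5, xa_mk_value(5), GFP_KERNEL)));
		BUG_ON(xa_to_value(xa_load(&array, 5)) != 5);

		/* Erasing returns the old entry and leaves the array empty. */
		BUG_ON(xa_erase(&array, 5) != xa_mk_value(5));
		BUG_ON(!xa_empty(&array));
	}
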
   1 // SPDX-License-Identifier: GPL-2.0+
   2 /*
   3  * test_xarray.c: Test the XArray API
   4  * Copyright (c) 2017-2018 Microsoft Corporation
   5  * Copyright (c) 2019-2020 Oracle
   6  * Author: Matthew Wilcox <willy@infradead.org>
   7  */
   8 
   9 #include <linux/xarray.h>
  10 #include <linux/module.h>
  11 
  12 static unsigned int tests_run;
  13 static unsigned int tests_passed;
  14 
  15 static const unsigned int order_limit =
  16                 IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
  17 
  18 #ifndef XA_DEBUG
  19 # ifdef __KERNEL__
  20 void xa_dump(const struct xarray *xa) { }
  21 # endif
  22 #undef XA_BUG_ON
  23 #define XA_BUG_ON(xa, x) do {                                   \
  24         tests_run++;                                            \
  25         if (x) {                                                \
  26                 printk("BUG at %s:%d\n", __func__, __LINE__);   \
  27                 xa_dump(xa);                                    \
  28                 dump_stack();                                   \
  29         } else {                                                \
  30                 tests_passed++;                                 \
  31         }                                                       \
  32 } while (0)
  33 #endif
  34 
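/*
 * Test helpers: an index is stored as the value entry xa_mk_index(index),
 * so later loads and erases can be checked against the index they used.
 */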
  35 static void *xa_mk_index(unsigned long index)
  36 {
  37         return xa_mk_value(index & LONG_MAX);
  38 }
  39 
  40 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
  41 {
  42         return xa_store(xa, index, xa_mk_index(index), gfp);
  43 }
  44 
  45 static void xa_insert_index(struct xarray *xa, unsigned long index)
  46 {
  47         XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
  48                                 GFP_KERNEL) != 0);
  49 }
  50 
  51 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
  52 {
  53         u32 id;
  54 
  55         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
  56                                 gfp) != 0);
  57         XA_BUG_ON(xa, id != index);
  58 }
  59 
  60 static void xa_erase_index(struct xarray *xa, unsigned long index)
  61 {
  62         XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
  63         XA_BUG_ON(xa, xa_load(xa, index) != NULL);
  64 }
  65 
  66 /*
  67  * If anyone needs this, please move it to xarray.c.  We have no current
  68  * users outside the test suite because all current multislot users want
  69  * to use the advanced API.
  70  */
  71 static void *xa_store_order(struct xarray *xa, unsigned long index,
  72                 unsigned order, void *entry, gfp_t gfp)
  73 {
  74         XA_STATE_ORDER(xas, xa, index, order);
  75         void *curr;
  76 
  77         do {
  78                 xas_lock(&xas);
  79                 curr = xas_store(&xas, entry);
  80                 xas_unlock(&xas);
  81         } while (xas_nomem(&xas, gfp));
  82 
  83         return curr;
  84 }
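
/*
 * For reference, a normal-API caller uses the helper above like this
 * (a sketch only; it needs CONFIG_XARRAY_MULTI, and the array and value
 * are illustrative):
 *
 *	xa_store_order(&array, 0, 3, xa_mk_value(1), GFP_KERNEL);
 *
 * after which xa_load() returns xa_mk_value(1) for all of indices 0-7,
 * since order 3 covers 1 << 3 = 8 slots.
 */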
  85 
  86 static noinline void check_xa_err(struct xarray *xa)
  87 {
  88         XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
  89         XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
  90 #ifndef __KERNEL__
  91         /* The kernel does not fail GFP_NOWAIT allocations */
  92         XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
  93         XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
  94 #endif
  95         XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
  96         XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
  97         XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
  98 // kills the test-suite :-(
  99 //      XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 100 }
 101 
 102 static noinline void check_xas_retry(struct xarray *xa)
 103 {
 104         XA_STATE(xas, xa, 0);
 105         void *entry;
 106 
 107         xa_store_index(xa, 0, GFP_KERNEL);
 108         xa_store_index(xa, 1, GFP_KERNEL);
 109 
 110         rcu_read_lock();
 111         XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
 112         xa_erase_index(xa, 1);
 113         XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
 114         XA_BUG_ON(xa, xas_retry(&xas, NULL));
 115         XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
 116         xas_reset(&xas);
 117         XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
 118         XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
 119         XA_BUG_ON(xa, xas.xa_node != NULL);
 120         rcu_read_unlock();
 121 
 122         XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
 123 
 124         rcu_read_lock();
 125         XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
 126         xas.xa_node = XAS_RESTART;
 127         XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
 128         rcu_read_unlock();
 129 
 130         /* Make sure we can iterate through retry entries */
 131         xas_lock(&xas);
 132         xas_set(&xas, 0);
 133         xas_store(&xas, XA_RETRY_ENTRY);
 134         xas_set(&xas, 1);
 135         xas_store(&xas, XA_RETRY_ENTRY);
 136 
 137         xas_set(&xas, 0);
 138         xas_for_each(&xas, entry, ULONG_MAX) {
 139                 xas_store(&xas, xa_mk_index(xas.xa_index));
 140         }
 141         xas_unlock(&xas);
 142 
 143         xa_erase_index(xa, 0);
 144         xa_erase_index(xa, 1);
 145 }
 146 
 147 static noinline void check_xa_load(struct xarray *xa)
 148 {
 149         unsigned long i, j;
 150 
 151         for (i = 0; i < 1024; i++) {
 152                 for (j = 0; j < 1024; j++) {
 153                         void *entry = xa_load(xa, j);
 154                         if (j < i)
 155                                 XA_BUG_ON(xa, xa_to_value(entry) != j);
 156                         else
 157                                 XA_BUG_ON(xa, entry);
 158                 }
 159                 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
 160         }
 161 
 162         for (i = 0; i < 1024; i++) {
 163                 for (j = 0; j < 1024; j++) {
 164                         void *entry = xa_load(xa, j);
 165                         if (j >= i)
 166                                 XA_BUG_ON(xa, xa_to_value(entry) != j);
 167                         else
 168                                 XA_BUG_ON(xa, entry);
 169                 }
 170                 xa_erase_index(xa, i);
 171         }
 172         XA_BUG_ON(xa, !xa_empty(xa));
 173 }
 174 
 175 static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 176 {
 177         unsigned int order;
 178         unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
 179 
 180         /* NULL elements have no marks set */
 181         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 182         xa_set_mark(xa, index, XA_MARK_0);
 183         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 184 
 185         /* Storing a pointer will not make a mark appear */
 186         XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
 187         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 188         xa_set_mark(xa, index, XA_MARK_0);
 189         XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
 190 
 191         /* Setting one mark will not set another mark */
 192         XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
 193         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
 194 
 195         /* Storing NULL clears marks, and they can't be set again */
 196         xa_erase_index(xa, index);
 197         XA_BUG_ON(xa, !xa_empty(xa));
 198         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 199         xa_set_mark(xa, index, XA_MARK_0);
 200         XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 201 
 202         /*
 203          * Storing a multi-index entry over entries with marks gives the
 204          * entire entry the union of the marks
 205          */
 206         BUG_ON((index % 4) != 0);
 207         for (order = 2; order < max_order; order++) {
 208                 unsigned long base = round_down(index, 1UL << order);
 209                 unsigned long next = base + (1UL << order);
 210                 unsigned long i;
 211 
 212                 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
 213                 xa_set_mark(xa, index + 1, XA_MARK_0);
 214                 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
 215                 xa_set_mark(xa, index + 2, XA_MARK_2);
 216                 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
 217                 xa_store_order(xa, index, order, xa_mk_index(index),
 218                                 GFP_KERNEL);
 219                 for (i = base; i < next; i++) {
 220                         XA_STATE(xas, xa, i);
 221                         unsigned int seen = 0;
 222                         void *entry;
 223 
 224                         XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
 225                         XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
 226                         XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
 227 
 228                         /* We should see two elements in the array */
 229                         rcu_read_lock();
 230                         xas_for_each(&xas, entry, ULONG_MAX)
 231                                 seen++;
 232                         rcu_read_unlock();
 233                         XA_BUG_ON(xa, seen != 2);
 234 
 235                         /* One of which is marked */
 236                         xas_set(&xas, 0);
 237                         seen = 0;
 238                         rcu_read_lock();
 239                         xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 240                                 seen++;
 241                         rcu_read_unlock();
 242                         XA_BUG_ON(xa, seen != 1);
 243                 }
 244                 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
 245                 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
 246                 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
 247                 xa_erase_index(xa, index);
 248                 xa_erase_index(xa, next);
 249                 XA_BUG_ON(xa, !xa_empty(xa));
 250         }
 251         XA_BUG_ON(xa, !xa_empty(xa));
 252 }
 253 
 254 static noinline void check_xa_mark_2(struct xarray *xa)
 255 {
 256         XA_STATE(xas, xa, 0);
 257         unsigned long index;
 258         unsigned int count = 0;
 259         void *entry;
 260 
 261         xa_store_index(xa, 0, GFP_KERNEL);
 262         xa_set_mark(xa, 0, XA_MARK_0);
 263         xas_lock(&xas);
 264         xas_load(&xas);
 265         xas_init_marks(&xas);
 266         xas_unlock(&xas);
 267         XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
 268 
 269         for (index = 3500; index < 4500; index++) {
 270                 xa_store_index(xa, index, GFP_KERNEL);
 271                 xa_set_mark(xa, index, XA_MARK_0);
 272         }
 273 
 274         xas_reset(&xas);
 275         rcu_read_lock();
 276         xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 277                 count++;
 278         rcu_read_unlock();
 279         XA_BUG_ON(xa, count != 1000);
 280 
 281         xas_lock(&xas);
 282         xas_for_each(&xas, entry, ULONG_MAX) {
 283                 xas_init_marks(&xas);
 284                 XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
 285                 XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
 286         }
 287         xas_unlock(&xas);
 288 
 289         xa_destroy(xa);
 290 }
 291 
 292 static noinline void check_xa_mark(struct xarray *xa)
 293 {
 294         unsigned long index;
 295 
 296         for (index = 0; index < 16384; index += 4)
 297                 check_xa_mark_1(xa, index);
 298 
 299         check_xa_mark_2(xa);
 300 }
 301 
 302 static noinline void check_xa_shrink(struct xarray *xa)
 303 {
 304         XA_STATE(xas, xa, 1);
 305         struct xa_node *node;
 306         unsigned int order;
 307         unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
 308 
 309         XA_BUG_ON(xa, !xa_empty(xa));
 310         XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
 311         XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
 312 
 313         /*
 314          * Check that erasing the entry at 1 shrinks the tree and properly
 315          * marks the node as being deleted.
 316          */
 317         xas_lock(&xas);
 318         XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
 319         node = xas.xa_node;
 320         XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
 321         XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
 322         XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
 323         XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
 324         XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
 325         XA_BUG_ON(xa, xas_load(&xas) != NULL);
 326         xas_unlock(&xas);
 327         XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 328         xa_erase_index(xa, 0);
 329         XA_BUG_ON(xa, !xa_empty(xa));
 330 
 331         for (order = 0; order < max_order; order++) {
 332                 unsigned long max = (1UL << order) - 1;
 333                 xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
 334                 XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
 335                 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
 336                 rcu_read_lock();
 337                 node = xa_head(xa);
 338                 rcu_read_unlock();
 339                 XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
 340                                 NULL);
 341                 rcu_read_lock();
 342                 XA_BUG_ON(xa, xa_head(xa) == node);
 343                 rcu_read_unlock();
 344                 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
 345                 xa_erase_index(xa, ULONG_MAX);
 346                 XA_BUG_ON(xa, xa->xa_head != node);
 347                 xa_erase_index(xa, 0);
 348         }
 349 }
 350 
 351 static noinline void check_insert(struct xarray *xa)
 352 {
 353         unsigned long i;
 354 
 355         for (i = 0; i < 1024; i++) {
 356                 xa_insert_index(xa, i);
 357                 XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
 358                 XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
 359                 xa_erase_index(xa, i);
 360         }
 361 
 362         for (i = 10; i < BITS_PER_LONG; i++) {
 363                 xa_insert_index(xa, 1UL << i);
 364                 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
 365                 XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
 366                 xa_erase_index(xa, 1UL << i);
 367 
 368                 xa_insert_index(xa, (1UL << i) - 1);
 369                 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
 370                 XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
 371                 xa_erase_index(xa, (1UL << i) - 1);
 372         }
 373 
 374         xa_insert_index(xa, ~0UL);
 375         XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
 376         XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
 377         xa_erase_index(xa, ~0UL);
 378 
 379         XA_BUG_ON(xa, !xa_empty(xa));
 380 }
 381 
 382 static noinline void check_cmpxchg(struct xarray *xa)
 383 {
 384         void *FIVE = xa_mk_value(5);
 385         void *SIX = xa_mk_value(6);
 386         void *LOTS = xa_mk_value(12345678);
 387 
 388         XA_BUG_ON(xa, !xa_empty(xa));
 389         XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
 390         XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
 391         XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
 392         XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
 393         XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
 394         XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
 395         XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
 396         xa_erase_index(xa, 12345678);
 397         xa_erase_index(xa, 5);
 398         XA_BUG_ON(xa, !xa_empty(xa));
 399 }
 400 
 401 static noinline void check_reserve(struct xarray *xa)
 402 {
 403         void *entry;
 404         unsigned long index;
 405         int count;
 406 
 407         /* An array with a reserved entry is not empty */
 408         XA_BUG_ON(xa, !xa_empty(xa));
 409         XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 410         XA_BUG_ON(xa, xa_empty(xa));
 411         XA_BUG_ON(xa, xa_load(xa, 12345678));
 412         xa_release(xa, 12345678);
 413         XA_BUG_ON(xa, !xa_empty(xa));
 414 
 415         /* Releasing a used entry does nothing */
 416         XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 417         XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
 418         xa_release(xa, 12345678);
 419         xa_erase_index(xa, 12345678);
 420         XA_BUG_ON(xa, !xa_empty(xa));
 421 
 422         /* cmpxchg sees a reserved entry as ZERO */
 423         XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 424         XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
 425                                 xa_mk_value(12345678), GFP_NOWAIT) != NULL);
 426         xa_release(xa, 12345678);
 427         xa_erase_index(xa, 12345678);
 428         XA_BUG_ON(xa, !xa_empty(xa));
 429 
 430         /* xa_insert treats it as busy */
 431         XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 432         XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
 433                         -EBUSY);
 434         XA_BUG_ON(xa, xa_empty(xa));
 435         XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
 436         XA_BUG_ON(xa, !xa_empty(xa));
 437 
 438         /* Can iterate through a reserved entry */
 439         xa_store_index(xa, 5, GFP_KERNEL);
 440         XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
 441         xa_store_index(xa, 7, GFP_KERNEL);
 442 
 443         count = 0;
 444         xa_for_each(xa, index, entry) {
 445                 XA_BUG_ON(xa, index != 5 && index != 7);
 446                 count++;
 447         }
 448         XA_BUG_ON(xa, count != 2);
 449 
 450         /* If we free a reserved entry, we should be able to allocate it */
 451         if (xa->xa_flags & XA_FLAGS_ALLOC) {
 452                 u32 id;
 453 
 454                 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
 455                                         XA_LIMIT(5, 10), GFP_KERNEL) != 0);
 456                 XA_BUG_ON(xa, id != 8);
 457 
 458                 xa_release(xa, 6);
 459                 XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
 460                                         XA_LIMIT(5, 10), GFP_KERNEL) != 0);
 461                 XA_BUG_ON(xa, id != 6);
 462         }
 463 
 464         xa_destroy(xa);
 465 }
 466 
 467 static noinline void check_xas_erase(struct xarray *xa)
 468 {
 469         XA_STATE(xas, xa, 0);
 470         void *entry;
 471         unsigned long i, j;
 472 
 473         for (i = 0; i < 200; i++) {
 474                 for (j = i; j < 2 * i + 17; j++) {
 475                         xas_set(&xas, j);
 476                         do {
 477                                 xas_lock(&xas);
 478                                 xas_store(&xas, xa_mk_index(j));
 479                                 xas_unlock(&xas);
 480                         } while (xas_nomem(&xas, GFP_KERNEL));
 481                 }
 482 
 483                 xas_set(&xas, ULONG_MAX);
 484                 do {
 485                         xas_lock(&xas);
 486                         xas_store(&xas, xa_mk_value(0));
 487                         xas_unlock(&xas);
 488                 } while (xas_nomem(&xas, GFP_KERNEL));
 489 
 490                 xas_lock(&xas);
 491                 xas_store(&xas, NULL);
 492 
 493                 xas_set(&xas, 0);
 494                 j = i;
 495                 xas_for_each(&xas, entry, ULONG_MAX) {
 496                         XA_BUG_ON(xa, entry != xa_mk_index(j));
 497                         xas_store(&xas, NULL);
 498                         j++;
 499                 }
 500                 xas_unlock(&xas);
 501                 XA_BUG_ON(xa, !xa_empty(xa));
 502         }
 503 }
 504 
 505 #ifdef CONFIG_XARRAY_MULTI
 506 static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 507                 unsigned int order)
 508 {
 509         XA_STATE(xas, xa, index);
 510         unsigned long min = index & ~((1UL << order) - 1);
 511         unsigned long max = min + (1UL << order);
 512 
 513         xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
 514         XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
 515         XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
 516         XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 517         XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 518 
 519         xas_lock(&xas);
 520         XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
 521         xas_unlock(&xas);
 522         XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
 523         XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
 524         XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 525         XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 526 
 527         xa_erase_index(xa, min);
 528         XA_BUG_ON(xa, !xa_empty(xa));
 529 }
 530 
 531 static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
 532                 unsigned int order)
 533 {
 534         XA_STATE(xas, xa, index);
 535         xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
 536 
 537         xas_lock(&xas);
 538         XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
 539         XA_BUG_ON(xa, xas.xa_index != index);
 540         XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
 541         xas_unlock(&xas);
 542         XA_BUG_ON(xa, !xa_empty(xa));
 543 }
 544 
 545 static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
 546                 unsigned int order)
 547 {
 548         XA_STATE(xas, xa, 0);
 549         void *entry;
 550         int n = 0;
 551 
 552         xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
 553 
 554         xas_lock(&xas);
 555         xas_for_each(&xas, entry, ULONG_MAX) {
 556                 XA_BUG_ON(xa, entry != xa_mk_index(index));
 557                 n++;
 558         }
 559         XA_BUG_ON(xa, n != 1);
 560         xas_set(&xas, index + 1);
 561         xas_for_each(&xas, entry, ULONG_MAX) {
 562                 XA_BUG_ON(xa, entry != xa_mk_index(index));
 563                 n++;
 564         }
 565         XA_BUG_ON(xa, n != 2);
 566         xas_unlock(&xas);
 567 
 568         xa_destroy(xa);
 569 }
 570 #endif
 571 
 572 static noinline void check_multi_store(struct xarray *xa)
 573 {
 574 #ifdef CONFIG_XARRAY_MULTI
 575         unsigned long i, j, k;
 576         unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
 577 
 578         /* Loading from any position returns the same value */
 579         xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
 580         XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 581         XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
 582         XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
 583         rcu_read_lock();
 584         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
 585         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
 586         rcu_read_unlock();
 587 
 588         /* Storing adjacent to the value does not alter the value */
 589         xa_store(xa, 3, xa, GFP_KERNEL);
 590         XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
 591         XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
 592         XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
 593         rcu_read_lock();
 594         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
 595         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
 596         rcu_read_unlock();
 597 
 598         /* Overwriting multiple indexes works */
 599         xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
 600         XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
 601         XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
 602         XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
 603         XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
 604         XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
 605         rcu_read_lock();
 606         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
 607         XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
 608         rcu_read_unlock();
 609 
 610         /* We can erase multiple values with a single store */
 611         xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
 612         XA_BUG_ON(xa, !xa_empty(xa));
 613 
 614         /* Even when the first slot is empty but the others aren't */
 615         xa_store_index(xa, 1, GFP_KERNEL);
 616         xa_store_index(xa, 2, GFP_KERNEL);
 617         xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
 618         XA_BUG_ON(xa, !xa_empty(xa));
 619 
 620         for (i = 0; i < max_order; i++) {
 621                 for (j = 0; j < max_order; j++) {
 622                         xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
 623                         xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 624 
 625                         for (k = 0; k < max_order; k++) {
 626                                 void *entry = xa_load(xa, (1UL << k) - 1);
 627                                 if ((i < k) && (j < k))
 628                                         XA_BUG_ON(xa, entry != NULL);
 629                                 else
 630                                         XA_BUG_ON(xa, entry != xa_mk_index(j));
 631                         }
 632 
 633                         xa_erase(xa, 0);
 634                         XA_BUG_ON(xa, !xa_empty(xa));
 635                 }
 636         }
 637 
 638         for (i = 0; i < 20; i++) {
 639                 check_multi_store_1(xa, 200, i);
 640                 check_multi_store_1(xa, 0, i);
 641                 check_multi_store_1(xa, (1UL << i) + 1, i);
 642         }
 643         check_multi_store_2(xa, 4095, 9);
 644 
 645         for (i = 1; i < 20; i++) {
 646                 check_multi_store_3(xa, 0, i);
 647                 check_multi_store_3(xa, 1UL << i, i);
 648         }
 649 #endif
 650 }
 651 
 652 static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 653 {
 654         int i;
 655         u32 id;
 656 
 657         XA_BUG_ON(xa, !xa_empty(xa));
 658         /* An empty array should assign %base to the first alloc */
 659         xa_alloc_index(xa, base, GFP_KERNEL);
 660 
 661         /* Erasing it should make the array empty again */
 662         xa_erase_index(xa, base);
 663         XA_BUG_ON(xa, !xa_empty(xa));
 664 
 665         /* And it should assign %base again */
 666         xa_alloc_index(xa, base, GFP_KERNEL);
 667 
 668         /* Allocating and then erasing a lot should not lose base */
 669         for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
 670                 xa_alloc_index(xa, i, GFP_KERNEL);
 671         for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
 672                 xa_erase_index(xa, i);
 673         xa_alloc_index(xa, base, GFP_KERNEL);
 674 
 675         /* Destroying the array should do the same as erasing */
 676         xa_destroy(xa);
 677 
 678         /* And it should assign %base again */
 679         xa_alloc_index(xa, base, GFP_KERNEL);
 680 
 681         /* The next assigned ID should be base+1 */
 682         xa_alloc_index(xa, base + 1, GFP_KERNEL);
 683         xa_erase_index(xa, base + 1);
 684 
 685         /* Storing a value should mark it used */
 686         xa_store_index(xa, base + 1, GFP_KERNEL);
 687         xa_alloc_index(xa, base + 2, GFP_KERNEL);
 688 
 689         /* If we then erase base, it should be free */
 690         xa_erase_index(xa, base);
 691         xa_alloc_index(xa, base, GFP_KERNEL);
 692 
 693         xa_erase_index(xa, base + 1);
 694         xa_erase_index(xa, base + 2);
 695 
 696         for (i = 1; i < 5000; i++) {
 697                 xa_alloc_index(xa, base + i, GFP_KERNEL);
 698         }
 699 
 700         xa_destroy(xa);
 701 
 702         /* Check that we fail properly at the limit of allocation */
 703         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
 704                                 XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 705                                 GFP_KERNEL) != 0);
 706         XA_BUG_ON(xa, id != 0xfffffffeU);
 707         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
 708                                 XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 709                                 GFP_KERNEL) != 0);
 710         XA_BUG_ON(xa, id != 0xffffffffU);
 711         id = 3;
 712         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
 713                                 XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 714                                 GFP_KERNEL) != -EBUSY);
 715         XA_BUG_ON(xa, id != 3);
 716         xa_destroy(xa);
 717 
 718         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
 719                                 GFP_KERNEL) != -EBUSY);
 720         XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
 721         XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
 722                                 GFP_KERNEL) != -EBUSY);
 723         xa_erase_index(xa, 3);
 724         XA_BUG_ON(xa, !xa_empty(xa));
 725 }
 726 
 727 static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 728 {
 729         unsigned int i, id;
 730         unsigned long index;
 731         void *entry;
 732 
 733         /* Allocate and free a NULL and check xa_empty() behaves */
 734         XA_BUG_ON(xa, !xa_empty(xa));
 735         XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
 736         XA_BUG_ON(xa, id != base);
 737         XA_BUG_ON(xa, xa_empty(xa));
 738         XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
 739         XA_BUG_ON(xa, !xa_empty(xa));
 740 
 741         /* Ditto, but check destroy instead of erase */
 742         XA_BUG_ON(xa, !xa_empty(xa));
 743         XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
 744         XA_BUG_ON(xa, id != base);
 745         XA_BUG_ON(xa, xa_empty(xa));
 746         xa_destroy(xa);
 747         XA_BUG_ON(xa, !xa_empty(xa));
 748 
 749         for (i = base; i < base + 10; i++) {
 750                 XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
 751                                         GFP_KERNEL) != 0);
 752                 XA_BUG_ON(xa, id != i);
 753         }
 754 
 755         XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
 756         XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
 757         XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
 758         XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
 759         XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
 760         XA_BUG_ON(xa, id != 5);
 761 
 762         xa_for_each(xa, index, entry) {
 763                 xa_erase_index(xa, index);
 764         }
 765 
 766         for (i = base; i < base + 9; i++) {
 767                 XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
 768                 XA_BUG_ON(xa, xa_empty(xa));
 769         }
 770         XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
 771         XA_BUG_ON(xa, xa_empty(xa));
 772         XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
 773         XA_BUG_ON(xa, !xa_empty(xa));
 774 
 775         xa_destroy(xa);
 776 }
 777 
 778 static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
 779 {
 780         struct xa_limit limit = XA_LIMIT(1, 0x3fff);
 781         u32 next = 0;
 782         unsigned int i, id;
 783         unsigned long index;
 784         void *entry;
 785 
 786         XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
 787                                 &next, GFP_KERNEL) != 0);
 788         XA_BUG_ON(xa, id != 1);
 789 
 790         next = 0x3ffd;
 791         XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
 792                                 &next, GFP_KERNEL) != 0);
 793         XA_BUG_ON(xa, id != 0x3ffd);
 794         xa_erase_index(xa, 0x3ffd);
 795         xa_erase_index(xa, 1);
 796         XA_BUG_ON(xa, !xa_empty(xa));
 797 
 798         for (i = 0x3ffe; i < 0x4003; i++) {
 799                 if (i < 0x4000)
 800                         entry = xa_mk_index(i);
 801                 else
 802                         entry = xa_mk_index(i - 0x3fff);
 803                 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
 804                                         &next, GFP_KERNEL) != (id == 1));
 805                 XA_BUG_ON(xa, xa_mk_index(id) != entry);
 806         }
 807 
 808         /* Check wrap-around is handled correctly */
 809         if (base != 0)
 810                 xa_erase_index(xa, base);
 811         xa_erase_index(xa, base + 1);
 812         next = UINT_MAX;
 813         XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
 814                                 xa_limit_32b, &next, GFP_KERNEL) != 0);
 815         XA_BUG_ON(xa, id != UINT_MAX);
 816         XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
 817                                 xa_limit_32b, &next, GFP_KERNEL) != 1);
 818         XA_BUG_ON(xa, id != base);
 819         XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
 820                                 xa_limit_32b, &next, GFP_KERNEL) != 0);
 821         XA_BUG_ON(xa, id != base + 1);
 822 
 823         xa_for_each(xa, index, entry)
 824                 xa_erase_index(xa, index);
 825 
 826         XA_BUG_ON(xa, !xa_empty(xa));
 827 }
 828 
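/* xa0 assigns IDs starting at 0; xa1 (DEFINE_XARRAY_ALLOC1) starts at 1. */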
 829 static DEFINE_XARRAY_ALLOC(xa0);
 830 static DEFINE_XARRAY_ALLOC1(xa1);
 831 
 832 static noinline void check_xa_alloc(void)
 833 {
 834         check_xa_alloc_1(&xa0, 0);
 835         check_xa_alloc_1(&xa1, 1);
 836         check_xa_alloc_2(&xa0, 0);
 837         check_xa_alloc_2(&xa1, 1);
 838         check_xa_alloc_3(&xa0, 0);
 839         check_xa_alloc_3(&xa1, 1);
 840 }
 841 
 842 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
 843                         unsigned int order, unsigned int present)
 844 {
 845         XA_STATE_ORDER(xas, xa, start, order);
 846         void *entry;
 847         unsigned int count = 0;
 848 
 849 retry:
 850         xas_lock(&xas);
 851         xas_for_each_conflict(&xas, entry) {
 852                 XA_BUG_ON(xa, !xa_is_value(entry));
 853                 XA_BUG_ON(xa, entry < xa_mk_index(start));
 854                 XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
 855                 count++;
 856         }
 857         xas_store(&xas, xa_mk_index(start));
 858         xas_unlock(&xas);
 859         if (xas_nomem(&xas, GFP_KERNEL)) {
 860                 count = 0;
 861                 goto retry;
 862         }
 863         XA_BUG_ON(xa, xas_error(&xas));
 864         XA_BUG_ON(xa, count != present);
 865         XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
 866         XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
 867                         xa_mk_index(start));
 868         xa_erase_index(xa, start);
 869 }
 870 
 871 static noinline void check_store_iter(struct xarray *xa)
 872 {
 873         unsigned int i, j;
 874         unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
 875 
 876         for (i = 0; i < max_order; i++) {
 877                 unsigned int min = 1 << i;
 878                 unsigned int max = (2 << i) - 1;
 879                 __check_store_iter(xa, 0, i, 0);
 880                 XA_BUG_ON(xa, !xa_empty(xa));
 881                 __check_store_iter(xa, min, i, 0);
 882                 XA_BUG_ON(xa, !xa_empty(xa));
 883 
 884                 xa_store_index(xa, min, GFP_KERNEL);
 885                 __check_store_iter(xa, min, i, 1);
 886                 XA_BUG_ON(xa, !xa_empty(xa));
 887                 xa_store_index(xa, max, GFP_KERNEL);
 888                 __check_store_iter(xa, min, i, 1);
 889                 XA_BUG_ON(xa, !xa_empty(xa));
 890 
 891                 for (j = 0; j < min; j++)
 892                         xa_store_index(xa, j, GFP_KERNEL);
 893                 __check_store_iter(xa, 0, i, min);
 894                 XA_BUG_ON(xa, !xa_empty(xa));
 895                 for (j = 0; j < min; j++)
 896                         xa_store_index(xa, min + j, GFP_KERNEL);
 897                 __check_store_iter(xa, min, i, min);
 898                 XA_BUG_ON(xa, !xa_empty(xa));
 899         }
 900 #ifdef CONFIG_XARRAY_MULTI
 901         xa_store_index(xa, 63, GFP_KERNEL);
 902         xa_store_index(xa, 65, GFP_KERNEL);
 903         __check_store_iter(xa, 64, 2, 1);
 904         xa_erase_index(xa, 63);
 905 #endif
 906         XA_BUG_ON(xa, !xa_empty(xa));
 907 }
 908 
 909 static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
 910 {
 911 #ifdef CONFIG_XARRAY_MULTI
 912         unsigned long multi = 3 << order;
 913         unsigned long next = 4 << order;
 914         unsigned long index;
 915 
 916         xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
 917         XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
 918         XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
 919 
 920         index = 0;
 921         XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
 922                         xa_mk_value(multi));
 923         XA_BUG_ON(xa, index != multi);
 924         index = multi + 1;
 925         XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
 926                         xa_mk_value(multi));
 927         XA_BUG_ON(xa, (index < multi) || (index >= next));
 928         XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
 929                         xa_mk_value(next));
 930         XA_BUG_ON(xa, index != next);
 931         XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
 932         XA_BUG_ON(xa, index != next);
 933 
 934         xa_erase_index(xa, multi);
 935         xa_erase_index(xa, next);
 936         xa_erase_index(xa, next + 1);
 937         XA_BUG_ON(xa, !xa_empty(xa));
 938 #endif
 939 }
 940 
 941 static noinline void check_multi_find_2(struct xarray *xa)
 942 {
 943         unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
 944         unsigned int i, j;
 945         void *entry;
 946 
 947         for (i = 0; i < max_order; i++) {
 948                 unsigned long index = 1UL << i;
 949                 for (j = 0; j < index; j++) {
 950                         XA_STATE(xas, xa, j + index);
 951                         xa_store_index(xa, index - 1, GFP_KERNEL);
 952                         xa_store_order(xa, index, i, xa_mk_index(index),
 953                                         GFP_KERNEL);
 954                         rcu_read_lock();
 955                         xas_for_each(&xas, entry, ULONG_MAX) {
 956                                 xa_erase_index(xa, index);
 957                         }
 958                         rcu_read_unlock();
 959                         xa_erase_index(xa, index - 1);
 960                         XA_BUG_ON(xa, !xa_empty(xa));
 961                 }
 962         }
 963 }
 964 
 965 static noinline void check_multi_find_3(struct xarray *xa)
 966 {
 967         unsigned int order;
 968 
 969         for (order = 5; order < order_limit; order++) {
 970                 unsigned long index = 1UL << (order - 5);
 971 
 972                 XA_BUG_ON(xa, !xa_empty(xa));
 973                 xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
 974                 XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
 975                 xa_erase_index(xa, 0);
 976         }
 977 }
 978 
 979 static noinline void check_find_1(struct xarray *xa)
 980 {
 981         unsigned long i, j, k;
 982 
 983         XA_BUG_ON(xa, !xa_empty(xa));
 984 
 985         /*
 986          * Check xa_find with all pairs between 0 and 99 inclusive,
 987          * starting at every index between 0 and 99
 988          */
 989         for (i = 0; i < 100; i++) {
 990                 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
 991                 xa_set_mark(xa, i, XA_MARK_0);
 992                 for (j = 0; j < i; j++) {
 993                         XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
 994                                         NULL);
 995                         xa_set_mark(xa, j, XA_MARK_0);
 996                         for (k = 0; k < 100; k++) {
 997                                 unsigned long index = k;
 998                                 void *entry = xa_find(xa, &index, ULONG_MAX,
 999                                                                 XA_PRESENT);
1000                                 if (k <= j)
1001                                         XA_BUG_ON(xa, index != j);
1002                                 else if (k <= i)
1003                                         XA_BUG_ON(xa, index != i);
1004                                 else
1005                                         XA_BUG_ON(xa, entry != NULL);
1006 
1007                                 index = k;
1008                                 entry = xa_find(xa, &index, ULONG_MAX,
1009                                                                 XA_MARK_0);
1010                                 if (k <= j)
1011                                         XA_BUG_ON(xa, index != j);
1012                                 else if (k <= i)
1013                                         XA_BUG_ON(xa, index != i);
1014                                 else
1015                                         XA_BUG_ON(xa, entry != NULL);
1016                         }
1017                         xa_erase_index(xa, j);
1018                         XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
1019                         XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
1020                 }
1021                 xa_erase_index(xa, i);
1022                 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
1023         }
1024         XA_BUG_ON(xa, !xa_empty(xa));
1025 }
1026 
1027 static noinline void check_find_2(struct xarray *xa)
1028 {
1029         void *entry;
1030         unsigned long i, j, index;
1031 
1032         xa_for_each(xa, index, entry) {
1033                 XA_BUG_ON(xa, true);
1034         }
1035 
1036         for (i = 0; i < 1024; i++) {
1037                 xa_store_index(xa, index, GFP_KERNEL);
1038                 j = 0;
1039                 xa_for_each(xa, index, entry) {
1040                         XA_BUG_ON(xa, xa_mk_index(index) != entry);
1041                         XA_BUG_ON(xa, index != j++);
1042                 }
1043         }
1044 
1045         xa_destroy(xa);
1046 }
1047 
1048 static noinline void check_find_3(struct xarray *xa)
1049 {
1050         XA_STATE(xas, xa, 0);
1051         unsigned long i, j, k;
1052         void *entry;
1053 
1054         for (i = 0; i < 100; i++) {
1055                 for (j = 0; j < 100; j++) {
1056                         rcu_read_lock();
1057                         for (k = 0; k < 100; k++) {
1058                                 xas_set(&xas, j);
1059                                 xas_for_each_marked(&xas, entry, k, XA_MARK_0)
1060                                         ;
1061                                 if (j > k)
1062                                         XA_BUG_ON(xa,
1063                                                 xas.xa_node != XAS_RESTART);
1064                         }
1065                         rcu_read_unlock();
1066                 }
1067                 xa_store_index(xa, i, GFP_KERNEL);
1068                 xa_set_mark(xa, i, XA_MARK_0);
1069         }
1070         xa_destroy(xa);
1071 }
1072 
1073 static noinline void check_find_4(struct xarray *xa)
1074 {
1075         unsigned long index = 0;
1076         void *entry;
1077 
1078         xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1079 
1080         entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1081         XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
1082 
1083         entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
1084         XA_BUG_ON(xa, entry);
1085 
1086         xa_erase_index(xa, ULONG_MAX);
1087 }
1088 
1089 static noinline void check_find(struct xarray *xa)
1090 {
1091         unsigned i;
1092 
1093         check_find_1(xa);
1094         check_find_2(xa);
1095         check_find_3(xa);
1096         check_find_4(xa);
1097 
1098         for (i = 2; i < 10; i++)
1099                 check_multi_find_1(xa, i);
1100         check_multi_find_2(xa);
1101         check_multi_find_3(xa);
1102 }
1103 
1104 /* See find_swap_entry() in mm/shmem.c */
1105 static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
1106 {
1107         XA_STATE(xas, xa, 0);
1108         unsigned int checked = 0;
1109         void *entry;
1110 
1111         rcu_read_lock();
1112         xas_for_each(&xas, entry, ULONG_MAX) {
1113                 if (xas_retry(&xas, entry))
1114                         continue;
1115                 if (entry == item)
1116                         break;
1117                 checked++;
1118                 if ((checked % 4) != 0)
1119                         continue;
1120                 xas_pause(&xas);
1121         }
1122         rcu_read_unlock();
1123 
1124         return entry ? xas.xa_index : -1;
1125 }
1126 
1127 static noinline void check_find_entry(struct xarray *xa)
1128 {
1129 #ifdef CONFIG_XARRAY_MULTI
1130         unsigned int order;
1131         unsigned long offset, index;
1132 
1133         for (order = 0; order < 20; order++) {
1134                 for (offset = 0; offset < (1UL << (order + 3));
1135                      offset += (1UL << order)) {
1136                         for (index = 0; index < (1UL << (order + 5));
1137                              index += (1UL << order)) {
1138                                 xa_store_order(xa, index, order,
1139                                                 xa_mk_index(index), GFP_KERNEL);
1140                                 XA_BUG_ON(xa, xa_load(xa, index) !=
1141                                                 xa_mk_index(index));
1142                                 XA_BUG_ON(xa, xa_find_entry(xa,
1143                                                 xa_mk_index(index)) != index);
1144                         }
1145                         XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
1146                         xa_destroy(xa);
1147                 }
1148         }
1149 #endif
1150 
1151         XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
1152         xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1153         XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
1154         XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
1155         xa_erase_index(xa, ULONG_MAX);
1156         XA_BUG_ON(xa, !xa_empty(xa));
1157 }
1158 
1159 static noinline void check_pause(struct xarray *xa)
1160 {
1161         XA_STATE(xas, xa, 0);
1162         void *entry;
1163         unsigned int order;
1164         unsigned long index = 1;
1165         unsigned int count = 0;
1166 
1167         for (order = 0; order < order_limit; order++) {
1168                 XA_BUG_ON(xa, xa_store_order(xa, index, order,
1169                                         xa_mk_index(index), GFP_KERNEL));
1170                 index += 1UL << order;
1171         }
1172 
1173         rcu_read_lock();
1174         xas_for_each(&xas, entry, ULONG_MAX) {
1175                 XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
1176                 count++;
1177         }
1178         rcu_read_unlock();
1179         XA_BUG_ON(xa, count != order_limit);
1180 
1181         count = 0;
1182         xas_set(&xas, 0);
1183         rcu_read_lock();
1184         xas_for_each(&xas, entry, ULONG_MAX) {
1185                 XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
1186                 count++;
1187                 xas_pause(&xas);
1188         }
1189         rcu_read_unlock();
1190         XA_BUG_ON(xa, count != order_limit);
1191 
1192         xa_destroy(xa);
1193 }
1194 
1195 static noinline void check_move_tiny(struct xarray *xa)
1196 {
1197         XA_STATE(xas, xa, 0);
1198 
1199         XA_BUG_ON(xa, !xa_empty(xa));
1200         rcu_read_lock();
1201         XA_BUG_ON(xa, xas_next(&xas) != NULL);
1202         XA_BUG_ON(xa, xas_next(&xas) != NULL);
1203         rcu_read_unlock();
1204         xa_store_index(xa, 0, GFP_KERNEL);
1205         rcu_read_lock();
1206         xas_set(&xas, 0);
1207         XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
1208         XA_BUG_ON(xa, xas_next(&xas) != NULL);
1209         xas_set(&xas, 0);
1210         XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
1211         XA_BUG_ON(xa, xas_prev(&xas) != NULL);
1212         rcu_read_unlock();
1213         xa_erase_index(xa, 0);
1214         XA_BUG_ON(xa, !xa_empty(xa));
1215 }
1216 
1217 static noinline void check_move_max(struct xarray *xa)
1218 {
1219         XA_STATE(xas, xa, 0);
1220 
1221         xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
1222         rcu_read_lock();
1223         XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
1224         XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
1225         rcu_read_unlock();
1226 
1227         xas_set(&xas, 0);
1228         rcu_read_lock();
1229         XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
1230         xas_pause(&xas);
1231         XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
1232         rcu_read_unlock();
1233 
1234         xa_erase_index(xa, ULONG_MAX);
1235         XA_BUG_ON(xa, !xa_empty(xa));
1236 }
1237 
1238 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
1239 {
1240         XA_STATE(xas, xa, 0);
1241         unsigned long i;
1242 
1243         xa_store_index(xa, 0, GFP_KERNEL);
1244         xa_store_index(xa, idx, GFP_KERNEL);
1245 
1246         rcu_read_lock();
1247         for (i = 0; i < idx * 4; i++) {
1248                 void *entry = xas_next(&xas);
1249                 if (i <= idx)
1250                         XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
1251                 XA_BUG_ON(xa, xas.xa_index != i);
1252                 if (i == 0 || i == idx)
1253                         XA_BUG_ON(xa, entry != xa_mk_index(i));
1254                 else
1255                         XA_BUG_ON(xa, entry != NULL);
1256         }
1257         xas_next(&xas);
1258         XA_BUG_ON(xa, xas.xa_index != i);
1259 
1260         do {
1261                 void *entry = xas_prev(&xas);
1262                 i--;
1263                 if (i <= idx)
1264                         XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
1265                 XA_BUG_ON(xa, xas.xa_index != i);
1266                 if (i == 0 || i == idx)
1267                         XA_BUG_ON(xa, entry != xa_mk_index(i));
1268                 else
1269                         XA_BUG_ON(xa, entry != NULL);
1270         } while (i > 0);
1271 
1272         xas_set(&xas, ULONG_MAX);
1273         XA_BUG_ON(xa, xas_next(&xas) != NULL);
1274         XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
1275         XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
1276         XA_BUG_ON(xa, xas.xa_index != 0);
1277         XA_BUG_ON(xa, xas_prev(&xas) != NULL);
1278         XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
1279         rcu_read_unlock();
1280 
1281         xa_erase_index(xa, 0);
1282         xa_erase_index(xa, idx);
1283         XA_BUG_ON(xa, !xa_empty(xa));
1284 }
1285 
1286 static noinline void check_move(struct xarray *xa)
1287 {
1288         XA_STATE(xas, xa, (1 << 16) - 1);
1289         unsigned long i;
1290 
1291         for (i = 0; i < (1 << 16); i++)
1292                 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
1293 
1294         rcu_read_lock();
1295         do {
1296                 void *entry = xas_prev(&xas);
1297                 i--;
1298                 XA_BUG_ON(xa, entry != xa_mk_index(i));
1299                 XA_BUG_ON(xa, i != xas.xa_index);
1300         } while (i != 0);
1301 
1302         XA_BUG_ON(xa, xas_prev(&xas) != NULL);
1303         XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
1304 
1305         do {
1306                 void *entry = xas_next(&xas);
1307                 XA_BUG_ON(xa, entry != xa_mk_index(i));
1308                 XA_BUG_ON(xa, i != xas.xa_index);
1309                 i++;
1310         } while (i < (1 << 16));
1311         rcu_read_unlock();
1312 
1313         for (i = (1 << 8); i < (1 << 15); i++)
1314                 xa_erase_index(xa, i);
1315 
1316         i = xas.xa_index;
1317 
1318         rcu_read_lock();
1319         do {
1320                 void *entry = xas_prev(&xas);
1321                 i--;
1322                 if ((i < (1 << 8)) || (i >= (1 << 15)))
1323                         XA_BUG_ON(xa, entry != xa_mk_index(i));
1324                 else
1325                         XA_BUG_ON(xa, entry != NULL);
1326                 XA_BUG_ON(xa, i != xas.xa_index);
1327         } while (i != 0);
1328 
1329         XA_BUG_ON(xa, xas_prev(&xas) != NULL);
1330         XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
1331 
1332         do {
1333                 void *entry = xas_next(&xas);
1334                 if ((i < (1 << 8)) || (i >= (1 << 15)))
1335                         XA_BUG_ON(xa, entry != xa_mk_index(i));
1336                 else
1337                         XA_BUG_ON(xa, entry != NULL);
1338                 XA_BUG_ON(xa, i != xas.xa_index);
1339                 i++;
1340         } while (i < (1 << 16));
1341         rcu_read_unlock();
1342 
1343         xa_destroy(xa);
1344 
1345         check_move_tiny(xa);
1346         check_move_max(xa);
1347 
1348         for (i = 0; i < 16; i++)
1349                 check_move_small(xa, 1UL << i);
1350 
1351         for (i = 2; i < 16; i++)
1352                 check_move_small(xa, (1UL << i) - 1);
1353 }
1354 
1355 static noinline void xa_store_many_order(struct xarray *xa,
1356                 unsigned long index, unsigned order)
1357 {
1358         XA_STATE_ORDER(xas, xa, index, order);
1359         unsigned int i = 0;
1360 
1361         do {
1362                 xas_lock(&xas);
1363                 XA_BUG_ON(xa, xas_find_conflict(&xas));
1364                 xas_create_range(&xas);
1365                 if (xas_error(&xas))
1366                         goto unlock;
1367                 for (i = 0; i < (1U << order); i++) {
1368                         XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
1369                         xas_next(&xas);
1370                 }
1371 unlock:
1372                 xas_unlock(&xas);
1373         } while (xas_nomem(&xas, GFP_KERNEL));
1374 
1375         XA_BUG_ON(xa, xas_error(&xas));
1376 }
1377 
1378 static noinline void check_create_range_1(struct xarray *xa,
1379                 unsigned long index, unsigned order)
1380 {
1381         unsigned long i;
1382 
1383         xa_store_many_order(xa, index, order);
1384         for (i = index; i < index + (1UL << order); i++)
1385                 xa_erase_index(xa, i);
1386         XA_BUG_ON(xa, !xa_empty(xa));
1387 }
1388 
1389 static noinline void check_create_range_2(struct xarray *xa, unsigned order)
1390 {
1391         unsigned long i;
1392         unsigned long nr = 1UL << order;
1393 
1394         for (i = 0; i < nr * nr; i += nr)
1395                 xa_store_many_order(xa, i, order);
1396         for (i = 0; i < nr * nr; i++)
1397                 xa_erase_index(xa, i);
1398         XA_BUG_ON(xa, !xa_empty(xa));
1399 }
1400 
1401 static noinline void check_create_range_3(void)
1402 {
1403         XA_STATE(xas, NULL, 0);
1404         xas_set_err(&xas, -EEXIST);
1405         xas_create_range(&xas);
1406         XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
1407 }
1408 
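     /*
      * Like xa_store_many_order(), but with a single entry pre-stored at
      * @index.  @base is @index rounded down to the order boundary by
      * XA_STATE_ORDER(); storing over the range must return the old entry
      * at @index and NULL everywhere else, and erasing the range must
      * leave the array empty.
      */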
1409 static noinline void check_create_range_4(struct xarray *xa,
1410                 unsigned long index, unsigned order)
1411 {
1412         XA_STATE_ORDER(xas, xa, index, order);
1413         unsigned long base = xas.xa_index;
1414         unsigned long i = 0;
1415 
1416         xa_store_index(xa, index, GFP_KERNEL);
1417         do {
1418                 xas_lock(&xas);
1419                 xas_create_range(&xas);
1420                 if (xas_error(&xas))
1421                         goto unlock;
1422                 for (i = 0; i < (1UL << order); i++) {
1423                         void *old = xas_store(&xas, xa_mk_index(base + i));
1424                         if (xas.xa_index == index)
1425                                 XA_BUG_ON(xa, old != xa_mk_index(base + i));
1426                         else
1427                                 XA_BUG_ON(xa, old != NULL);
1428                         xas_next(&xas);
1429                 }
1430 unlock:
1431                 xas_unlock(&xas);
1432         } while (xas_nomem(&xas, GFP_KERNEL));
1433 
1434         XA_BUG_ON(xa, xas_error(&xas));
1435 
1436         for (i = base; i < base + (1UL << order); i++)
1437                 xa_erase_index(xa, i);
1438         XA_BUG_ON(xa, !xa_empty(xa));
1439 }
1440 
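     /*
      * Drive the helpers above for each order: check_create_range_1() at
      * indices aligned to the order plus a large offset (1 << 24),
      * check_create_range_2() for the smaller orders only (presumably to
      * bound the nr * nr stores it performs), and check_create_range_4()
      * both at the aligned indices and one slot above or below them.
      * check_create_range_3() confirms that xas_create_range() preserves a
      * previously set error.
      */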
1441 static noinline void check_create_range(struct xarray *xa)
1442 {
1443         unsigned int order;
1444         unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
1445 
1446         for (order = 0; order < max_order; order++) {
1447                 check_create_range_1(xa, 0, order);
1448                 check_create_range_1(xa, 1U << order, order);
1449                 check_create_range_1(xa, 2U << order, order);
1450                 check_create_range_1(xa, 3U << order, order);
1451                 check_create_range_1(xa, 1U << 24, order);
1452                 if (order < 10)
1453                         check_create_range_2(xa, order);
1454 
1455                 check_create_range_4(xa, 0, order);
1456                 check_create_range_4(xa, 1U << order, order);
1457                 check_create_range_4(xa, 2U << order, order);
1458                 check_create_range_4(xa, 3U << order, order);
1459                 check_create_range_4(xa, 1U << 24, order);
1460 
1461                 check_create_range_4(xa, 1, order);
1462                 check_create_range_4(xa, (1U << order) + 1, order);
1463                 check_create_range_4(xa, (2U << order) + 1, order);
1464                 check_create_range_4(xa, (2U << order) - 1, order);
1465                 check_create_range_4(xa, (3U << order) + 1, order);
1466                 check_create_range_4(xa, (3U << order) - 1, order);
1467                 check_create_range_4(xa, (1U << 24) + 1, order);
1468         }
1469 
1470         check_create_range_3();
1471 }
1472 
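     /*
      * With CONFIG_XARRAY_MULTI, store one multi-index entry covering
      * [@first, @last]: it must be visible at both ends of the range but
      * not just outside it, and storing NULL over the range must remove
      * it.  In every configuration the array must end up empty.
      */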
1473 static noinline void __check_store_range(struct xarray *xa, unsigned long first,
1474                 unsigned long last)
1475 {
1476 #ifdef CONFIG_XARRAY_MULTI
1477         xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
1478 
1479         XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
1480         XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
1481         XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
1482         XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
1483 
1484         xa_store_range(xa, first, last, NULL, GFP_KERNEL);
1485 #endif
1486 
1487         XA_BUG_ON(xa, !xa_empty(xa));
1488 }
1489 
1490 static noinline void check_store_range(struct xarray *xa)
1491 {
1492         unsigned long i, j;
1493 
1494         for (i = 0; i < 128; i++) {
1495                 for (j = i; j < 128; j++) {
1496                         __check_store_range(xa, i, j);
1497                         __check_store_range(xa, 128 + i, 128 + j);
1498                         __check_store_range(xa, 4095 + i, 4095 + j);
1499                         __check_store_range(xa, 4096 + i, 4096 + j);
1500                         __check_store_range(xa, 123456 + i, 123456 + j);
1501                         __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
1502                 }
1503         }
1504 }
1505 
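     /*
      * Allocate IDs for eight pointers at successive byte offsets into
      * @name, check that the IDs come back in order, and check that
      * iterating over the array never yields an error entry for these
      * oddly aligned pointers.
      */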
1506 static void check_align_1(struct xarray *xa, char *name)
1507 {
1508         int i;
1509         unsigned int id;
1510         unsigned long index;
1511         void *entry;
1512 
1513         for (i = 0; i < 8; i++) {
1514                 XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
1515                                         GFP_KERNEL) != 0);
1516                 XA_BUG_ON(xa, id != i);
1517         }
1518         xa_for_each(xa, index, entry)
1519                 XA_BUG_ON(xa, xa_is_err(entry));
1520         xa_destroy(xa);
1521 }
1522 
1523 /*
1524  * We should always be able to store without allocating memory after
1525  * reserving a slot.
1526  */
1527 static void check_align_2(struct xarray *xa, char *name)
1528 {
1529         int i;
1530 
1531         XA_BUG_ON(xa, !xa_empty(xa));
1532 
1533         for (i = 0; i < 8; i++) {
1534                 XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
1535                 xa_erase(xa, 0);
1536         }
1537 
1538         for (i = 0; i < 8; i++) {
1539                 XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
1540                 XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
1541                 xa_erase(xa, 0);
1542         }
1543 
1544         XA_BUG_ON(xa, !xa_empty(xa));
1545 }
1546 
1547 static noinline void check_align(struct xarray *xa)
1548 {
1549         char name[] = "Motorola 68000";
1550 
1551         check_align_1(xa, name);
1552         check_align_1(xa, name + 1);
1553         check_align_1(xa, name + 2);
1554         check_align_1(xa, name + 3);
1555         check_align_2(xa, name);
1556 }
1557 
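     /*
      * The following tests model the page cache's workingset shadow-entry
      * tracking.  test_update_node() is a node update callback which puts
      * a node on the shadow_nodes list once it contains nothing but value
      * entries and removes it again when that stops being true.  Note that
      * shadow_nodes is passed as the first argument to list_add(), which
      * appears to mirror the list_lru_add(lru, item) calling convention of
      * the real workingset code; it still links up correctly here because
      * only a single node is ever tracked at a time.
      */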
1558 static LIST_HEAD(shadow_nodes);
1559 
1560 static void test_update_node(struct xa_node *node)
1561 {
1562         if (node->count && node->count == node->nr_values) {
1563                 if (list_empty(&node->private_list))
1564                         list_add(&shadow_nodes, &node->private_list);
1565         } else {
1566                 if (!list_empty(&node->private_list))
1567                         list_del_init(&node->private_list);
1568         }
1569 }
1570 
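     /*
      * Tear down every node left on shadow_nodes: under the xa_lock, point
      * an xa_state at each node's slot in its parent and store NULL there,
      * with test_update_node() registered so the list bookkeeping stays
      * consistent; this is roughly what a shrinker would do to reclaim
      * shadow entries.
      */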
1571 static noinline void shadow_remove(struct xarray *xa)
1572 {
1573         struct xa_node *node;
1574 
1575         xa_lock(xa);
1576         while ((node = list_first_entry_or_null(&shadow_nodes,
1577                                         struct xa_node, private_list))) {
1578                 XA_STATE(xas, node->array, 0);
1579                 XA_BUG_ON(xa, node->array != xa);
1580                 list_del_init(&node->private_list);
1581                 xas.xa_node = xa_parent_locked(node->array, node);
1582                 xas.xa_offset = node->offset;
1583                 xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
1584                 xas_set_update(&xas, test_update_node);
1585                 xas_store(&xas, NULL);
1586         }
1587         xa_unlock(xa);
1588 }
1589 
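     /*
      * Store value entries at @index and @index + 1 and expect the node to
      * show up on shadow_nodes.  Storing a non-value pointer at @index + 2
      * must take it off the list again, replacing that pointer with a
      * value entry must put it back, and shadow_remove() must leave both
      * the list and the array empty.
      */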
1590 static noinline void check_workingset(struct xarray *xa, unsigned long index)
1591 {
1592         XA_STATE(xas, xa, index);
1593         xas_set_update(&xas, test_update_node);
1594 
1595         do {
1596                 xas_lock(&xas);
1597                 xas_store(&xas, xa_mk_value(0));
1598                 xas_next(&xas);
1599                 xas_store(&xas, xa_mk_value(1));
1600                 xas_unlock(&xas);
1601         } while (xas_nomem(&xas, GFP_KERNEL));
1602 
1603         XA_BUG_ON(xa, list_empty(&shadow_nodes));
1604 
1605         xas_lock(&xas);
1606         xas_next(&xas);
1607         xas_store(&xas, &xas);
1608         XA_BUG_ON(xa, !list_empty(&shadow_nodes));
1609 
1610         xas_store(&xas, xa_mk_value(2));
1611         xas_unlock(&xas);
1612         XA_BUG_ON(xa, list_empty(&shadow_nodes));
1613 
1614         shadow_remove(xa);
1615         XA_BUG_ON(xa, !list_empty(&shadow_nodes));
1616         XA_BUG_ON(xa, !xa_empty(xa));
1617 }
1618 
1619 /*
1620  * Check that the pointer / value / sibling entries are accounted the
1621  * way we expect them to be.
1622  */
1623 static noinline void check_account(struct xarray *xa)
1624 {
1625 #ifdef CONFIG_XARRAY_MULTI
1626         unsigned int order;
1627 
1628         for (order = 1; order < 12; order++) {
1629                 XA_STATE(xas, xa, 1 << order);
1630 
1631                 xa_store_order(xa, 0, order, xa, GFP_KERNEL);
1632                 rcu_read_lock();
1633                 xas_load(&xas);
1634                 XA_BUG_ON(xa, xas.xa_node->count == 0);
1635                 XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
1636                 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1637                 rcu_read_unlock();
1638 
1639                 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
1640                                 GFP_KERNEL);
1641                 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
1642 
1643                 xa_erase(xa, 1 << order);
1644                 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1645 
1646                 xa_erase(xa, 0);
1647                 XA_BUG_ON(xa, !xa_empty(xa));
1648         }
1649 #endif
1650 }
1651 
1652 static noinline void check_destroy(struct xarray *xa)
1653 {
1654         unsigned long index;
1655 
1656         XA_BUG_ON(xa, !xa_empty(xa));
1657 
1658         /* Destroying an empty array is a no-op */
1659         xa_destroy(xa);
1660         XA_BUG_ON(xa, !xa_empty(xa));
1661 
1662         /* Destroying an array with a single entry */
1663         for (index = 0; index < 1000; index++) {
1664                 xa_store_index(xa, index, GFP_KERNEL);
1665                 XA_BUG_ON(xa, xa_empty(xa));
1666                 xa_destroy(xa);
1667                 XA_BUG_ON(xa, !xa_empty(xa));
1668         }
1669 
1670         /* Destroying an array with a single entry at ULONG_MAX */
1671         xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
1672         XA_BUG_ON(xa, xa_empty(xa));
1673         xa_destroy(xa);
1674         XA_BUG_ON(xa, !xa_empty(xa));
1675 
1676 #ifdef CONFIG_XARRAY_MULTI
1677         /* Destroying an array with a multi-index entry */
1678         xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
1679         XA_BUG_ON(xa, xa_empty(xa));
1680         xa_destroy(xa);
1681         XA_BUG_ON(xa, !xa_empty(xa));
1682 #endif
1683 }
1684 
1685 static DEFINE_XARRAY(array);
1686 
1687 static int xarray_checks(void)
1688 {
1689         check_xa_err(&array);
1690         check_xas_retry(&array);
1691         check_xa_load(&array);
1692         check_xa_mark(&array);
1693         check_xa_shrink(&array);
1694         check_xas_erase(&array);
1695         check_insert(&array);
1696         check_cmpxchg(&array);
1697         check_reserve(&array);
1698         check_reserve(&xa0);
1699         check_multi_store(&array);
1700         check_xa_alloc();
1701         check_find(&array);
1702         check_find_entry(&array);
1703         check_pause(&array);
1704         check_account(&array);
1705         check_destroy(&array);
1706         check_move(&array);
1707         check_create_range(&array);
1708         check_store_range(&array);
1709         check_store_iter(&array);
1710         check_align(&xa0);
1711 
1712         check_workingset(&array, 0);
1713         check_workingset(&array, 64);
1714         check_workingset(&array, 4096);
1715 
1716         printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
1717         return (tests_run == tests_passed) ? 0 : -EINVAL;
1718 }
1719 
1720 static void xarray_exit(void)
1721 {
1722 }
1723 
1724 module_init(xarray_checks);
1725 module_exit(xarray_exit);
1726 MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
1727 MODULE_LICENSE("GPL");
