kernel/locking/test-ww_mutex.c


DEFINITIONS

This source file includes the following definitions:
  1. test_mutex_work
  2. __test_mutex
  3. test_mutex
  4. test_aa
  5. test_abba_work
  6. test_abba
  7. test_cycle_work
  8. __test_cycle
  9. test_cycle
  10. get_random_order
  11. dummy_load
  12. stress_inorder_work
  13. stress_reorder_work
  14. stress_one_work
  15. stress
  16. test_ww_mutex_init
  17. test_ww_mutex_exit

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
static struct workqueue_struct *wq;

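/*
 * Basic mutual exclusion test: a worker and the test thread contend for
 * a single ww_mutex, with the ready/go/done completions sequencing the
 * two so that the worker only attempts the lock while the test thread
 * already holds it.
 */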
struct test_mutex {
        struct work_struct work;
        struct ww_mutex mutex;
        struct completion ready, go, done;
        unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

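/* Worker side: take the contended lock, spinning on trylock if asked to. */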
static void test_mutex_work(struct work_struct *work)
{
        struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

        complete(&mtx->ready);
        wait_for_completion(&mtx->go);

        if (mtx->flags & TEST_MTX_TRY) {
                while (!ww_mutex_trylock(&mtx->mutex))
                        cond_resched();
        } else {
                ww_mutex_lock(&mtx->mutex, NULL);
        }
        complete(&mtx->done);
        ww_mutex_unlock(&mtx->mutex);
}

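/*
 * Hold the lock while the worker tries to acquire it. If the worker
 * signals "done" before we release the lock, mutual exclusion was
 * violated: with TEST_MTX_SPIN we poll for that, otherwise we expect
 * wait_for_completion_timeout() to time out (i.e. return 0).
 */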
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
        struct test_mutex mtx;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mtx.mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
        init_completion(&mtx.ready);
        init_completion(&mtx.go);
        init_completion(&mtx.done);
        mtx.flags = flags;

        schedule_work(&mtx.work);

        wait_for_completion(&mtx.ready);
        ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
        complete(&mtx.go);
        if (flags & TEST_MTX_SPIN) {
                unsigned long timeout = jiffies + TIMEOUT;

                ret = 0;
                do {
                        if (completion_done(&mtx.done)) {
                                ret = -EINVAL;
                                break;
                        }
                        cond_resched();
                } while (time_before(jiffies, timeout));
        } else {
                ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
        }
        ww_mutex_unlock(&mtx.mutex);
        ww_acquire_fini(&ctx);

        if (ret) {
                pr_err("%s(flags=%x): mutual exclusion failure\n",
                       __func__, flags);
                ret = -EINVAL;
        }

        flush_work(&mtx.work);
        destroy_work_on_stack(&mtx.work);
        return ret;
#undef TIMEOUT
}

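/* Run the basic test for every combination of the TEST_MTX_* flags. */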
static int test_mutex(void)
{
        int ret;
        int i;

        for (i = 0; i < __TEST_MTX_LAST; i++) {
                ret = __test_mutex(i);
                if (ret)
                        return ret;
        }

        return 0;
}

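/*
 * AA test: acquiring the same lock twice within one acquire context must
 * fail with -EALREADY, and a trylock on a lock we already hold must fail.
 */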
static int test_aa(void)
{
        struct ww_mutex mutex;
        struct ww_acquire_ctx ctx;
        int ret;

        ww_mutex_init(&mutex, &ww_class);
        ww_acquire_init(&ctx, &ww_class);

        ww_mutex_lock(&mutex, &ctx);

        if (ww_mutex_trylock(&mutex)) {
                pr_err("%s: trylocked itself!\n", __func__);
                ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = ww_mutex_lock(&mutex, &ctx);
        if (ret != -EALREADY) {
                pr_err("%s: missed deadlock for recursing, ret=%d\n",
                       __func__, ret);
                if (!ret)
                        ww_mutex_unlock(&mutex);
                ret = -EINVAL;
                goto out;
        }

        ret = 0;
out:
        ww_mutex_unlock(&mutex);
        ww_acquire_fini(&ctx);
        return ret;
}

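/*
 * ABBA test: two threads acquire the same pair of locks in opposite
 * order. The ww_mutex deadlock detection must report -EDEADLK on at
 * least one side; with resolve set, the loser backs off via
 * ww_mutex_lock_slow() and both sides must then succeed.
 */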
struct test_abba {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex b_mutex;
        struct completion a_ready;
        struct completion b_ready;
        bool resolve;
        int result;
};

static void test_abba_work(struct work_struct *work)
{
        struct test_abba *abba = container_of(work, typeof(*abba), work);
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba->b_mutex, &ctx);

        complete(&abba->b_ready);
        wait_for_completion(&abba->a_ready);

        err = ww_mutex_lock(&abba->a_mutex, &ctx);
        if (abba->resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba->b_mutex);
                ww_mutex_lock_slow(&abba->a_mutex, &ctx);
                err = ww_mutex_lock(&abba->b_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba->a_mutex);
        ww_mutex_unlock(&abba->b_mutex);
        ww_acquire_fini(&ctx);

        abba->result = err;
}

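/*
 * Driver half of the ABBA test: lock A, wait until the worker holds B,
 * then reach for B to create the cross dependency.
 */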
static int test_abba(bool resolve)
{
        struct test_abba abba;
        struct ww_acquire_ctx ctx;
        int err, ret;

        ww_mutex_init(&abba.a_mutex, &ww_class);
        ww_mutex_init(&abba.b_mutex, &ww_class);
        INIT_WORK_ONSTACK(&abba.work, test_abba_work);
        init_completion(&abba.a_ready);
        init_completion(&abba.b_ready);
        abba.resolve = resolve;

        schedule_work(&abba.work);

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&abba.a_mutex, &ctx);

        complete(&abba.a_ready);
        wait_for_completion(&abba.b_ready);

        err = ww_mutex_lock(&abba.b_mutex, &ctx);
        if (resolve && err == -EDEADLK) {
                ww_mutex_unlock(&abba.a_mutex);
                ww_mutex_lock_slow(&abba.b_mutex, &ctx);
                err = ww_mutex_lock(&abba.a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(&abba.b_mutex);
        ww_mutex_unlock(&abba.a_mutex);
        ww_acquire_fini(&ctx);

        flush_work(&abba.work);
        destroy_work_on_stack(&abba.work);

        ret = 0;
        if (resolve) {
                if (err || abba.result) {
                        pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        } else {
                if (err != -EDEADLK && abba.result != -EDEADLK) {
                        pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
                               __func__, err, abba.result);
                        ret = -EINVAL;
                }
        }
        return ret;
}

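/*
 * Cycle test: nthreads workers form a ring in which each worker holds
 * its own lock and then tries to take its neighbour's. Any worker that
 * sees -EDEADLK releases its lock, waits for the contended one and
 * reacquires its own, so the cycle must always resolve.
 */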
struct test_cycle {
        struct work_struct work;
        struct ww_mutex a_mutex;
        struct ww_mutex *b_mutex;
        struct completion *a_signal;
        struct completion b_signal;
        int result;
};

static void test_cycle_work(struct work_struct *work)
{
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
        int err, erra = 0;

        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);

        complete(cycle->a_signal);
        wait_for_completion(&cycle->b_signal);

        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
                err = 0;
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
                erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }

        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
        if (!erra)
                ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);

        cycle->result = err ?: erra;
}

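/* Build the ring of workers and verify that every one of them resolved. */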
static int __test_cycle(unsigned int nthreads)
{
        struct test_cycle *cycles;
        unsigned int n, last = nthreads - 1;
        int ret;

        cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
        if (!cycles)
                return -ENOMEM;

        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                ww_mutex_init(&cycle->a_mutex, &ww_class);
                if (n == last)
                        cycle->b_mutex = &cycles[0].a_mutex;
                else
                        cycle->b_mutex = &cycles[n + 1].a_mutex;

                if (n == 0)
                        cycle->a_signal = &cycles[last].b_signal;
                else
                        cycle->a_signal = &cycles[n - 1].b_signal;
                init_completion(&cycle->b_signal);

                INIT_WORK(&cycle->work, test_cycle_work);
                cycle->result = 0;
        }

        for (n = 0; n < nthreads; n++)
                queue_work(wq, &cycles[n].work);

        flush_workqueue(wq);

        ret = 0;
        for (n = 0; n < nthreads; n++) {
                struct test_cycle *cycle = &cycles[n];

                if (!cycle->result)
                        continue;

                pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
        }

        for (n = 0; n < nthreads; n++)
                ww_mutex_destroy(&cycles[n].a_mutex);
        kfree(cycles);
        return ret;
}

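/* Exercise ring sizes from 2 up to one more than the number of CPUs. */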
static int test_cycle(unsigned int ncpus)
{
        unsigned int n;
        int ret;

        for (n = 2; n <= ncpus + 1; n++) {
                ret = __test_cycle(n);
                if (ret)
                        return ret;
        }

        return 0;
}

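/*
 * Stress tests: several workers hammer a shared array of ww_mutexes with
 * different locking patterns until the timeout expires.
 */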
struct stress {
        struct work_struct work;
        struct ww_mutex *locks;
        unsigned long timeout;
        int nlocks;
};

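/* Return the indices 0..count-1 in random order (Fisher-Yates shuffle). */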
static int *get_random_order(int count)
{
        int *order;
        int n, r, tmp;

        order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
        if (!order)
                return order;

        for (n = 0; n < count; n++)
                order[n] = n;

        for (n = count - 1; n > 1; n--) {
                r = get_random_int() % (n + 1);
                if (r != n) {
                        tmp = order[n];
                        order[n] = order[r];
                        order[r] = tmp;
                }
        }

        return order;
}

static void dummy_load(struct stress *stress)
{
        usleep_range(1000, 2000);
}

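/*
 * Acquire all locks in this worker's fixed random order, backing off and
 * reacquiring the contended lock with ww_mutex_lock_slow() on -EDEADLK.
 */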
static void stress_inorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *locks = stress->locks;
        struct ww_acquire_ctx ctx;
        int *order;

        order = get_random_order(nlocks);
        if (!order)
                return;

        do {
                int contended = -1;
                int n, err;

                ww_acquire_init(&ctx, &ww_class);
retry:
                err = 0;
                for (n = 0; n < nlocks; n++) {
                        if (n == contended)
                                continue;

                        err = ww_mutex_lock(&locks[order[n]], &ctx);
                        if (err < 0)
                                break;
                }
                if (!err)
                        dummy_load(stress);

                if (contended > n)
                        ww_mutex_unlock(&locks[order[contended]]);
                contended = n;
                while (n--)
                        ww_mutex_unlock(&locks[order[n]]);

                if (err == -EDEADLK) {
                        ww_mutex_lock_slow(&locks[order[contended]], &ctx);
                        goto retry;
                }

                if (err) {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

        kfree(order);
        kfree(stress);
}

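/*
 * Acquire the locks by walking a list, moving each contended lock to the
 * head after its slowpath acquisition so the locking order keeps changing.
 */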
struct reorder_lock {
        struct list_head link;
        struct ww_mutex *lock;
};

static void stress_reorder_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        LIST_HEAD(locks);
        struct ww_acquire_ctx ctx;
        struct reorder_lock *ll, *ln;
        int *order;
        int n, err;

        order = get_random_order(stress->nlocks);
        if (!order)
                return;

        for (n = 0; n < stress->nlocks; n++) {
                ll = kmalloc(sizeof(*ll), GFP_KERNEL);
                if (!ll)
                        goto out;

                ll->lock = &stress->locks[order[n]];
                list_add(&ll->link, &locks);
        }
        kfree(order);
        order = NULL;

        do {
                ww_acquire_init(&ctx, &ww_class);

                list_for_each_entry(ll, &locks, link) {
                        err = ww_mutex_lock(ll->lock, &ctx);
                        if (!err)
                                continue;

                        ln = ll;
                        list_for_each_entry_continue_reverse(ln, &locks, link)
                                ww_mutex_unlock(ln->lock);

                        if (err != -EDEADLK) {
                                pr_err_once("stress (%s) failed with %d\n",
                                            __func__, err);
                                break;
                        }

                        ww_mutex_lock_slow(ll->lock, &ctx);
                        list_move(&ll->link, &locks); /* restarts iteration */
                }

                dummy_load(stress);
                list_for_each_entry(ll, &locks, link)
                        ww_mutex_unlock(ll->lock);

                ww_acquire_fini(&ctx);
        } while (!time_after(jiffies, stress->timeout));

out:
        list_for_each_entry_safe(ll, ln, &locks, link)
                kfree(ll);
        kfree(order);
        kfree(stress);
}

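/*
 * Simplest stress pattern: repeatedly lock and unlock a single, randomly
 * chosen mutex, without an acquire context.
 */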
static void stress_one_work(struct work_struct *work)
{
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
        struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
        int err;

        do {
                err = ww_mutex_lock(lock, NULL);
                if (!err) {
                        dummy_load(stress);
                        ww_mutex_unlock(lock);
                } else {
                        pr_err_once("stress (%s) failed with %d\n",
                                    __func__, err);
                        break;
                }
        } while (!time_after(jiffies, stress->timeout));

        kfree(stress);
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

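/*
 * Spawn nthreads stress workers, cycling through the patterns enabled in
 * flags; each worker frees its own struct stress and runs for about 2s.
 */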
static int stress(int nlocks, int nthreads, unsigned int flags)
{
        struct ww_mutex *locks;
        int n;

        locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
        if (!locks)
                return -ENOMEM;

        for (n = 0; n < nlocks; n++)
                ww_mutex_init(&locks[n], &ww_class);

        for (n = 0; nthreads; n++) {
                struct stress *stress;
                void (*fn)(struct work_struct *work);

                fn = NULL;
                switch (n & 3) {
                case 0:
                        if (flags & STRESS_INORDER)
                                fn = stress_inorder_work;
                        break;
                case 1:
                        if (flags & STRESS_REORDER)
                                fn = stress_reorder_work;
                        break;
                case 2:
                        if (flags & STRESS_ONE)
                                fn = stress_one_work;
                        break;
                }

                if (!fn)
                        continue;

                stress = kmalloc(sizeof(*stress), GFP_KERNEL);
                if (!stress)
                        break;

                INIT_WORK(&stress->work, fn);
                stress->locks = locks;
                stress->nlocks = nlocks;
                stress->timeout = jiffies + 2*HZ;

                queue_work(wq, &stress->work);
                nthreads--;
        }

        flush_workqueue(wq);

        for (n = 0; n < nlocks; n++)
                ww_mutex_destroy(&locks[n]);
        kfree(locks);

        return 0;
}

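/* Run the whole battery of tests at module load; any failure aborts the load. */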
static int __init test_ww_mutex_init(void)
{
        int ncpus = num_online_cpus();
        int ret;

        wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
        if (!wq)
                return -ENOMEM;

        ret = test_mutex();
        if (ret)
                return ret;

        ret = test_aa();
        if (ret)
                return ret;

        ret = test_abba(false);
        if (ret)
                return ret;

        ret = test_abba(true);
        if (ret)
                return ret;

        ret = test_cycle(ncpus);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_INORDER);
        if (ret)
                return ret;

        ret = stress(16, 2*ncpus, STRESS_REORDER);
        if (ret)
                return ret;

        ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
        if (ret)
                return ret;

        return 0;
}

static void __exit test_ww_mutex_exit(void)
{
        destroy_workqueue(wq);
}

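/*
 * Note: the tests run from module_init(), so simply loading the module
 * (e.g. "modprobe test-ww_mutex") executes them; an error from any test
 * causes the module load itself to fail.
 */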
module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
