root/kernel/locking/locktorture.c


DEFINITIONS

This source file includes the following definitions:
  1. torture_lock_busted_write_lock
  2. torture_lock_busted_write_delay
  3. torture_lock_busted_write_unlock
  4. torture_boost_dummy
  5. torture_spin_lock_write_lock
  6. torture_spin_lock_write_delay
  7. torture_spin_lock_write_unlock
  8. torture_spin_lock_write_lock_irq
  9. torture_lock_spin_write_unlock_irq
  10. torture_rwlock_write_lock
  11. torture_rwlock_write_delay
  12. torture_rwlock_write_unlock
  13. torture_rwlock_read_lock
  14. torture_rwlock_read_delay
  15. torture_rwlock_read_unlock
  16. torture_rwlock_write_lock_irq
  17. torture_rwlock_write_unlock_irq
  18. torture_rwlock_read_lock_irq
  19. torture_rwlock_read_unlock_irq
  20. torture_mutex_lock
  21. torture_mutex_delay
  22. torture_mutex_unlock
  23. torture_ww_mutex_lock
  24. torture_ww_mutex_unlock
  25. torture_rtmutex_lock
  26. torture_rtmutex_boost
  27. torture_rtmutex_delay
  28. torture_rtmutex_unlock
  29. torture_rwsem_down_write
  30. torture_rwsem_write_delay
  31. torture_rwsem_up_write
  32. torture_rwsem_down_read
  33. torture_rwsem_read_delay
  34. torture_rwsem_up_read
  35. torture_percpu_rwsem_init
  36. torture_percpu_rwsem_down_write
  37. torture_percpu_rwsem_up_write
  38. torture_percpu_rwsem_down_read
  39. torture_percpu_rwsem_up_read
  40. lock_torture_writer
  41. lock_torture_reader
  42. __torture_print_stats
  43. lock_torture_stats_print
  44. lock_torture_stats
  45. lock_torture_print_module_parms
  46. lock_torture_cleanup
  47. lock_torture_init

// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
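
/*
 * Typical usage (illustrative parameter values only; any of the module
 * parameters defined below may be combined):
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		 nwriters_stress=4 nreaders_stress=8 stat_interval=30
 *
 * Progress and statistics are reported via printk(); see
 * lock_torture_stats_print() below.
 */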

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
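
/*
 * A NULL ->readlock is how exclusive-only lock types (e.g. spin_lock,
 * mutex_lock) opt out of the reader side: no reader kthreads or reader
 * statistics are created for them.
 */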

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

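/*
 * In this and the other *_delay() functions below, a modulo test of the
 * form !(torture_random(trsp) % (n * 2000 * longdelay_ms)) fires with
 * probability 1/(n * 200,000) per call; scaling by the thread count n
 * keeps the system-wide rate of long delays roughly constant as more
 * threads are added.
 */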
static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

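/*
 * Acquire all three ww_mutexes in list order under a single acquire
 * context.  On -EDEADLK the wait/wound protocol requires backing off:
 * release everything already held, sleep on the contended lock with
 * ww_mutex_lock_slow(), move that lock to the front of the list, and
 * retry the remainder, which is what the list juggling below implements.
 */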
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task next tries to take the lock, the rtmutex code
		 * will account for the new priority and do any
		 * corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

static struct percpu_rw_semaphore pcpu_rwsem;

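/*
 * Unlike the statically initializable lock types above, a
 * percpu_rw_semaphore allocates per-CPU state at run time, hence the
 * ->init hook; an allocation failure here is treated as fatal.
 */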
static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
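		/* About once in 2^20 iterations, sleep a jiffy to vary timing. */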
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 * A large imbalance between the busiest and idlest stress task
 * (max/2 > min) is flagged with "???" in the output.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
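/*
 * The buffer sizing below (200 bytes per stress task plus 8KB of slack)
 * is simply a generous upper bound on the length of one statistics line.
 */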
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default distribute evenly the number of
			 * readers and writers. We still run the same number
			 * of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
