kernel/locking/mutex.c


DEFINITIONS

This source file includes the following definitions:
  1. __mutex_init
  2. __mutex_owner
  3. __owner_task
  4. mutex_is_locked
  5. mutex_trylock_recursive
  6. __owner_flags
  7. __mutex_trylock_or_owner
  8. __mutex_trylock
  9. __mutex_trylock_fast
  10. __mutex_unlock_fast
  11. __mutex_set_flag
  12. __mutex_clear_flag
  13. __mutex_waiter_is_first
  14. __mutex_add_waiter
  15. __mutex_handoff
  16. mutex_lock
  17. ww_mutex_lock_acquired
  18. __ww_ctx_stamp_after
  19. __ww_mutex_die
  20. __ww_mutex_wound
  21. __ww_mutex_check_waiters
  22. ww_mutex_set_context_fastpath
  23. ww_mutex_spin_on_owner
  24. mutex_spin_on_owner
  25. mutex_can_spin_on_owner
  26. mutex_optimistic_spin
  27. mutex_optimistic_spin
  28. mutex_unlock
  29. ww_mutex_unlock
  30. __ww_mutex_kill
  31. __ww_mutex_check_kill
  32. __ww_mutex_add_waiter
  33. __mutex_lock_common
  34. __mutex_lock
  35. __ww_mutex_lock
  36. mutex_lock_nested
  37. _mutex_lock_nest_lock
  38. mutex_lock_killable_nested
  39. mutex_lock_interruptible_nested
  40. mutex_lock_io_nested
  41. ww_mutex_deadlock_injection
  42. ww_mutex_lock
  43. ww_mutex_lock_interruptible
  44. __mutex_unlock_slowpath
  45. mutex_lock_interruptible
  46. mutex_lock_killable
  47. mutex_lock_io
  48. __mutex_lock_slowpath
  49. __mutex_lock_killable_slowpath
  50. __mutex_lock_interruptible_slowpath
  51. __ww_mutex_lock_slowpath
  52. __ww_mutex_lock_interruptible_slowpath
  53. mutex_trylock
  54. ww_mutex_lock
  55. ww_mutex_lock_interruptible
  56. atomic_dec_and_mutex_lock

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * kernel/locking/mutex.c
   4  *
   5  * Mutexes: blocking mutual exclusion locks
   6  *
   7  * Started by Ingo Molnar:
   8  *
   9  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  10  *
  11  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  12  * David Howells for suggestions and improvements.
  13  *
  14  *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
  15  *    from the -rt tree, where it was originally implemented for rtmutexes
  16  *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
  17  *    and Sven Dietrich.
  18  *
  19  * Also see Documentation/locking/mutex-design.rst.
  20  */
  21 #include <linux/mutex.h>
  22 #include <linux/ww_mutex.h>
  23 #include <linux/sched/signal.h>
  24 #include <linux/sched/rt.h>
  25 #include <linux/sched/wake_q.h>
  26 #include <linux/sched/debug.h>
  27 #include <linux/export.h>
  28 #include <linux/spinlock.h>
  29 #include <linux/interrupt.h>
  30 #include <linux/debug_locks.h>
  31 #include <linux/osq_lock.h>
  32 
  33 #ifdef CONFIG_DEBUG_MUTEXES
  34 # include "mutex-debug.h"
  35 #else
  36 # include "mutex.h"
  37 #endif
  38 
  39 void
  40 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  41 {
  42         atomic_long_set(&lock->owner, 0);
  43         spin_lock_init(&lock->wait_lock);
  44         INIT_LIST_HEAD(&lock->wait_list);
  45 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  46         osq_lock_init(&lock->osq);
  47 #endif
  48 
  49         debug_mutex_init(lock, name, key);
  50 }
  51 EXPORT_SYMBOL(__mutex_init);
  52 
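/*
 * Usage sketch (illustrative only, not part of mutex.c): __mutex_init() is
 * rarely called directly; locks are either defined statically with
 * DEFINE_MUTEX() or initialized at runtime through the mutex_init() wrapper,
 * which supplies the lockdep class key. The my_dev structure is hypothetical.
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_dev {
 *		struct mutex io_lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->io_lock);
 *	}
 */
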
  53 /*
  54  * @owner: contains a 'struct task_struct *' pointing to the current lock owner;
  55  * NULL means not owned. Since task_struct pointers are aligned to at
  56  * least L1_CACHE_BYTES, we have low bits to store extra state.
  57  *
  58  * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
  59  * Bit1 indicates unlock needs to hand the lock to the top-waiter.
  60  * Bit2 indicates handoff has been done and we're waiting for pickup.
  61  */
  62 #define MUTEX_FLAG_WAITERS      0x01
  63 #define MUTEX_FLAG_HANDOFF      0x02
  64 #define MUTEX_FLAG_PICKUP       0x04
  65 
  66 #define MUTEX_FLAGS             0x07
  67 
  68 /*
  69  * Internal helper function; C doesn't allow us to hide it :/
  70  *
  71  * DO NOT USE (outside of mutex code).
  72  */
  73 static inline struct task_struct *__mutex_owner(struct mutex *lock)
  74 {
  75         return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
  76 }
  77 
  78 static inline struct task_struct *__owner_task(unsigned long owner)
  79 {
  80         return (struct task_struct *)(owner & ~MUTEX_FLAGS);
  81 }
  82 
  83 bool mutex_is_locked(struct mutex *lock)
  84 {
  85         return __mutex_owner(lock) != NULL;
  86 }
  87 EXPORT_SYMBOL(mutex_is_locked);
  88 
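/*
 * Usage sketch (illustrative only, not part of mutex.c): mutex_is_locked()
 * only reports whether *someone* holds the lock, not who; for asserting that
 * the current task holds it, lockdep_assert_held() is usually preferable.
 * update_hw_state() and my_dev are hypothetical.
 *
 *	static void update_hw_state(struct my_dev *dev)
 *	{
 *		WARN_ON(!mutex_is_locked(&dev->io_lock));
 *		...
 *	}
 */
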
  89 __must_check enum mutex_trylock_recursive_enum
  90 mutex_trylock_recursive(struct mutex *lock)
  91 {
  92         if (unlikely(__mutex_owner(lock) == current))
  93                 return MUTEX_TRYLOCK_RECURSIVE;
  94 
  95         return mutex_trylock(lock);
  96 }
  97 EXPORT_SYMBOL(mutex_trylock_recursive);
  98 
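/*
 * Usage sketch (illustrative only, not part of mutex.c): the recursive
 * trylock exists for a few legacy call chains that may re-enter with the
 * lock already held by current. The enum values are the ones declared next
 * to mutex_trylock_recursive() in <linux/mutex.h>; do_work() and my_dev are
 * hypothetical.
 *
 *	bool drop = false;
 *
 *	switch (mutex_trylock_recursive(&dev->io_lock)) {
 *	case MUTEX_TRYLOCK_FAILED:
 *		return -EBUSY;
 *	case MUTEX_TRYLOCK_SUCCESS:
 *		drop = true;
 *		break;
 *	case MUTEX_TRYLOCK_RECURSIVE:
 *		break;
 *	}
 *	do_work(dev);
 *	if (drop)
 *		mutex_unlock(&dev->io_lock);
 */
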
  99 static inline unsigned long __owner_flags(unsigned long owner)
 100 {
 101         return owner & MUTEX_FLAGS;
 102 }
 103 
 104 /*
 105  * Trylock variant that returns the owning task on failure.
 106  */
 107 static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
 108 {
 109         unsigned long owner, curr = (unsigned long)current;
 110 
 111         owner = atomic_long_read(&lock->owner);
 112         for (;;) { /* must loop, can race against a flag */
 113                 unsigned long old, flags = __owner_flags(owner);
 114                 unsigned long task = owner & ~MUTEX_FLAGS;
 115 
 116                 if (task) {
 117                         if (likely(task != curr))
 118                                 break;
 119 
 120                         if (likely(!(flags & MUTEX_FLAG_PICKUP)))
 121                                 break;
 122 
 123                         flags &= ~MUTEX_FLAG_PICKUP;
 124                 } else {
 125 #ifdef CONFIG_DEBUG_MUTEXES
 126                         DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
 127 #endif
 128                 }
 129 
 130                 /*
 131                  * We set the HANDOFF bit, so we must make sure it doesn't live
 132                  * past the point where we acquire it. This would be possible
 133                  * if we (accidentally) set the bit on an unlocked mutex.
 134                  */
 135                 flags &= ~MUTEX_FLAG_HANDOFF;
 136 
 137                 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
 138                 if (old == owner)
 139                         return NULL;
 140 
 141                 owner = old;
 142         }
 143 
 144         return __owner_task(owner);
 145 }
 146 
 147 /*
 148  * Actual trylock that will work on any unlocked state.
 149  */
 150 static inline bool __mutex_trylock(struct mutex *lock)
 151 {
 152         return !__mutex_trylock_or_owner(lock);
 153 }
 154 
 155 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 156 /*
 157  * Lockdep annotations are confined to the slow paths for simplicity.
 158  * There is nothing that would stop spreading the lockdep annotations outwards
 159  * except more code.
 160  */
 161 
 162 /*
 163  * Optimistic trylock that only works in the uncontended case. Make sure to
 164  * follow with a __mutex_trylock() before failing.
 165  */
 166 static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
 167 {
 168         unsigned long curr = (unsigned long)current;
 169         unsigned long zero = 0UL;
 170 
 171         if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
 172                 return true;
 173 
 174         return false;
 175 }
 176 
 177 static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
 178 {
 179         unsigned long curr = (unsigned long)current;
 180 
 181         if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
 182                 return true;
 183 
 184         return false;
 185 }
 186 #endif
 187 
 188 static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
 189 {
 190         atomic_long_or(flag, &lock->owner);
 191 }
 192 
 193 static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
 194 {
 195         atomic_long_andnot(flag, &lock->owner);
 196 }
 197 
 198 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
 199 {
 200         return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 201 }
 202 
 203 /*
 204  * Add @waiter to a given location in the lock wait_list and set the
 205  * FLAG_WAITERS flag if it's the first waiter.
 206  */
 207 static void __sched
 208 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 209                    struct list_head *list)
 210 {
 211         debug_mutex_add_waiter(lock, waiter, current);
 212 
 213         list_add_tail(&waiter->list, list);
 214         if (__mutex_waiter_is_first(lock, waiter))
 215                 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 216 }
 217 
 218 /*
 219  * Give up ownership to a specific task; when @task = NULL, this is equivalent
 220  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 221  * WAITERS. Provides RELEASE semantics like a regular unlock;
 222  * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 223  */
 224 static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 225 {
 226         unsigned long owner = atomic_long_read(&lock->owner);
 227 
 228         for (;;) {
 229                 unsigned long old, new;
 230 
 231 #ifdef CONFIG_DEBUG_MUTEXES
 232                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
 233                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
 234 #endif
 235 
 236                 new = (owner & MUTEX_FLAG_WAITERS);
 237                 new |= (unsigned long)task;
 238                 if (task)
 239                         new |= MUTEX_FLAG_PICKUP;
 240 
 241                 old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
 242                 if (old == owner)
 243                         break;
 244 
 245                 owner = old;
 246         }
 247 }
 248 
 249 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 250 /*
 251  * We split the mutex lock/unlock logic into separate fastpath and
 252  * slowpath functions, to reduce the register pressure on the fastpath.
 253  * We also put the fastpath first in the kernel image, to make sure the
 254  * branch is predicted by the CPU as default-untaken.
 255  */
 256 static void __sched __mutex_lock_slowpath(struct mutex *lock);
 257 
 258 /**
 259  * mutex_lock - acquire the mutex
 260  * @lock: the mutex to be acquired
 261  *
 262  * Lock the mutex exclusively for this task. If the mutex is not
 263  * available right now, it will sleep until it can get it.
 264  *
 265  * The mutex must later on be released by the same task that
 266  * acquired it. Recursive locking is not allowed. The task
 267  * may not exit without first unlocking the mutex. Also, kernel
 268  * memory where the mutex resides must not be freed with
 269  * the mutex still locked. The mutex must first be initialized
 270  * (or statically defined) before it can be locked. memset()-ing
 271  * the mutex to 0 is not allowed.
 272  *
 273  * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 274  * checks that will enforce the restrictions and will also do
 275  * deadlock debugging)
 276  *
 277  * This function is similar to (but not equivalent to) down().
 278  */
 279 void __sched mutex_lock(struct mutex *lock)
 280 {
 281         might_sleep();
 282 
 283         if (!__mutex_trylock_fast(lock))
 284                 __mutex_lock_slowpath(lock);
 285 }
 286 EXPORT_SYMBOL(mutex_lock);
 287 #endif
 288 
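/*
 * Usage sketch (illustrative only, not part of mutex.c): the canonical
 * acquire/release pattern around a critical section. my_dev and do_io()
 * are hypothetical.
 *
 *	static ssize_t my_dev_write(struct my_dev *dev, const char *buf, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		mutex_lock(&dev->io_lock);
 *		ret = do_io(dev, buf, len);
 *		mutex_unlock(&dev->io_lock);
 *
 *		return ret;
 *	}
 */
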
 289 /*
 290  * Wait-Die:
 291  *   The newer transactions are killed when:
 292  *     It (the new transaction) makes a request for a lock being held
 293  *     by an older transaction.
 294  *
 295  * Wound-Wait:
 296  *   The newer transactions are wounded when:
 297  *     An older transaction makes a request for a lock being held by
 298  *     the newer transaction.
 299  */
 300 
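/*
 * Concrete example with two hypothetical transactions, T1 (older, smaller
 * stamp) and T2 (younger): if T2 requests a lock held by T1, Wait-Die kills
 * T2 (it backs off, drops its locks and restarts), whereas Wound-Wait lets
 * T2 wait. Conversely, if T1 requests a lock held by T2, Wait-Die lets T1
 * wait, whereas Wound-Wait wounds T2; T2 then gets -EDEADLK the next time
 * it blocks on a ww_mutex and must back off.
 */
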
 301 /*
 302  * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 303  * it.
 304  */
 305 static __always_inline void
 306 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 307 {
 308 #ifdef CONFIG_DEBUG_MUTEXES
 309         /*
 310          * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
 311          * but released with a normal mutex_unlock in this call.
 312          *
 313          * This should never happen, always use ww_mutex_unlock.
 314          */
 315         DEBUG_LOCKS_WARN_ON(ww->ctx);
 316 
 317         /*
 318          * Not quite done after calling ww_acquire_done() ?
 319          */
 320         DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
 321 
 322         if (ww_ctx->contending_lock) {
 323                 /*
 324                  * After -EDEADLK you tried to
 325                  * acquire a different ww_mutex? Bad!
 326                  */
 327                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
 328 
 329                 /*
 330                  * You called ww_mutex_lock after receiving -EDEADLK,
 331                  * but 'forgot' to unlock everything else first?
 332                  */
 333                 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
 334                 ww_ctx->contending_lock = NULL;
 335         }
 336 
 337         /*
 338          * Naughty, using a different class will lead to undefined behavior!
 339          */
 340         DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
 341 #endif
 342         ww_ctx->acquired++;
 343         ww->ctx = ww_ctx;
 344 }
 345 
 346 /*
 347  * Determine if context @a is 'after' context @b. IOW, @a is a younger
 348  * transaction than @b and depending on algorithm either needs to wait for
 349  * @b or die.
 350  */
 351 static inline bool __sched
 352 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
 353 {
 354 
 355         return (signed long)(a->stamp - b->stamp) > 0;
 356 }
 357 
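/*
 * The signed subtraction above makes the comparison safe across counter
 * wraparound. Worked example with hypothetical stamps: a->stamp == ULONG_MAX
 * and b->stamp == 1 (i.e. @b was stamped just after the counter wrapped).
 * Then a->stamp - b->stamp == ULONG_MAX - 1, which as a signed long is -2,
 * so the function returns false and @a is correctly treated as the older
 * context even though its stamp is numerically larger.
 */
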
 358 /*
 359  * Wait-Die; wake a younger waiter context (when locks held) such that it can
 360  * die.
 361  *
 362  * Among waiters with context, only the first one can have other locks acquired
 363  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 364  * __ww_mutex_check_kill() wake any but the earliest context.
 365  */
 366 static bool __sched
 367 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
 368                struct ww_acquire_ctx *ww_ctx)
 369 {
 370         if (!ww_ctx->is_wait_die)
 371                 return false;
 372 
 373         if (waiter->ww_ctx->acquired > 0 &&
 374                         __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
 375                 debug_mutex_wake_waiter(lock, waiter);
 376                 wake_up_process(waiter->task);
 377         }
 378 
 379         return true;
 380 }
 381 
 382 /*
 383  * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 384  *
 385  * Wound the lock holder if there are waiters with older transactions than
 386  * the lock holder. Even though multiple waiters may wound the lock holder,
 387  * it's sufficient that only one does.
 388  */
 389 static bool __ww_mutex_wound(struct mutex *lock,
 390                              struct ww_acquire_ctx *ww_ctx,
 391                              struct ww_acquire_ctx *hold_ctx)
 392 {
 393         struct task_struct *owner = __mutex_owner(lock);
 394 
 395         lockdep_assert_held(&lock->wait_lock);
 396 
 397         /*
 398          * Possible through __ww_mutex_add_waiter() when we race with
 399          * ww_mutex_set_context_fastpath(). In that case we'll get here again
 400          * through __ww_mutex_check_waiters().
 401          */
 402         if (!hold_ctx)
 403                 return false;
 404 
 405         /*
 406          * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
 407          * it cannot go away because we'll have FLAG_WAITERS set and hold
 408          * wait_lock.
 409          */
 410         if (!owner)
 411                 return false;
 412 
 413         if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
 414                 hold_ctx->wounded = 1;
 415 
 416                 /*
 417                  * wake_up_process() paired with set_current_state()
 418                  * inserts sufficient barriers to make sure @owner either sees
 419                  * it's wounded in __ww_mutex_check_kill() or has a
 420                  * wakeup pending to re-read the wounded state.
 421                  */
 422                 if (owner != current)
 423                         wake_up_process(owner);
 424 
 425                 return true;
 426         }
 427 
 428         return false;
 429 }
 430 
 431 /*
 432  * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 433  * behind us on the wait-list, check if they need to die, or wound us.
 434  *
 435  * See __ww_mutex_add_waiter() for the list-order construction; basically the
 436  * list is ordered by stamp, smallest (oldest) first.
 437  *
 438  * This relies on never mixing wait-die/wound-wait on the same wait-list;
 439  * which is currently ensured by that being a ww_class property.
 440  *
 441  * The current task must not be on the wait list.
 442  */
 443 static void __sched
 444 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 445 {
 446         struct mutex_waiter *cur;
 447 
 448         lockdep_assert_held(&lock->wait_lock);
 449 
 450         list_for_each_entry(cur, &lock->wait_list, list) {
 451                 if (!cur->ww_ctx)
 452                         continue;
 453 
 454                 if (__ww_mutex_die(lock, cur, ww_ctx) ||
 455                     __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
 456                         break;
 457         }
 458 }
 459 
 460 /*
 461  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 462  * and wake up any waiters so they can recheck.
 463  */
 464 static __always_inline void
 465 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 466 {
 467         ww_mutex_lock_acquired(lock, ctx);
 468 
 469         /*
 470          * The lock->ctx update should be visible on all cores before
 471          * the WAITERS check is done, otherwise contended waiters might be
 472          * missed. The contended waiters will either see ww_ctx == NULL
 473          * and keep spinning, or they will acquire wait_lock, add themselves
 474          * to the waiter list and sleep.
 475          */
 476         smp_mb(); /* See comments above and below. */
 477 
 478         /*
 479          * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
 480          *     MB                       MB
 481          * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
 482          *
 483          * The memory barrier above pairs with the memory barrier in
 484          * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
 485          * and/or !empty list.
 486          */
 487         if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
 488                 return;
 489 
 490         /*
 491          * Uh oh, we raced in fastpath, check if any of the waiters need to
 492          * die or wound us.
 493          */
 494         spin_lock(&lock->base.wait_lock);
 495         __ww_mutex_check_waiters(&lock->base, ctx);
 496         spin_unlock(&lock->base.wait_lock);
 497 }
 498 
 499 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 500 
 501 static inline
 502 bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 503                             struct mutex_waiter *waiter)
 504 {
 505         struct ww_mutex *ww;
 506 
 507         ww = container_of(lock, struct ww_mutex, base);
 508 
 509         /*
 510          * If ww->ctx is set the contents are undefined; only
 511          * by acquiring wait_lock is there a guarantee that
 512          * they are not invalid when read.
 513          *
 514          * As such, when deadlock detection needs to be
 515          * performed the optimistic spinning cannot be done.
 516          *
 517          * Check this in every inner iteration because we may
 518          * be racing against another thread's ww_mutex_lock.
 519          */
 520         if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
 521                 return false;
 522 
 523         /*
 524          * If we aren't on the wait list yet, cancel the spin
 525          * if there are waiters. We want to avoid stealing the
 526          * lock from a waiter with an earlier stamp, since the
 527          * other thread may already own a lock that we also
 528          * need.
 529          */
 530         if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
 531                 return false;
 532 
 533         /*
 534          * Similarly, stop spinning if we are no longer the
 535          * first waiter.
 536          */
 537         if (waiter && !__mutex_waiter_is_first(lock, waiter))
 538                 return false;
 539 
 540         return true;
 541 }
 542 
 543 /*
 544  * Look out! "owner" is an entirely speculative pointer access and not
 545  * reliable.
 546  *
 547  * "noinline" so that this function shows up on perf profiles.
 548  */
 549 static noinline
 550 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 551                          struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 552 {
 553         bool ret = true;
 554 
 555         rcu_read_lock();
 556         while (__mutex_owner(lock) == owner) {
 557                 /*
 558                  * Ensure we emit the owner->on_cpu dereference _after_
 559                  * checking lock->owner still matches owner. If that fails,
 560                  * owner might point to freed memory. If it still matches,
 561                  * the rcu_read_lock() ensures the memory stays valid.
 562                  */
 563                 barrier();
 564 
 565                 /*
 566                  * Use vcpu_is_preempted() to detect the lock holder preemption issue.
 567                  */
 568                 if (!owner->on_cpu || need_resched() ||
 569                                 vcpu_is_preempted(task_cpu(owner))) {
 570                         ret = false;
 571                         break;
 572                 }
 573 
 574                 if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
 575                         ret = false;
 576                         break;
 577                 }
 578 
 579                 cpu_relax();
 580         }
 581         rcu_read_unlock();
 582 
 583         return ret;
 584 }
 585 
 586 /*
 587  * Initial check for entering the mutex spinning loop
 588  */
 589 static inline int mutex_can_spin_on_owner(struct mutex *lock)
 590 {
 591         struct task_struct *owner;
 592         int retval = 1;
 593 
 594         if (need_resched())
 595                 return 0;
 596 
 597         rcu_read_lock();
 598         owner = __mutex_owner(lock);
 599 
 600         /*
 601          * Due to the lock holder preemption issue, we skip spinning if the task
 602          * is not on a CPU or its CPU is preempted.
 603          */
 604         if (owner)
 605                 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 606         rcu_read_unlock();
 607 
 608         /*
 609          * If lock->owner is not set, the mutex has been released. Return true
 610          * such that we'll trylock in the spin path, which is a faster option
 611          * than the blocking slow path.
 612          */
 613         return retval;
 614 }
 615 
 616 /*
 617  * Optimistic spinning.
 618  *
 619  * We try to spin for acquisition when we find that the lock owner
 620  * is currently running on a (different) CPU and while we don't
 621  * need to reschedule. The rationale is that if the lock owner is
 622  * running, it is likely to release the lock soon.
 623  *
 624  * The mutex spinners are queued up using MCS lock so that only one
 625  * spinner can compete for the mutex. However, if mutex spinning isn't
 626  * going to happen, there is no point in going through the lock/unlock
 627  * overhead.
 628  *
 629  * Returns true when the lock was taken, otherwise false, indicating
 630  * that we need to jump to the slowpath and sleep.
 631  *
 632  * The waiter flag is set to true if the spinner is a waiter in the wait
 633  * queue. The waiter-spinner will spin on the lock directly and concurrently
 634  * with the spinner at the head of the OSQ, if present, until the owner is
 635  * changed to itself.
 636  */
 637 static __always_inline bool
 638 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 639                       const bool use_ww_ctx, struct mutex_waiter *waiter)
 640 {
 641         if (!waiter) {
 642                 /*
 643                  * The purpose of the mutex_can_spin_on_owner() function is
 644                  * to eliminate the overhead of osq_lock() and osq_unlock()
 645                  * in case spinning isn't possible. As a waiter-spinner
 646                  * is not going to take OSQ lock anyway, there is no need
 647                  * to call mutex_can_spin_on_owner().
 648                  */
 649                 if (!mutex_can_spin_on_owner(lock))
 650                         goto fail;
 651 
 652                 /*
 653                  * In order to avoid a stampede of mutex spinners trying to
 654                  * acquire the mutex all at once, the spinners need to take a
 655                  * MCS (queued) lock first before spinning on the owner field.
 656                  */
 657                 if (!osq_lock(&lock->osq))
 658                         goto fail;
 659         }
 660 
 661         for (;;) {
 662                 struct task_struct *owner;
 663 
 664                 /* Try to acquire the mutex... */
 665                 owner = __mutex_trylock_or_owner(lock);
 666                 if (!owner)
 667                         break;
 668 
 669                 /*
 670                  * There's an owner, wait for it to either
 671                  * release the lock or go to sleep.
 672                  */
 673                 if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
 674                         goto fail_unlock;
 675 
 676                 /*
 677                  * The cpu_relax() call is a compiler barrier which forces
 678                  * everything in this loop to be re-loaded. We don't need
 679                  * memory barriers as we'll eventually observe the right
 680                  * values at the cost of a few extra spins.
 681                  */
 682                 cpu_relax();
 683         }
 684 
 685         if (!waiter)
 686                 osq_unlock(&lock->osq);
 687 
 688         return true;
 689 
 690 
 691 fail_unlock:
 692         if (!waiter)
 693                 osq_unlock(&lock->osq);
 694 
 695 fail:
 696         /*
 697          * If we fell out of the spin path because of need_resched(),
 698          * reschedule now, before we try-lock the mutex. This avoids getting
 699          * scheduled out right after we obtained the mutex.
 700          */
 701         if (need_resched()) {
 702                 /*
 703                  * We _should_ have TASK_RUNNING here, but just in case
 704                  * we do not, make it so, otherwise we might get stuck.
 705                  */
 706                 __set_current_state(TASK_RUNNING);
 707                 schedule_preempt_disabled();
 708         }
 709 
 710         return false;
 711 }
 712 #else
 713 static __always_inline bool
 714 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 715                       const bool use_ww_ctx, struct mutex_waiter *waiter)
 716 {
 717         return false;
 718 }
 719 #endif
 720 
 721 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
 722 
 723 /**
 724  * mutex_unlock - release the mutex
 725  * @lock: the mutex to be released
 726  *
 727  * Unlock a mutex that has been locked by this task previously.
 728  *
 729  * This function must not be used in interrupt context. Unlocking
 730  * a mutex that is not locked is not allowed.
 731  *
 732  * This function is similar to (but not equivalent to) up().
 733  */
 734 void __sched mutex_unlock(struct mutex *lock)
 735 {
 736 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 737         if (__mutex_unlock_fast(lock))
 738                 return;
 739 #endif
 740         __mutex_unlock_slowpath(lock, _RET_IP_);
 741 }
 742 EXPORT_SYMBOL(mutex_unlock);
 743 
 744 /**
 745  * ww_mutex_unlock - release the w/w mutex
 746  * @lock: the mutex to be released
 747  *
 748  * Unlock a mutex that has been locked by this task previously with any of the
 749  * ww_mutex_lock* functions (with or without an acquire context). It is
 750  * forbidden to release the locks after releasing the acquire context.
 751  *
 752  * This function must not be used in interrupt context. Unlocking
 753  * of an unlocked mutex is not allowed.
 754  */
 755 void __sched ww_mutex_unlock(struct ww_mutex *lock)
 756 {
 757         /*
 758          * The unlocking fastpath is the 0->1 transition from 'locked'
 759          * into 'unlocked' state:
 760          */
 761         if (lock->ctx) {
 762 #ifdef CONFIG_DEBUG_MUTEXES
 763                 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
 764 #endif
 765                 if (lock->ctx->acquired > 0)
 766                         lock->ctx->acquired--;
 767                 lock->ctx = NULL;
 768         }
 769 
 770         mutex_unlock(&lock->base);
 771 }
 772 EXPORT_SYMBOL(ww_mutex_unlock);
 773 
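/*
 * Usage sketch (illustrative only, not part of mutex.c): the usual
 * acquire/backoff pattern for taking several ww_mutexes, roughly following
 * Documentation/locking/ww-mutex-design.rst. my_class, my_obj and
 * lock_both() are hypothetical. The first lock in an empty context cannot
 * return -EDEADLK, so its return value is ignored here; a more careful
 * backoff would use ww_mutex_lock_slow() on the contended lock before
 * retrying.
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	static void lock_both(struct my_obj *a, struct my_obj *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *	retry:
 *		ww_mutex_lock(&a->lock, &ctx);
 *		if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(&a->lock);
 *			goto retry;
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... use a and b, then ww_mutex_unlock() both ...
 *
 *		ww_acquire_fini(&ctx);
 *	}
 */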
 774 
 775 static __always_inline int __sched
 776 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 777 {
 778         if (ww_ctx->acquired > 0) {
 779 #ifdef CONFIG_DEBUG_MUTEXES
 780                 struct ww_mutex *ww;
 781 
 782                 ww = container_of(lock, struct ww_mutex, base);
 783                 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
 784                 ww_ctx->contending_lock = ww;
 785 #endif
 786                 return -EDEADLK;
 787         }
 788 
 789         return 0;
 790 }
 791 
 792 
 793 /*
 794  * Check the wound condition for the current lock acquire.
 795  *
 796  * Wound-Wait: If we're wounded, kill ourselves.
 797  *
 798  * Wait-Die: If we're trying to acquire a lock already held by an older
 799  *           context, kill ourselves.
 800  *
 801  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 802  * look at waiters before us in the wait-list.
 803  */
 804 static inline int __sched
 805 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
 806                       struct ww_acquire_ctx *ctx)
 807 {
 808         struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 809         struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
 810         struct mutex_waiter *cur;
 811 
 812         if (ctx->acquired == 0)
 813                 return 0;
 814 
 815         if (!ctx->is_wait_die) {
 816                 if (ctx->wounded)
 817                         return __ww_mutex_kill(lock, ctx);
 818 
 819                 return 0;
 820         }
 821 
 822         if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
 823                 return __ww_mutex_kill(lock, ctx);
 824 
 825         /*
 826          * If there is a waiter in front of us that has a context, then its
 827  * stamp is earlier than ours and we must kill ourselves.
 828          */
 829         cur = waiter;
 830         list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
 831                 if (!cur->ww_ctx)
 832                         continue;
 833 
 834                 return __ww_mutex_kill(lock, ctx);
 835         }
 836 
 837         return 0;
 838 }
 839 
 840 /*
 841  * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 842  * smallest first, such that older contexts are preferred over younger
 843  * contexts when acquiring the lock.
 844  *
 845  * Waiters without context are interspersed in FIFO order.
 846  *
 847  * Furthermore, for Wait-Die we kill ourselves immediately when possible (there
 848  * are older contexts already waiting) to avoid unnecessary waiting, and for
 849  * Wound-Wait we ensure we wound the owning context when it is younger.
 850  */
 851 static inline int __sched
 852 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 853                       struct mutex *lock,
 854                       struct ww_acquire_ctx *ww_ctx)
 855 {
 856         struct mutex_waiter *cur;
 857         struct list_head *pos;
 858         bool is_wait_die;
 859 
 860         if (!ww_ctx) {
 861                 __mutex_add_waiter(lock, waiter, &lock->wait_list);
 862                 return 0;
 863         }
 864 
 865         is_wait_die = ww_ctx->is_wait_die;
 866 
 867         /*
 868          * Add the waiter before the first waiter with a higher stamp.
 869          * Waiters without a context are skipped to avoid starving
 870          * them. Wait-Die waiters may die here. Wound-Wait waiters
 871          * never die here, but they are sorted in stamp order and
 872          * may wound the lock holder.
 873          */
 874         pos = &lock->wait_list;
 875         list_for_each_entry_reverse(cur, &lock->wait_list, list) {
 876                 if (!cur->ww_ctx)
 877                         continue;
 878 
 879                 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
 880                         /*
 881                          * Wait-Die: if we find an older context waiting, there
 882                          * is no point in queueing behind it, as we'd have to
 883                          * die the moment it would acquire the lock.
 884                          */
 885                         if (is_wait_die) {
 886                                 int ret = __ww_mutex_kill(lock, ww_ctx);
 887 
 888                                 if (ret)
 889                                         return ret;
 890                         }
 891 
 892                         break;
 893                 }
 894 
 895                 pos = &cur->list;
 896 
 897                 /* Wait-Die: ensure younger waiters die. */
 898                 __ww_mutex_die(lock, cur, ww_ctx);
 899         }
 900 
 901         __mutex_add_waiter(lock, waiter, pos);
 902 
 903         /*
 904          * Wound-Wait: if we're blocking on a mutex owned by a younger context,
 905          * wound it such that we might proceed.
 906          */
 907         if (!is_wait_die) {
 908                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 909 
 910                 /*
 911                  * See ww_mutex_set_context_fastpath(). Orders setting
 912                  * MUTEX_FLAG_WAITERS vs the ww->ctx load,
 913                  * such that either we or the fastpath will wound @ww->ctx.
 914                  */
 915                 smp_mb();
 916                 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
 917         }
 918 
 919         return 0;
 920 }
 921 
 922 /*
 923  * Lock a mutex (possibly interruptible), slowpath:
 924  */
 925 static __always_inline int __sched
 926 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 927                     struct lockdep_map *nest_lock, unsigned long ip,
 928                     struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 929 {
 930         struct mutex_waiter waiter;
 931         bool first = false;
 932         struct ww_mutex *ww;
 933         int ret;
 934 
 935         might_sleep();
 936 
 937 #ifdef CONFIG_DEBUG_MUTEXES
 938         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 939 #endif
 940 
 941         ww = container_of(lock, struct ww_mutex, base);
 942         if (use_ww_ctx && ww_ctx) {
 943                 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 944                         return -EALREADY;
 945 
 946                 /*
 947                  * Reset the wounded flag after a kill. No other process can
 948                  * race and wound us here since they can't have a valid owner
 949                  * pointer if we don't have any locks held.
 950                  */
 951                 if (ww_ctx->acquired == 0)
 952                         ww_ctx->wounded = 0;
 953         }
 954 
 955         preempt_disable();
 956         mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 957 
 958         if (__mutex_trylock(lock) ||
 959             mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
 960                 /* got the lock, yay! */
 961                 lock_acquired(&lock->dep_map, ip);
 962                 if (use_ww_ctx && ww_ctx)
 963                         ww_mutex_set_context_fastpath(ww, ww_ctx);
 964                 preempt_enable();
 965                 return 0;
 966         }
 967 
 968         spin_lock(&lock->wait_lock);
 969         /*
 970          * After waiting to acquire the wait_lock, try again.
 971          */
 972         if (__mutex_trylock(lock)) {
 973                 if (use_ww_ctx && ww_ctx)
 974                         __ww_mutex_check_waiters(lock, ww_ctx);
 975 
 976                 goto skip_wait;
 977         }
 978 
 979         debug_mutex_lock_common(lock, &waiter);
 980 
 981         lock_contended(&lock->dep_map, ip);
 982 
 983         if (!use_ww_ctx) {
 984                 /* add waiting tasks to the end of the waitqueue (FIFO): */
 985                 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
 986 
 987 
 988 #ifdef CONFIG_DEBUG_MUTEXES
 989                 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
 990 #endif
 991         } else {
 992                 /*
 993                  * Add in stamp order, waking up waiters that must kill
 994                  * themselves.
 995                  */
 996                 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
 997                 if (ret)
 998                         goto err_early_kill;
 999 
1000                 waiter.ww_ctx = ww_ctx;
1001         }
1002 
1003         waiter.task = current;
1004 
1005         set_current_state(state);
1006         for (;;) {
1007                 /*
1008                  * Once we hold wait_lock, we're serialized against
1009                  * mutex_unlock() handing the lock off to us, do a trylock
1010                  * before testing the error conditions to make sure we pick up
1011                  * the handoff.
1012                  */
1013                 if (__mutex_trylock(lock))
1014                         goto acquired;
1015 
1016                 /*
1017                  * Check for signals and kill conditions while holding
1018                  * wait_lock. This ensures the lock cancellation is ordered
1019                  * against mutex_unlock() and wake-ups do not go missing.
1020                  */
1021                 if (signal_pending_state(state, current)) {
1022                         ret = -EINTR;
1023                         goto err;
1024                 }
1025 
1026                 if (use_ww_ctx && ww_ctx) {
1027                         ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
1028                         if (ret)
1029                                 goto err;
1030                 }
1031 
1032                 spin_unlock(&lock->wait_lock);
1033                 schedule_preempt_disabled();
1034 
1035                 /*
1036                  * ww_mutex needs to always recheck its position since its waiter
1037                  * list is not FIFO ordered.
1038                  */
1039                 if ((use_ww_ctx && ww_ctx) || !first) {
1040                         first = __mutex_waiter_is_first(lock, &waiter);
1041                         if (first)
1042                                 __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
1043                 }
1044 
1045                 set_current_state(state);
1046                 /*
1047                  * Here we order against unlock; we must either see it change
1048                  * state back to RUNNING and fall through the next schedule(),
1049                  * or we must see its unlock and acquire.
1050                  */
1051                 if (__mutex_trylock(lock) ||
1052                     (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
1053                         break;
1054 
1055                 spin_lock(&lock->wait_lock);
1056         }
1057         spin_lock(&lock->wait_lock);
1058 acquired:
1059         __set_current_state(TASK_RUNNING);
1060 
1061         if (use_ww_ctx && ww_ctx) {
1062                 /*
1063                  * Wound-Wait; we stole the lock (!first_waiter), check the
1064                  * waiters as anyone might want to wound us.
1065                  */
1066                 if (!ww_ctx->is_wait_die &&
1067                     !__mutex_waiter_is_first(lock, &waiter))
1068                         __ww_mutex_check_waiters(lock, ww_ctx);
1069         }
1070 
1071         mutex_remove_waiter(lock, &waiter, current);
1072         if (likely(list_empty(&lock->wait_list)))
1073                 __mutex_clear_flag(lock, MUTEX_FLAGS);
1074 
1075         debug_mutex_free_waiter(&waiter);
1076 
1077 skip_wait:
1078         /* got the lock - cleanup and rejoice! */
1079         lock_acquired(&lock->dep_map, ip);
1080 
1081         if (use_ww_ctx && ww_ctx)
1082                 ww_mutex_lock_acquired(ww, ww_ctx);
1083 
1084         spin_unlock(&lock->wait_lock);
1085         preempt_enable();
1086         return 0;
1087 
1088 err:
1089         __set_current_state(TASK_RUNNING);
1090         mutex_remove_waiter(lock, &waiter, current);
1091 err_early_kill:
1092         spin_unlock(&lock->wait_lock);
1093         debug_mutex_free_waiter(&waiter);
1094         mutex_release(&lock->dep_map, 1, ip);
1095         preempt_enable();
1096         return ret;
1097 }
1098 
1099 static int __sched
1100 __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1101              struct lockdep_map *nest_lock, unsigned long ip)
1102 {
1103         return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
1104 }
1105 
1106 static int __sched
1107 __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
1108                 struct lockdep_map *nest_lock, unsigned long ip,
1109                 struct ww_acquire_ctx *ww_ctx)
1110 {
1111         return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
1112 }
1113 
1114 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1115 void __sched
1116 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
1117 {
1118         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
1119 }
1120 
1121 EXPORT_SYMBOL_GPL(mutex_lock_nested);
1122 
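/*
 * Usage sketch (illustrative only, not part of mutex.c): the subclass
 * annotation covers the common case of taking two locks of the same class
 * in a well-defined order, which lockdep would otherwise report as possible
 * recursive locking. my_obj is hypothetical.
 *
 *	static void lock_pair(struct my_obj *a, struct my_obj *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		mutex_lock(&a->lock);
 *		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	}
 */
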
1123 void __sched
1124 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
1125 {
1126         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
1127 }
1128 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
1129 
1130 int __sched
1131 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
1132 {
1133         return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
1134 }
1135 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
1136 
1137 int __sched
1138 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
1139 {
1140         return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
1141 }
1142 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
1143 
1144 void __sched
1145 mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
1146 {
1147         int token;
1148 
1149         might_sleep();
1150 
1151         token = io_schedule_prepare();
1152         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
1153                             subclass, NULL, _RET_IP_, NULL, 0);
1154         io_schedule_finish(token);
1155 }
1156 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
1157 
1158 static inline int
1159 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1160 {
1161 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
1162         unsigned tmp;
1163 
1164         if (ctx->deadlock_inject_countdown-- == 0) {
1165                 tmp = ctx->deadlock_inject_interval;
1166                 if (tmp > UINT_MAX/4)
1167                         tmp = UINT_MAX;
1168                 else
1169                         tmp = tmp*2 + tmp + tmp/2;
1170 
1171                 ctx->deadlock_inject_interval = tmp;
1172                 ctx->deadlock_inject_countdown = tmp;
1173                 ctx->contending_lock = lock;
1174 
1175                 ww_mutex_unlock(lock);
1176 
1177                 return -EDEADLK;
1178         }
1179 #endif
1180 
1181         return 0;
1182 }
1183 
1184 int __sched
1185 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1186 {
1187         int ret;
1188 
1189         might_sleep();
1190         ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
1191                                0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1192                                ctx);
1193         if (!ret && ctx && ctx->acquired > 1)
1194                 return ww_mutex_deadlock_injection(lock, ctx);
1195 
1196         return ret;
1197 }
1198 EXPORT_SYMBOL_GPL(ww_mutex_lock);
1199 
1200 int __sched
1201 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1202 {
1203         int ret;
1204 
1205         might_sleep();
1206         ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
1207                               0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
1208                               ctx);
1209 
1210         if (!ret && ctx && ctx->acquired > 1)
1211                 return ww_mutex_deadlock_injection(lock, ctx);
1212 
1213         return ret;
1214 }
1215 EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
1216 
1217 #endif
1218 
1219 /*
1220  * Release the lock, slowpath:
1221  */
1222 static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
1223 {
1224         struct task_struct *next = NULL;
1225         DEFINE_WAKE_Q(wake_q);
1226         unsigned long owner;
1227 
1228         mutex_release(&lock->dep_map, 1, ip);
1229 
1230         /*
1231          * Release the lock before (potentially) taking the spinlock such that
1232          * other contenders can get on with things ASAP.
1233          *
1234          * Except when HANDOFF, in that case we must not clear the owner field,
1235          * but instead set it to the top waiter.
1236          */
1237         owner = atomic_long_read(&lock->owner);
1238         for (;;) {
1239                 unsigned long old;
1240 
1241 #ifdef CONFIG_DEBUG_MUTEXES
1242                 DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
1243                 DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
1244 #endif
1245 
1246                 if (owner & MUTEX_FLAG_HANDOFF)
1247                         break;
1248 
1249                 old = atomic_long_cmpxchg_release(&lock->owner, owner,
1250                                                   __owner_flags(owner));
1251                 if (old == owner) {
1252                         if (owner & MUTEX_FLAG_WAITERS)
1253                                 break;
1254 
1255                         return;
1256                 }
1257 
1258                 owner = old;
1259         }
1260 
1261         spin_lock(&lock->wait_lock);
1262         debug_mutex_unlock(lock);
1263         if (!list_empty(&lock->wait_list)) {
1264                 /* get the first entry from the wait-list: */
1265                 struct mutex_waiter *waiter =
1266                         list_first_entry(&lock->wait_list,
1267                                          struct mutex_waiter, list);
1268 
1269                 next = waiter->task;
1270 
1271                 debug_mutex_wake_waiter(lock, waiter);
1272                 wake_q_add(&wake_q, next);
1273         }
1274 
1275         if (owner & MUTEX_FLAG_HANDOFF)
1276                 __mutex_handoff(lock, next);
1277 
1278         spin_unlock(&lock->wait_lock);
1279 
1280         wake_up_q(&wake_q);
1281 }
1282 
1283 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1284 /*
1285  * Here come the less common (and hence less performance-critical) APIs:
1286  * mutex_lock_interruptible() and mutex_trylock().
1287  */
1288 static noinline int __sched
1289 __mutex_lock_killable_slowpath(struct mutex *lock);
1290 
1291 static noinline int __sched
1292 __mutex_lock_interruptible_slowpath(struct mutex *lock);
1293 
1294 /**
1295  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
1296  * @lock: The mutex to be acquired.
1297  *
1298  * Lock the mutex like mutex_lock().  If a signal is delivered while the
1299  * process is sleeping, this function will return without acquiring the
1300  * mutex.
1301  *
1302  * Context: Process context.
1303  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1304  * signal arrived.
1305  */
1306 int __sched mutex_lock_interruptible(struct mutex *lock)
1307 {
1308         might_sleep();
1309 
1310         if (__mutex_trylock_fast(lock))
1311                 return 0;
1312 
1313         return __mutex_lock_interruptible_slowpath(lock);
1314 }
1315 
1316 EXPORT_SYMBOL(mutex_lock_interruptible);
1317 
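/*
 * Usage sketch (illustrative only, not part of mutex.c): interruptible
 * acquisition as typically used on a syscall path, where a pending signal
 * should abort the wait. my_dev and do_ioctl() are hypothetical.
 *
 *	static long my_dev_ioctl(struct my_dev *dev, unsigned long arg)
 *	{
 *		long ret;
 *
 *		if (mutex_lock_interruptible(&dev->io_lock))
 *			return -ERESTARTSYS;
 *		ret = do_ioctl(dev, arg);
 *		mutex_unlock(&dev->io_lock);
 *		return ret;
 *	}
 */
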
1318 /**
1319  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
1320  * @lock: The mutex to be acquired.
1321  *
1322  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
1323  * the current process is delivered while the process is sleeping, this
1324  * function will return without acquiring the mutex.
1325  *
1326  * Context: Process context.
1327  * Return: 0 if the lock was successfully acquired or %-EINTR if a
1328  * fatal signal arrived.
1329  */
1330 int __sched mutex_lock_killable(struct mutex *lock)
1331 {
1332         might_sleep();
1333 
1334         if (__mutex_trylock_fast(lock))
1335                 return 0;
1336 
1337         return __mutex_lock_killable_slowpath(lock);
1338 }
1339 EXPORT_SYMBOL(mutex_lock_killable);
1340 
1341 /**
1342  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
1343  * @lock: The mutex to be acquired.
1344  *
1345  * Lock the mutex like mutex_lock().  While the task is waiting for this
1346  * mutex, it will be accounted as being in the IO wait state by the
1347  * scheduler.
1348  *
1349  * Context: Process context.
1350  */
1351 void __sched mutex_lock_io(struct mutex *lock)
1352 {
1353         int token;
1354 
1355         token = io_schedule_prepare();
1356         mutex_lock(lock);
1357         io_schedule_finish(token);
1358 }
1359 EXPORT_SYMBOL_GPL(mutex_lock_io);
1360 
1361 static noinline void __sched
1362 __mutex_lock_slowpath(struct mutex *lock)
1363 {
1364         __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
1365 }
1366 
1367 static noinline int __sched
1368 __mutex_lock_killable_slowpath(struct mutex *lock)
1369 {
1370         return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
1371 }
1372 
1373 static noinline int __sched
1374 __mutex_lock_interruptible_slowpath(struct mutex *lock)
1375 {
1376         return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
1377 }
1378 
1379 static noinline int __sched
1380 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1381 {
1382         return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
1383                                _RET_IP_, ctx);
1384 }
1385 
1386 static noinline int __sched
1387 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
1388                                             struct ww_acquire_ctx *ctx)
1389 {
1390         return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
1391                                _RET_IP_, ctx);
1392 }
1393 
1394 #endif
1395 
1396 /**
1397  * mutex_trylock - try to acquire the mutex, without waiting
1398  * @lock: the mutex to be acquired
1399  *
1400  * Try to acquire the mutex atomically. Returns 1 if the mutex
1401  * has been acquired successfully, and 0 on contention.
1402  *
1403  * NOTE: this function follows the spin_trylock() convention, so
1404  * it is negated from the down_trylock() return values! Be careful
1405  * about this when converting semaphore users to mutexes.
1406  *
1407  * This function must not be used in interrupt context. The
1408  * mutex must be released by the same task that acquired it.
1409  */
1410 int __sched mutex_trylock(struct mutex *lock)
1411 {
1412         bool locked;
1413 
1414 #ifdef CONFIG_DEBUG_MUTEXES
1415         DEBUG_LOCKS_WARN_ON(lock->magic != lock);
1416 #endif
1417 
1418         locked = __mutex_trylock(lock);
1419         if (locked)
1420                 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
1421 
1422         return locked;
1423 }
1424 EXPORT_SYMBOL(mutex_trylock);
1425 
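/*
 * Usage sketch (illustrative only, not part of mutex.c): note the
 * spin_trylock()-style convention, i.e. a nonzero return means the lock
 * *was* acquired. my_dev and do_flush() are hypothetical.
 *
 *	static void flush_if_uncontended(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->io_lock))
 *			return;
 *		do_flush(dev);
 *		mutex_unlock(&dev->io_lock);
 *	}
 */
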
1426 #ifndef CONFIG_DEBUG_LOCK_ALLOC
1427 int __sched
1428 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1429 {
1430         might_sleep();
1431 
1432         if (__mutex_trylock_fast(&lock->base)) {
1433                 if (ctx)
1434                         ww_mutex_set_context_fastpath(lock, ctx);
1435                 return 0;
1436         }
1437 
1438         return __ww_mutex_lock_slowpath(lock, ctx);
1439 }
1440 EXPORT_SYMBOL(ww_mutex_lock);
1441 
1442 int __sched
1443 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
1444 {
1445         might_sleep();
1446 
1447         if (__mutex_trylock_fast(&lock->base)) {
1448                 if (ctx)
1449                         ww_mutex_set_context_fastpath(lock, ctx);
1450                 return 0;
1451         }
1452 
1453         return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
1454 }
1455 EXPORT_SYMBOL(ww_mutex_lock_interruptible);
1456 
1457 #endif
1458 
1459 /**
1460  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
1461  * @cnt: the atomic counter to decrement
1462  * @lock: the mutex to return holding if we dec to 0
1463  *
1464  * Return: 1, with the mutex held, if the decrement reached 0; 0 otherwise.
1465  */
1466 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
1467 {
1468         /* dec if we can't possibly hit 0 */
1469         if (atomic_add_unless(cnt, -1, 1))
1470                 return 0;
1471         /* we might hit 0, so take the lock */
1472         mutex_lock(lock);
1473         if (!atomic_dec_and_test(cnt)) {
1474                 /* when we actually did the dec, we didn't hit 0 */
1475                 mutex_unlock(lock);
1476                 return 0;
1477         }
1478         /* we hit 0, and we hold the lock */
1479         return 1;
1480 }
1481 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
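
/*
 * Usage sketch (illustrative only, not part of mutex.c): the typical
 * refcount-drop pattern, where the final put must unlink the object under
 * the mutex protecting the lookup structure. my_obj, my_table_lock and
 * my_obj_destroy() are hypothetical.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_table_lock))
 *			return;
 *		list_del(&obj->node);
 *		mutex_unlock(&my_table_lock);
 *		my_obj_destroy(obj);
 *	}
 */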
