root/arch/x86/include/asm/qspinlock.h

DEFINITIONS

This source file includes the following definitions:
  1. queued_fetch_set_pending_acquire
  2. native_queued_spin_unlock
  3. queued_spin_lock_slowpath
  4. queued_spin_unlock
  5. vcpu_is_preempted
  6. virt_spin_lock
  7. native_pv_lock_init

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

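/*
 * Bound on how long the generic slowpath (kernel/locking/qspinlock.c)
 * spins waiting for a pending->locked handover before queueing; the
 * generic default is 1, x86 can profitably wait quite a bit longer.
 */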
#define _Q_PENDING_LOOPS        (1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
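/*
 * Atomically set the pending bit with a LOCK BTSL (which on x86 implies
 * acquire ordering) and hand back a lock value for the caller: the
 * pending field reflects its pre-RMW state, while the locked/tail fields
 * come from a subsequent plain read.
 */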
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val;

        /*
         * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
         * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
         * statement expression, which GCC doesn't like.
         */
        val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
                               "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}

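/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slowpath is a paravirt call,
 * patched at boot to either native_queued_spin_lock_slowpath() or
 * __pv_queued_spin_lock_slowpath() depending on the hypervisor.
 */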
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

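/*
 * Unlock is also a paravirt call: the PV flavour may kick a waiting
 * vCPU that was put to sleep instead of spinning.
 */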
static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

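/*
 * Lets lock spinners (and the scheduler) ask the hypervisor whether the
 * vCPU currently holding a resource has been preempted, so they can stop
 * busy-waiting on it.
 */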
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

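/*
 * Boot-time hook that disables virt_spin_lock_key when not running under
 * a hypervisor, so bare metal keeps the fair queued slowpath.
 */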
void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */

        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */
