This source file includes the following definitions:
- queued_fetch_set_pending_acquire
- native_queued_spin_unlock
- queued_spin_lock_slowpath
- queued_spin_unlock
- vcpu_is_preempted
- virt_spin_lock
- native_pv_lock_init

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

/*
 * x86 override of the generic pending-wait bound: spin up to (1 << 9)
 * iterations waiting for the pending->locked handover in the slowpath.
 */
#define _Q_PENDING_LOOPS (1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
        u32 val;

        /*
         * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
         * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
         * statement expression, which GCC doesn't like.
         */
        val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
                               "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

        return val;
}

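The x86 version above sets the pending bit with a locked btsl and reconstructs the rest of the word with a separate read; the generic fallback expresses the same operation as a single fetch-or. A user-space C11 rendering of that pattern (a sketch, assuming the pending bit sits at offset 8 as in qspinlock_types.h; names here are illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define PENDING_VAL (1u << 8)   /* assumes _Q_PENDING_OFFSET == 8 */

/* Atomically set the pending bit and return the previous lock word,
 * with acquire ordering. */
static uint32_t fetch_set_pending_acquire(_Atomic uint32_t *val)
{
        return atomic_fetch_or_explicit(val, PENDING_VAL,
                                        memory_order_acquire);
}

The x86 split exists because, per the comment in the function above, GEN_BINARY_RMWcc() cannot sit inside an if() statement; the pending-bit update itself still stays a single locked instruction.
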
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
        smp_store_release(&lock->locked, 0);
}

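Because the locked flag is the least-significant byte of the 32-bit lock word, release is a single byte-wide store-release that leaves the pending and tail fields untouched. A minimal user-space sketch of the same layout trick, assuming little-endian and C11 atomics (sketch_lock/sketch_unlock are illustrative names, not kernel APIs):

#include <stdatomic.h>
#include <stdint.h>

struct sketch_lock {
        _Atomic uint8_t locked;         /* low byte of the 32-bit word */
        uint8_t         pending;
        uint16_t        tail;
};

/* Release: clear only the locked byte, publishing the critical section
 * with release ordering while preserving pending/tail. */
static void sketch_unlock(struct sketch_lock *lock)
{
        atomic_store_explicit(&lock->locked, 0, memory_order_release);
}
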
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
        pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
        return pv_vcpu_is_preempted(cpu);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

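vcpu_is_preempted() gives spin-wait loops a way to notice that the CPU they depend on is a preempted vCPU and stop wasting cycles. A hypothetical kernel-style use (spin_wait_or_bail() is an illustration, not a real kernel helper):

/* Spin until the lock is free, but bail out if the presumed holder's
 * vCPU has been preempted so the caller can block instead. */
static bool spin_wait_or_bail(struct qspinlock *lock, long holder_cpu)
{
        while (atomic_read(&lock->val)) {
                if (vcpu_is_preempted(holder_cpu))
                        return false;
                cpu_relax();
        }
        return true;
}
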
#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_spin_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * On hypervisors without PARAVIRT_SPINLOCKS support we fall
         * back to a Test-and-Set spinlock, because fair locks have
         * horrible lock 'holder' preemption issues.
         */
        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

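The do/while in virt_spin_lock() is the classic test-and-test-and-set lock: spin read-only until the word looks free, then attempt one atomic claim. The same loop in portable C11, as a user-space sketch (tas_lock_sketch is an illustrative name; 0 = unlocked, 1 = locked):

#include <stdatomic.h>

static void tas_lock_sketch(_Atomic unsigned int *val)
{
        unsigned int expected;

        do {
                /* Read-only spin keeps the cache line shared until free. */
                while (atomic_load_explicit(val, memory_order_relaxed))
                        ;       /* a pause/yield hint would go here */
                expected = 0;
        } while (!atomic_compare_exchange_weak_explicit(val, &expected, 1,
                                                memory_order_acquire,
                                                memory_order_relaxed));
}

As the comment in virt_spin_lock() notes, this unfair lock is deliberately preferred under a hypervisor, because a fair queued lock degrades badly when the lock holder's vCPU is preempted.
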
#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */