This source file includes the following definitions:
- arch_atomic64_read
- arch_atomic64_set
- arch_atomic64_add
- arch_atomic64_sub
- arch_atomic64_sub_and_test
- arch_atomic64_inc
- arch_atomic64_dec
- arch_atomic64_dec_and_test
- arch_atomic64_inc_and_test
- arch_atomic64_add_negative
- arch_atomic64_add_return
- arch_atomic64_sub_return
- arch_atomic64_fetch_add
- arch_atomic64_fetch_sub
- arch_atomic64_cmpxchg
- arch_atomic64_try_cmpxchg
- arch_atomic64_xchg
- arch_atomic64_and
- arch_atomic64_fetch_and
- arch_atomic64_or
- arch_atomic64_fetch_or
- arch_atomic64_xor
- arch_atomic64_fetch_xor
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

#define ATOMIC64_INIT(i)        { (i) }

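/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */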
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return READ_ONCE((v)->counter);
}

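/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */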
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        WRITE_ONCE(v->counter, i);
}

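/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */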
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

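/**
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */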
static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "subq %1,%0"
                     : "=m" (v->counter)
                     : "er" (i), "m" (v->counter) : "memory");
}

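/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */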
static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

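/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */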
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

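/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */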
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

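/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */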
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

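/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */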
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

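/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */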
static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative

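/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */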
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        return i + xadd(&v->counter, i);
}

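/* Atomically subtracts @i from @v and returns the new value. */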
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
        return arch_atomic64_add_return(-i, v);
}

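/* Atomically adds @i to @v and returns the value @v had before the addition. */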
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        return xadd(&v->counter, i);
}

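/* Atomically subtracts @i from @v and returns the value @v had before the subtraction. */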
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
        return xadd(&v->counter, -i);
}

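/*
 * Atomically compares @v with @old; if they are equal, sets @v to @new.
 * Returns the value @v held before the operation.
 */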
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
        return arch_cmpxchg(&v->counter, old, new);
}

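/*
 * Like cmpxchg, but returns true if the exchange happened; on failure,
 * *@old is updated to the current value of @v.
 */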
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
        return try_cmpxchg(&v->counter, old, new);
}

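/* Atomically exchanges the value of @v with @new and returns the old value. */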
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
        return arch_xchg(&v->counter, new);
}

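/* Atomically ANDs @i into @v; no return value. */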
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "andq %1,%0"
                     : "+m" (v->counter)
                     : "er" (i)
                     : "memory");
}

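/*
 * Atomically ANDs @i into @v and returns the old value, using a
 * try_cmpxchg loop: on failure, @val is refreshed with the current
 * value of @v and the update is retried.
 */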
static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
        return val;
}

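/* Atomically ORs @i into @v; no return value. */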
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "orq %1,%0"
                     : "+m" (v->counter)
                     : "er" (i)
                     : "memory");
}

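/* Atomically ORs @i into @v and returns the old value (try_cmpxchg loop, as above). */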
static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
        return val;
}

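/* Atomically XORs @i into @v; no return value. */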
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
        asm volatile(LOCK_PREFIX "xorq %1,%0"
                     : "+m" (v->counter)
                     : "er" (i)
                     : "memory");
}

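/* Atomically XORs @i into @v and returns the old value (try_cmpxchg loop, as above). */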
static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
        s64 val = arch_atomic64_read(v);

        do {
        } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
        return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */