This source file includes the following definitions:
- arch_atomic64_cmpxchg
- arch_atomic64_xchg
- arch_atomic64_set
- arch_atomic64_read
- arch_atomic64_add_return
- arch_atomic64_sub_return
- arch_atomic64_inc_return
- arch_atomic64_dec_return
- arch_atomic64_add
- arch_atomic64_sub
- arch_atomic64_inc
- arch_atomic64_dec
- arch_atomic64_add_unless
- arch_atomic64_inc_not_zero
- arch_atomic64_dec_if_positive
- arch_atomic64_and
- arch_atomic64_fetch_and
- arch_atomic64_or
- arch_atomic64_fetch_or
- arch_atomic64_xor
- arch_atomic64_fetch_xor
- arch_atomic64_fetch_add
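For orientation, here is a minimal usage sketch (a hypothetical caller, not part of this header): a 64-bit reference count on 32-bit x86. The names refs, get_ref() and put_ref() are illustrative only; the sketch assumes a kernel context where <linux/types.h> provides bool and the generic atomic64_* API maps onto these arch_atomic64_* operations.

static atomic64_t refs = ATOMIC64_INIT(1);

static void get_ref(void)
{
        /* Atomically bump the 64-bit counter. */
        arch_atomic64_inc(&refs);
}

static bool put_ref(void)
{
        /* True when the last reference has been dropped. */
        return arch_atomic64_dec_return(&refs) == 0;
}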
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>

/* A 64-bit atomic type for 32-bit x86 */

typedef struct {
        s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)      { (val) }

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
        ATOMIC64_EXPORT(atomic64_##sym)
#endif

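/*
 * Dispatch note: the out-of-line atomic64_*_cx8 helpers use the
 * cmpxchg8b instruction.  When CONFIG_X86_CMPXCHG64 guarantees that
 * instruction, they are called directly; otherwise alternative_call()
 * patches at boot between the _cx8 helpers and plain-386 fallbacks,
 * keyed on X86_FEATURE_CX8.
 */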
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
        asm volatile("call %P[func]" \
                     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
        alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
                         X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
        ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in...) \
        __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

/**
 * arch_atomic64_cmpxchg - cmpxchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @o: expected value
 * @n: new value
 *
 * Atomically sets @v to @n if it was equal to @o and returns
 * the old value.
 */
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
        return arch_cmpxchg64(&v->counter, o, n);
}

/**
 * arch_atomic64_xchg - xchg atomic64 variable
 * @v: pointer to type atomic64_t
 * @n: value to assign
 *
 * Atomically xchgs the value of @v to @n and returns
 * the old value.
 */
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
        s64 o;
        unsigned high = (unsigned)(n >> 32);
        unsigned low = (unsigned)n;
        /* "=&A" ties the 64-bit result to the EDX:EAX register pair. */
        alternative_atomic64(xchg, "=&A" (o),
                             "S" (v), "b" (low), "c" (high)
                             : "memory");
        return o;
}
/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: value to assign
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        unsigned high = (unsigned)(i >> 32);
        unsigned low = (unsigned)i;
        alternative_atomic64(set, /* no output */,
                             "S" (v), "b" (low), "c" (high)
                             : "eax", "edx", "memory");
}

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v and returns it.
 */
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        s64 r;
        alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
        return r;
}
/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + *@v
 */
static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        alternative_atomic64(add_return,
                             ASM_OUTPUT2("+A" (i), "+c" (v)),
                             ASM_NO_INPUT_CLOBBER("memory"));
        return i;
}

/*
 * Other variants with different arithmetic operators:
 */
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
        alternative_atomic64(sub_return,
                             ASM_OUTPUT2("+A" (i), "+c" (v)),
                             ASM_NO_INPUT_CLOBBER("memory"));
        return i;
}

static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
        s64 a;
        alternative_atomic64(inc_return, "=&A" (a),
                             "S" (v) : "memory", "ecx");
        return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
        s64 a;
        alternative_atomic64(dec_return, "=&A" (a),
                             "S" (v) : "memory", "ecx");
        return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
        __alternative_atomic64(add, add_return,
                               ASM_OUTPUT2("+A" (i), "+c" (v)),
                               ASM_NO_INPUT_CLOBBER("memory"));
        return i;
}

/**
 * arch_atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
        __alternative_atomic64(sub, sub_return,
                               ASM_OUTPUT2("+A" (i), "+c" (v)),
                               ASM_NO_INPUT_CLOBBER("memory"));
        return i;
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static inline void arch_atomic64_inc(atomic64_t *v)
{
        __alternative_atomic64(inc, inc_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static inline void arch_atomic64_dec(atomic64_t *v)
{
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec
/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
        unsigned low = (unsigned)u;
        unsigned high = (unsigned)(u >> 32);
        alternative_atomic64(add_unless,
                             ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
                             "S" (v) : "memory");
        return (int)a;
}

static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
        int r;
        alternative_atomic64(inc_not_zero, "=&a" (r),
                             "S" (v) : "ecx", "edx", "memory");
        return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 r;
        alternative_atomic64(dec_if_positive, "=&A" (r),
                             "S" (v) : "ecx", "memory");
        return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64

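/*
 * The bitwise and fetch_* operations below are built from a cmpxchg
 * loop: take a guess @c at the current value, compute the new value
 * from it, and let arch_atomic64_cmpxchg() confirm that nobody changed
 * @v in the meantime; on failure, retry with the freshly observed value.
 */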
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
                c = old;
}

static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
                c = old;

        return old;
}

static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
                c = old;
}

static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
                c = old;

        return old;
}

static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
                c = old;
}

static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
                c = old;

        return old;
}

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        s64 old, c = 0;

        while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
                c = old;

        return old;
}

#define arch_atomic64_fetch_sub(i, v)   arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */
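For reference, the cmpxchg-loop pattern above can be reproduced as a standalone user-space sketch. The names cmpxchg64() and fetch_or64() below are hypothetical stand-ins: the GCC/Clang builtin __atomic_compare_exchange_n() plays the role of arch_cmpxchg64(), which this header takes from the rest of the kernel.

#include <stdint.h>

/* Stand-in for arch_cmpxchg64(): store n into *p only if *p == o,
 * and return the value that was found at *p either way. */
static int64_t cmpxchg64(int64_t *p, int64_t o, int64_t n)
{
        __atomic_compare_exchange_n(p, &o, n, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return o;       /* on failure, o was updated to the observed value */
}

/* Same shape as arch_atomic64_fetch_or() above. */
static int64_t fetch_or64(int64_t *p, int64_t i)
{
        int64_t old, c = 0;

        while ((old = cmpxchg64(p, c, c | i)) != c)
                c = old;

        return old;
}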