This source file includes the following definitions:
- __LL_SC_FALLBACK
- ATOMIC_OP, ATOMIC_OP_RETURN, ATOMIC_FETCH_OP, ATOMIC_OPS
- ATOMIC64_OP, ATOMIC64_OP_RETURN, ATOMIC64_FETCH_OP, ATOMIC64_OPS
- __ll_sc_atomic64_dec_if_positive
- __CMPXCHG_CASE
- __CMPXCHG_DBL
#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

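/*
 * When the LSE atomics are enabled, the LL/SC sequences below are only
 * used as an out-of-line fallback: __LL_SC_FALLBACK() places the code in
 * a subsection, off the fast path, and branches to and from it. Without
 * LSE support the sequence is emitted inline as-is.
 */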
#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
"3:\n" \
asm_ops "\n" \
" b 4f\n" \
" .previous\n" \
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif

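/*
 * The 'K' constraint (an immediate suitable for a 32-bit logical
 * instruction) is not handled reliably by all supported compilers. When
 * CONFIG_CC_HAS_K_CONSTRAINT is not set, define K to nothing so that the
 * affected operands below degrade to a plain register constraint.
 */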
#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif

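/*
 * AArch64 UP and SMP safe atomic ops. Load-exclusive/store-exclusive
 * (LDXR/STXR) pairs make the read-modify-write atomic; the sequence loops
 * until the store-exclusive succeeds.
 */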
#define ATOMIC_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        asm volatile("// atomic_" #op "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n") \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i)); \
}

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline int \
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
 \
        asm volatile("// atomic_" #op "_return" #name "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" st" #rel "xr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
 \
        return result; \
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int val, result; \
 \
        asm volatile("// atomic_fetch_" #op #name "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %w0, %3\n" \
" " #asm_op " %w1, %w0, %w4\n" \
" st" #rel "xr %w2, %w1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
 \
        return result; \
}

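/*
 * ATOMIC_OPS() instantiates the plain op plus its _return and fetch_ forms
 * in four memory orderings: fully ordered (trailing "dmb ish"), _relaxed,
 * _acquire (load-acquire) and _release (store-release).
 */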
#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

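/*
 * 'I' accepts an immediate that is valid for an ADD instruction; 'J'
 * accepts one that is valid for SUB.
 */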
ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)

#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

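/*
 * The bitwise ops have no _return form. 'K' is an immediate usable with a
 * 32-bit logical instruction (and may have been defined away above).
 */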
ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)

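/*
 * The assembler rewrites the BIC (immediate) alias as AND (immediate) with
 * the inverted immediate, and no constraint describes that encoding, so
 * andnot uses a plain register operand (empty constraint).
 */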
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

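/*
 * 64-bit counterparts of the ops above: the same LL/SC pattern operating
 * on s64 values in full X registers.
 */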
#define ATOMIC64_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
 \
        asm volatile("// atomic64_" #op "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b") \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i)); \
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
 \
        asm volatile("// atomic64_" #op "_return" #name "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" st" #rel "xr %w1, %0, %2\n" \
" cbnz %w1, 1b\n" \
" " #mb ) \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
 \
        return result; \
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
        s64 result, val; \
        unsigned long tmp; \
 \
        asm volatile("// atomic64_fetch_" #op #name "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %3\n" \
"1: ld" #acq "xr %0, %3\n" \
" " #asm_op " %1, %0, %4\n" \
" st" #rel "xr %w2, %1, %3\n" \
" cbnz %w2, 1b\n" \
" " #mb ) \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
 \
        return result; \
}

#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

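/* 'L' is an immediate usable with a 64-bit logical instruction. */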
ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)

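/* As above: BIC has no usable immediate constraint, so use a register. */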
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

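/*
 * Decrement v->counter only if the result stays non-negative. The
 * decremented value is returned either way; when it would go negative,
 * nothing is stored and no barrier is issued.
 */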
static inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
        __LL_SC_FALLBACK(
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.lt 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
" dmb ish\n"
"2:")
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}

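/*
 * __CMPXCHG_CASE() generates cmpxchg for 8/16/32/64-bit operands, in the
 * relaxed, acquire ("acq_"), release ("rel_") and fully ordered ("mb_")
 * flavours, returning the value observed before the attempted store.
 */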
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
static inline u##sz \
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
                                 unsigned long old, \
                                 u##sz new) \
{ \
        unsigned long tmp; \
        u##sz oldval; \
 \
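        /* \
         * Sub-word sizes need an explicit cast so that the comparison \
         * below does not see stale upper bits in the register holding \
         * "old". \
         */ \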
        if (sz < 32) \
                old = (u##sz)old; \
 \
        asm volatile( \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %[v]\n" \
"1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
" st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:") \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
          [v] "+Q" (*(u##sz *)ptr) \
        : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
        : cl); \
 \
        return oldval; \
}

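/*
 * Some older GCC releases have reportedly mishandled the 'K' constraint
 * for the value 4294967295, which is why CONFIG_CC_HAS_K_CONSTRAINT
 * (checked above) can turn the 'K' below into no constraint at all.
 */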
__CMPXCHG_CASE(w, b, , 8, , , , , K)
__CMPXCHG_CASE(w, h, , 16, , , , , K)
__CMPXCHG_CASE(w, , , 32, , , , , K)
__CMPXCHG_CASE( , , , 64, , , , , L)
__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)

#undef __CMPXCHG_CASE

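/*
 * __CMPXCHG_DBL() compares and swaps a pair of adjacent 64-bit words with
 * LDXP/STXP. It returns zero on success and non-zero if either word failed
 * the comparison.
 */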
#define __CMPXCHG_DBL(name, mb, rel, cl) \
static inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \
                              unsigned long old2, \
                              unsigned long new1, \
                              unsigned long new2, \
                              volatile void *ptr) \
{ \
        unsigned long tmp, ret; \
 \
        asm volatile("// __cmpxchg_double" #name "\n" \
        __LL_SC_FALLBACK( \
" prfm pstl1strm, %2\n" \
"1: ldxp %0, %1, %2\n" \
" eor %0, %0, %3\n" \
" eor %1, %1, %4\n" \
" orr %1, %0, %1\n" \
" cbnz %1, 2f\n" \
" st" #rel "xp %w0, %5, %6, %2\n" \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
"2:") \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
        : cl); \
 \
        return ret; \
}

__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL
#undef K

#endif /* __ASM_ATOMIC_LL_SC_H */