This source file includes the following definitions:
- ATOMIC_OP_ADD_RETURN
- ATOMIC_FETCH_OP_AND
- ATOMIC64_OP_ADD_RETURN
- ATOMIC64_FETCH_OP_AND
- ATOMIC64_FETCH_OP_SUB
#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

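/*
 * These are the ARMv8.1 LSE ("Large System Extensions") implementations
 * of the arm64 atomics: each operation maps onto a single atomic memory
 * instruction (STADD, LDCLR, CAS, CASP, ...) rather than an LL/SC retry
 * loop, and __LSE_PREAMBLE simply makes the LSE instructions acceptable
 * to the assembler.
 */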
#define ATOMIC_OP(op, asm_op) \
static inline void __lse_atomic_##op(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %w[i], %[v]\n" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v)); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP
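/*
 * For illustration, ATOMIC_OP(add, stadd) above expands (roughly) to:
 *
 *	static inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(
 *		__LSE_PREAMBLE
 *		" stadd %w[i], %[v]\n"
 *		: [i] "+r" (i), [v] "+Q" (v->counter)
 *		: "r" (v));
 *	}
 *
 * i.e. a single store-form atomic that adds i to v->counter and throws
 * the old value away.
 */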

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %w[i], %w[i], %[v]" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
	ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
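/*
 * The four instances generated for each op above differ only in the
 * instruction's ordering suffix and the clobber list: no suffix for
 * _relaxed, "a" (acquire) for _acquire, "l" (release) for _release and
 * "al" for the fully ordered variant, with a "memory" clobber whenever
 * ordering is implied. Each LD<op> form returns the value the location
 * held before the operation.
 */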

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
	u32 tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
	" add %w[i], %w[i], %w[tmp]" \
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC_OP_ADD_RETURN(_relaxed, )
ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC_OP_ADD_RETURN(_release, l, "memory")
ATOMIC_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC_OP_ADD_RETURN
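/*
 * LDADD deposits the value v->counter held before the addition in tmp,
 * so the trailing ADD is what reconstructs the new value that the
 * *_return variants must give back.
 */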

static inline void __lse_atomic_and(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	" mvn %w[i], %w[i]\n"
	" stclr %w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}
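/*
 * LSE has no atomic AND instruction, so AND is expressed as "clear the
 * bits that are clear in i": MVN complements the operand and STCLR
 * clears those bits in memory. The fetch_and variants below use LDCLR
 * the same way and additionally return the old value.
 */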

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" mvn %w[i], %w[i]\n" \
	" ldclr" #mb " %w[i], %w[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	" neg %w[i], %w[i]\n"
	" stadd %w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}
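/*
 * Likewise there is no atomic SUB: the operand is negated and added
 * with STADD. The sub_return and fetch_sub variants below reuse the
 * same NEG + LDADD trick.
 */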

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
	u32 tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
	" add %w[i], %w[i], %w[tmp]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC_OP_SUB_RETURN(_relaxed, )
ATOMIC_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" neg %w[i], %w[i]\n" \
	" ldadd" #mb " %w[i], %w[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC_FETCH_OP_SUB(_relaxed, )
ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC_FETCH_OP_SUB(_release, l, "memory")
ATOMIC_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC_FETCH_OP_SUB

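/*
 * The atomic64_t operations below mirror the 32-bit ones above; the
 * only differences are the s64/long operand types and the use of full
 * 64-bit X registers (no %w operand modifier) in the templates.
 */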
#define ATOMIC64_OP(op, asm_op) \
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op " %[i], %[v]\n" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v)); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" " #asm_op #mb " %[i], %[i], %[v]" \
	: [i] "+r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
	ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
	ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{ \
	unsigned long tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
	" add %[i], %[i], %x[tmp]" \
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed, )
ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
ATOMIC64_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	" mvn %[i], %[i]\n"
	" stclr %[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" mvn %[i], %[i]\n" \
	" ldclr" #mb " %[i], %[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	" neg %[i], %[i]\n"
	" stadd %[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
	unsigned long tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], %x[tmp], %[v]\n" \
	" add %[i], %[i], %x[tmp]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed, )
ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
ATOMIC64_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
{ \
	asm volatile( \
	__LSE_PREAMBLE \
	" neg %[i], %[i]\n" \
	" ldadd" #mb " %[i], %[i], %[v]" \
	: [i] "+&r" (i), [v] "+Q" (v->counter) \
	: "r" (v) \
	: cl); \
\
	return i; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed, )
ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
ATOMIC64_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

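/*
 * dec_if_positive has no single-instruction equivalent, so it is built
 * from a CASAL loop: load the counter, compute counter - 1, bail out if
 * that would be negative, then try to install it with CASAL. The two
 * SUBs compare the value CASAL observed with the value that was
 * expected and CBNZ retries on a mismatch. The register holding the v
 * pointer is reused as [ret], hence the (long)v return.
 */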
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1: ldr %x[tmp], %[v]\n"
	" subs %[ret], %x[tmp], #1\n"
	" b.lt 2f\n"
	" casal %x[tmp], %[ret], %[v]\n"
	" sub %x[tmp], %x[tmp], #1\n"
	" sub %x[tmp], %x[tmp], %[ret]\n"
	" cbnz %x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

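/*
 * __CMPXCHG_CASE generates the 8-, 16-, 32- and 64-bit cmpxchg helpers
 * on top of CASB/CASH/CAS, again in relaxed/acquire/release/fully
 * ordered flavours. CAS compares the location against old and, if they
 * match, stores new; either way the value read from memory is returned,
 * which is how the caller tells whether the exchange happened.
 */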
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static __always_inline u##sz \
__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
				u##sz old, \
				u##sz new) \
{ \
	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
	register u##sz x1 asm ("x1") = old; \
	register u##sz x2 asm ("x2") = new; \
	unsigned long tmp; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" mov %" #w "[tmp], %" #w "[old]\n" \
	" cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
	" mov %" #w "[ret], %" #w "[tmp]" \
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \
	  [tmp] "=&r" (tmp) \
	: [old] "r" (x1), [new] "r" (x2) \
	: cl); \
\
	return x0; \
}

__CMPXCHG_CASE(w, b, , 8, )
__CMPXCHG_CASE(w, h, , 16, )
__CMPXCHG_CASE(w, , , 32, )
__CMPXCHG_CASE(x, , , 64, )
__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
__CMPXCHG_CASE(x, , mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

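/*
 * __CMPXCHG_DBL wraps CASP, which compares and swaps a pair of 64-bit
 * words as a single atomic 128-bit operation. The EOR/ORR sequence
 * collapses the result to zero iff both words matched the expected
 * values, so the generated functions return 0 on a successful exchange
 * and non-zero otherwise.
 */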
#define __CMPXCHG_DBL(name, mb, cl...) \
static __always_inline long \
__lse__cmpxchg_double##name(unsigned long old1, \
				unsigned long old2, \
				unsigned long new1, \
				unsigned long new2, \
				volatile void *ptr) \
{ \
	unsigned long oldval1 = old1; \
	unsigned long oldval2 = old2; \
	register unsigned long x0 asm ("x0") = old1; \
	register unsigned long x1 asm ("x1") = old2; \
	register unsigned long x2 asm ("x2") = new1; \
	register unsigned long x3 asm ("x3") = new2; \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
	asm volatile( \
	__LSE_PREAMBLE \
	" casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	" eor %[old1], %[old1], %[oldval1]\n" \
	" eor %[old2], %[old2], %[oldval2]\n" \
	" orr %[old1], %[old1], %[old2]" \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(unsigned long *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
	: cl); \
\
	return x0; \
}

__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */