/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting..
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
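
/*
 * Illustrative use of the accessors above; "nr_users" is a made-up
 * name for this example only:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 5);
 *	pr_info("users: %d\n", atomic_read(&nr_users));
 */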

#if XCHAL_HAVE_EXCLUSIVE
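/*
 * Exclusive-access implementation: L32EX loads the word and arms the
 * exclusive monitor, S32EX performs the store only if the monitor is
 * still armed, and GETEX fetches the S32EX success flag into the
 * register so the loop can retry until the store actually happened.
 */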
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
}

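/*
 * S32EX reuses %0 for its status flag, so the operation is applied
 * once more after the loop to recompute the new value (from the last
 * value loaded into tmp) for the return.
 */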
#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			"       " #op " %0, %1, %2\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

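/*
 * The fetch variant returns the counter's previous value, i.e. the
 * value last loaded by L32EX (still held in tmp).
 */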
#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32ex   %1, %3\n"		\
			"       " #op " %0, %1, %2\n"		\
			"       s32ex   %0, %3\n"		\
			"       getex   %0\n"			\
			"       beqz    %0, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return tmp;						\
}

#elif XCHAL_HAVE_S32C1I
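/*
 * Compare-and-swap implementation: the current value is loaded and
 * copied to SCOMPARE1; S32C1I stores the updated value only if the
 * word in memory still equals SCOMPARE1 and writes the observed
 * memory value back into %0, so the loop retries until it matches
 * the value originally loaded.
 */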
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
}

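/*
 * On exit from the loop %0 holds the old value, so the operation is
 * applied once more to produce the new value for the return.
 */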
#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			"       " #op " %0, %0, %2\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned long tmp;					\
	int result;						\
								\
	__asm__ __volatile__(					\
			"1:     l32i    %1, %3, 0\n"		\
			"       wsr     %1, scompare1\n"	\
			"       " #op " %0, %1, %2\n"		\
			"       s32c1i  %0, %3, 0\n"		\
			"       bne     %0, %1, 1b\n"		\
			: "=&a" (result), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "memory"				\
			);					\
								\
	return result;						\
}

#else

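/*
 * Fallback for cores with neither exclusive nor compare-and-swap
 * stores: raise the interrupt level to TOPLEVEL with RSIL around a
 * plain load-modify-store. This excludes local interrupts only and
 * is therefore suitable for non-SMP configurations.
 */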
#define ATOMIC_OP(op)						\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
}

#define ATOMIC_OP_RETURN(op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	unsigned int vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"		\
			"       " #op " %0, %0, %1\n"		\
			"       s32i    %0, %2, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval)				\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
								\
	return vval;						\
}

#define ATOMIC_FETCH_OP(op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	unsigned int tmp, vval;					\
								\
	__asm__ __volatile__(					\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %3, 0\n"		\
			"       " #op " %1, %0, %2\n"		\
			"       s32i    %1, %3, 0\n"		\
			"       wsr     a15, ps\n"		\
			"       rsync\n"			\
			: "=&a" (vval), "=&a" (tmp)		\
			: "a" (i), "a" (v)			\
			: "a15", "memory"			\
			);					\
								\
	return vval;						\
}

#endif

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

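/*
 * ATOMIC_OPS(add), for example, expands to the three definitions
 * atomic_add(), atomic_fetch_add() and atomic_add_return().
 */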
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
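
/*
 * Illustrative one-shot initialization using atomic_cmpxchg();
 * "init_done" and "do_init()" are made-up names for this example:
 *
 *	static atomic_t init_done = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&init_done, 0, 1) == 0)
 *		do_init();	(runs for the first caller only)
 */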

#endif /* _XTENSA_ATOMIC_H */