/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types in the external API unless you are
 * really sure what you are doing!
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

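/*
 * ATOMIC_INIT()/ATOMIC64_INIT() are for static initialization, e.g.
 * "static atomic_t refcount = ATOMIC_INIT(1);".  atomic_read() and
 * atomic_set() are simple loads and stores of the counter;
 * READ_ONCE()/WRITE_ONCE() only keep the compiler from tearing,
 * fusing, or caching the access.
 */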
#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

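/*
 * ia64 has no general-purpose atomic read-modify-write instruction, so
 * each operation is built as a retry loop: read the counter, compute
 * the new value, and try to install it with a cmpxchg with acquire
 * semantics (the "acq" argument below).  If another CPU updated the
 * counter in the meantime, the cmpxchg returns something other than
 * "old" and the loop retries.  ATOMIC_OP() returns the new value.
 */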
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
ia64_atomic_##op (int i, atomic_t *v) \
{ \
	__s32 old, new; \
	CMPXCHG_BUGCHECK_DECL \
 \
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new; \
}

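/*
 * Same retry loop as ATOMIC_OP(), but with fetch_op semantics: the
 * caller gets back the value observed before the operation applied.
 */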
#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
	__s32 old, new; \
	CMPXCHG_BUGCHECK_DECL \
 \
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old; \
}

#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

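/*
 * The ia64 fetchadd instruction only accepts an immediate increment
 * from the set {-16, -8, -4, -1, 1, 4, 8, 16}.  __ia64_atomic_const(i)
 * evaluates to 1 when "i" is a compile-time constant from that set, so
 * the macros below can use a single fetchadd instead of the cmpxchg
 * loop.  When optimizing, the compiler folds the static const variable
 * away; without optimization (no __OPTIMIZE__) we always take the loop.
 */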
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i) \
	static const int __ia64_atomic_p = __builtin_constant_p(i) ? \
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0; \
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

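/*
 * For example (hypothetical caller): atomic_add_return(4, &v) compiles
 * down to a single fetchadd4 instruction because 4 is a permitted
 * immediate, while atomic_add_return(5, &v) falls back to the
 * ia64_atomic_add() cmpxchg loop.  The __ia64_aar_i/__ia64_asr_i
 * temporaries make sure "i" is evaluated exactly once.
 */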
#define atomic_add_return(i,v) \
({ \
	int __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
		: ia64_atomic_add(__ia64_aar_i, v); \
})

#define atomic_sub_return(i,v) \
({ \
	int __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
		: ia64_atomic_sub(__ia64_asr_i, v); \
})

#define atomic_fetch_add(i,v) \
({ \
	int __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
		: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})

#define atomic_fetch_sub(i,v) \
({ \
	int __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
		: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})

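/*
 * There is no fetchadd-style shortcut for the bitwise operations, so
 * and/or/xor always take the cmpxchg loop.  Only the fetch_* variants
 * are generated; the void forms below simply discard the result.
 */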
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)		(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)		(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)		(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

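/*
 * The 64-bit variants mirror the 32-bit ones exactly; the 8-byte
 * operand size comes from sizeof(atomic64_t) in the cmpxchg loop and
 * from the 64-bit counter type in the fetchadd fast paths.
 */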
#define ATOMIC64_OP(op, c_op) \
static __inline__ s64 \
ia64_atomic64_##op (s64 i, atomic64_t *v) \
{ \
	s64 old, new; \
	CMPXCHG_BUGCHECK_DECL \
 \
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic64_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 \
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
{ \
	s64 old, new; \
	CMPXCHG_BUGCHECK_DECL \
 \
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic64_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old; \
}

#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v) \
({ \
	s64 __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
		: ia64_atomic64_add(__ia64_aar_i, v); \
})

#define atomic64_sub_return(i,v) \
({ \
	s64 __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
		: ia64_atomic64_sub(__ia64_asr_i, v); \
})

#define atomic64_fetch_add(i,v) \
({ \
	s64 __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
		: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})

#define atomic64_fetch_sub(i,v) \
({ \
	s64 __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

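/*
 * cmpxchg() and xchg() map straight to the ia64 cmpxchg and xchg
 * instructions via the intrinsics pulled in from <asm/intrinsics.h>;
 * the wrappers below simply operate on the embedded counter.
 */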
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

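/*
 * Plain add/sub are the return-value-discarding forms of the
 * *_return() operations above, so they inherit the same fetchadd
 * fast path for small constant increments.
 */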
#define atomic_add(i,v)		(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)		(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)	(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)	(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */