/*
 * This source file includes the following definitions:
 *  - set_bit
 *  - __set_bit
 *  - clear_bit
 *  - clear_bit_unlock
 *  - __clear_bit_unlock
 *  - __clear_bit
 *  - change_bit
 *  - __change_bit
 *  - test_and_set_bit
 *  - __test_and_set_bit
 *  - test_and_clear_bit
 *  - __test_and_clear_bit
 *  - test_and_change_bit
 *  - __test_and_change_bit
 *  - test_bit
 *  - ffz
 *  - __ffs
 *  - ia64_fls
 *  - fls
 *  - __fls
 *  - __arch_hweight64
 */
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>

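/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic: it retries a cmpxchg with acquire semantics
 * until the read-modify-write succeeds.  See __set_bit() if you do not
 * require the atomic guarantees.  Note that @nr may be almost
 * arbitrarily large; the bit number is resolved to a 32-bit word
 * (@nr >> 5) plus a bit within that word (@nr & 31), so @addr must be
 * at least 4-byte aligned.
 */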
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

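/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it is used on the same region of memory concurrently, the effect
 * may be that only one operation succeeds.
 */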
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}

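/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  It does not imply a
 * full memory barrier, so callers using it for locking purposes should
 * pair it with smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * to ensure changes are visible to other processors.
 */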
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}

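/**
 * clear_bit_unlock - Clears a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * The same atomic cmpxchg loop as clear_bit(), but using cmpxchg_rel:
 * the release semantics make it suitable for the unlock side of a bit
 * lock.
 */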
static __inline__ void
clear_bit_unlock (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_rel(m, old, new) != old);
}

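/**
 * __clear_bit_unlock - Non-atomically clear a bit with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * The read-modify-write here is not atomic, so the caller must be the
 * only writer to the word.  The store itself is performed with
 * ia64_st4_rel_nta (a 4-byte store with release semantics and a
 * non-temporal hint), which still provides the ordering required to
 * unlock a bit lock.
 */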
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
{
	__u32 * const m = (__u32 *) addr + (nr >> 5);
	__u32 const new = *m & ~(1 << (nr & 31));

	ia64_st4_rel_nta(m, new);
}

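/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 */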
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}

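/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */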
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

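/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 */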
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

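/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered; the cmpxchg_acq
 * also implies the acquisition side of the memory barrier.
 */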
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

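/**
 * test_and_set_bit_lock - Set a bit and return its old value, for locks
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * On ia64 this is simply test_and_set_bit(): its acquire semantics
 * already provide the ordering needed when taking a bit lock.
 */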
#define test_and_set_bit_lock test_and_set_bit

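/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.  If two instances
 * of this operation race, one can appear to succeed but actually fail;
 * protect concurrent accesses with a lock.
 */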
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

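/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered; the cmpxchg_acq
 * also implies the acquisition side of the memory barrier.
 */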
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

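/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.  If two instances
 * of this operation race, one can appear to succeed but actually fail;
 * protect concurrent accesses with a lock.
 */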
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

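/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered; the cmpxchg_acq
 * also implies the acquisition side of the memory barrier.
 */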
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

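/*
 * __test_and_change_bit - non-atomic variant of test_and_change_bit():
 * toggles the bit and returns its old value without any atomicity or
 * ordering guarantees.
 */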
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

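/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */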
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

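/**
 * ffz - find the first zero bit in a word
 * @x: The value to search
 *
 * Returns the bit number of the first (least significant) zero bit in
 * @x.  The result is undefined if no zero bit exists, so callers should
 * check against ~0UL first.  x & (~x - 1) keeps exactly the trailing
 * one bits of @x, so its popcount is the index of the first zero bit.
 */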
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}

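/**
 * __ffs - find the first set bit in a word
 * @x: The word to search
 *
 * Returns the bit number of the first (least significant) set bit in
 * @x.  Undefined if no bit is set, so callers should check against 0
 * first.  (x - 1) & ~x keeps exactly the trailing zero bits of @x, so
 * its popcount is the index of the least significant set bit.
 */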
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}

#ifdef __KERNEL__

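/*
 * ia64_fls - find the last (most significant) set bit in a word.
 * Undefined if no bit is set.  Converts @x to long double and reads the
 * exponent field with getf.exp: for 2^n <= x < 2^(n+1) the field holds
 * 0xffff + n, so subtracting the 0xffff bias yields n.
 */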
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}

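/*
 * fls - find the last set bit in a 32-bit word, counting from 1; returns
 * 0 if no bits are set.  The shift sequence smears the highest set bit
 * into every lower position, so the popcount of the result is the
 * 1-based index of that bit.
 */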
static inline int fls(unsigned int t)
{
	unsigned long x = t & 0xffffffffu;

	if (!x)
		return 0;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return ia64_popcnt(x);
}

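/*
 * __fls - find the last set bit in a 64-bit word, counting from 0.
 * Undefined if no bits are set.  Same smearing trick as fls(), extended
 * with an "x >> 32" step for 64-bit operands; popcount - 1 gives the
 * 0-based index.
 */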
static inline unsigned long
__fls (unsigned long x)
{
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;
	return ia64_popcnt(x) - 1;
}

#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/builtin-ffs.h>

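/*
 * __arch_hweight64 - count the set bits ("Hamming weight") of a 64-bit
 * word with a single ia64 popcnt instruction.  The 32-, 16- and 8-bit
 * variants below simply mask the argument first.
 */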
static __inline__ unsigned long __arch_hweight64(unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define __arch_hweight32(x) ((unsigned int) __arch_hweight64((x) & 0xfffffffful))
#define __arch_hweight16(x) ((unsigned int) __arch_hweight64((x) & 0xfffful))
#define __arch_hweight8(x)  ((unsigned int) __arch_hweight64((x) & 0xfful))

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */