#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H

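/*
 * Atomic operations for SH parts using the gUSA ("g" User Space Atomicity)
 * rollback scheme.  Each sequence below saves the stack pointer in r1,
 * loads the negated byte length of the critical region into r15 (LOGIN),
 * and puts the address of the end label in r0 via mova.  If the task is
 * interrupted while r15 is negative, the kernel's gUSA handling rolls the
 * PC back to r0 + r15, i.e. the start of the region, so the
 * load/modify/store sequence restarts from scratch and is atomic with
 * respect to the local CPU.
 */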
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-6,   r15     \n\t" /* LOGIN: 3 insns = 6 bytes */ \
		"   mov.l  @%1,   %0      \n\t" /* load old value */	\
		" " #op "   %2,   %0      \n\t" /* apply the op */	\
		"   mov.l   %0,   @%1     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp),						\
		  "+r"  (v)						\
		: "r"   (i)						\
		: "memory", "r0", "r1");				\
}

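/* Like ATOMIC_OP(), but the new value is also returned. */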
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-6,   r15     \n\t" /* LOGIN: 3 insns = 6 bytes */ \
		"   mov.l  @%1,   %0      \n\t" /* load old value */	\
		" " #op "   %2,   %0      \n\t" /* apply the op */	\
		"   mov.l   %0,   @%1     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp),						\
		  "+r"  (v)						\
		: "r"   (i)						\
		: "memory", "r0", "r1");				\
									\
	return tmp;							\
}

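/*
 * Like ATOMIC_OP_RETURN(), but the value loaded before the operation is
 * saved in %1 and returned instead of the new one.
 */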
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int res, tmp;							\
									\
	__asm__ __volatile__ (						\
		"   .align 2              \n\t"				\
		"   mova    1f,   r0      \n\t" /* r0 = end point */	\
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */	\
		"   mov    #-8,   r15     \n\t" /* LOGIN: 4 insns = 8 bytes */ \
		"   mov.l  @%2,   %0      \n\t" /* load old value */	\
		"   mov     %0,   %1      \n\t" /* save old value */	\
		" " #op "   %3,   %0      \n\t" /* apply the op */	\
		"   mov.l   %0,   @%2     \n\t" /* store new value */	\
		"1: mov     r1,   r15     \n\t" /* LOGOUT */		\
		: "=&r" (tmp), "=&r" (res), "+r" (v)			\
		: "r"   (i)						\
		: "memory", "r0", "r1");				\
									\
	return res;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
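
/*
 * The expansions above provide atomic_{add,sub}(), atomic_{add,sub}_return()
 * and atomic_fetch_{add,sub,and,or,xor}().  As a rough, non-atomic sketch,
 * each fetch variant behaves like:
 *
 *	int atomic_fetch_or(int i, atomic_t *v)
 *	{
 *		int old = v->counter;
 *		v->counter = old | i;
 *		return old;
 *	}
 *
 * except that the gUSA region makes the load/modify/store indivisible on
 * the local CPU.
 */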

#endif /* __ASM_SH_ATOMIC_GRB_H */