This source file includes the following definitions:
- xchg_u32
- xchg_u16
- xchg_u8
- __cmpxchg_u32
1
2 #ifndef __ASM_SH_CMPXCHG_GRB_H
3 #define __ASM_SH_CMPXCHG_GRB_H
4
5 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
6 {
7 unsigned long retval;
8
9 __asm__ __volatile__ (
10 " .align 2 \n\t"
11 " mova 1f, r0 \n\t"
12 " nop \n\t"
13 " mov r15, r1 \n\t"
14 " mov #-4, r15 \n\t"
15 " mov.l @%1, %0 \n\t"
16 " mov.l %2, @%1 \n\t"
17 "1: mov r1, r15 \n\t"
18 : "=&r" (retval),
19 "+r" (m),
20 "+r" (val)
21 :
22 : "memory", "r0", "r1");
23
24 return retval;
25 }
26
27 static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
28 {
29 unsigned long retval;
30
31 __asm__ __volatile__ (
32 " .align 2 \n\t"
33 " mova 1f, r0 \n\t"
34 " mov r15, r1 \n\t"
35 " mov #-6, r15 \n\t"
36 " mov.w @%1, %0 \n\t"
37 " extu.w %0, %0 \n\t"
38 " mov.w %2, @%1 \n\t"
39 "1: mov r1, r15 \n\t"
40 : "=&r" (retval),
41 "+r" (m),
42 "+r" (val)
43 :
44 : "memory" , "r0", "r1");
45
46 return retval;
47 }
48
49 static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
50 {
51 unsigned long retval;
52
53 __asm__ __volatile__ (
54 " .align 2 \n\t"
55 " mova 1f, r0 \n\t"
56 " mov r15, r1 \n\t"
57 " mov #-6, r15 \n\t"
58 " mov.b @%1, %0 \n\t"
59 " extu.b %0, %0 \n\t"
60 " mov.b %2, @%1 \n\t"
61 "1: mov r1, r15 \n\t"
62 : "=&r" (retval),
63 "+r" (m),
64 "+r" (val)
65 :
66 : "memory" , "r0", "r1");
67
68 return retval;
69 }
70
71 static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
72 unsigned long new)
73 {
74 unsigned long retval;
75
76 __asm__ __volatile__ (
77 " .align 2 \n\t"
78 " mova 1f, r0 \n\t"
79 " nop \n\t"
80 " mov r15, r1 \n\t"
81 " mov #-8, r15 \n\t"
82 " mov.l @%3, %0 \n\t"
83 " cmp/eq %0, %1 \n\t"
84 " bf 1f \n\t"
85 " mov.l %2, @%3 \n\t"
86 "1: mov r1, r15 \n\t"
87 : "=&r" (retval),
88 "+r" (old), "+r" (new)
89 : "r" (m)
90 : "memory" , "r0", "r1", "t");
91
92 return retval;
93 }
94
95 #endif