#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
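/*
 * Illustrative sketch (hypothetical C, not compiled here): the
 * movli.l/movco.l pair used below behaves like a load-link/
 * store-conditional retry loop, where load_link() and store_cond()
 * are stand-ins for the two instructions:
 *
 *	do {
 *		tmp = load_link(&v->counter);		movli.l: link @counter into r0
 *		tmp = tmp OP i;				operate on r0
 *	} while (!store_cond(&v->counter, tmp));	movco.l; bf 1b retries on failure
 *
 * movco.l sets the T bit only if no other store hit the line since
 * movli.l, so the loop repeats until the update really was atomic.
 */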

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}
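/*
 * For reference, ATOMIC_OP(add) above expands to roughly the
 * following (whitespace aside):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add	%1, %0				\n"
 *	"	movco.l	%0, @%2				\n"
 *	"	bf	1b				\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 *
 * The "=&z" constraint forces tmp into r0 (the only register
 * movli.l/movco.l accept), with early-clobber so it never aliases
 * an input, and "t" marks the T bit clobbered by movco.l/bf.
 */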

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}
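/*
 * Note that ATOMIC_OP_RETURN differs from ATOMIC_OP in two ways:
 * the trailing synco, the SH-4A memory barrier, keeps the returned
 * value ordered against surrounding accesses; and "return temp"
 * costs nothing extra, since the "z" constraint pins temp to r0,
 * which is also the function return register. This is the "for
 * free" point made in the SH-4A note above.
 */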

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
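/*
 * Usage sketch (hypothetical caller, for illustration only; real
 * code reaches these helpers through <linux/atomic.h>):
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_add(5, &count);			counter is now 5
 *	atomic_and(6, &count);			5 & 6 == 4
 *	if (atomic_sub_return(4, &count) == 0)
 *		...				update and read-back in one atomic op
 */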

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */