#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/spr-regs.h>

#ifdef __ATOMIC_LIB__

#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define ATOMIC_QUALS
#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)

#else /* !OUTOFLINE && LIB */

#define ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op)

#endif /* OUTOFLINE */

#else /* !__ATOMIC_LIB__ */

#define ATOMIC_EXPORT(x)

#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define ATOMIC_OP_RETURN(op)						\
extern int __atomic_##op##_return(int i, int *v);			\
extern long long __atomic64_##op##_return(long long i, long long *v);

#define ATOMIC_FETCH_OP(op)						\
extern int __atomic32_fetch_##op(int i, int *v);			\
extern long long __atomic64_fetch_##op(long long i, long long *v);

#else /* !OUTOFLINE && !LIB */

#define ATOMIC_QUALS	static inline

#endif /* OUTOFLINE */
#endif /* __ATOMIC_LIB__ */
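
/*
 * The two #ifdef conditions above select one of four modes for the
 * ATOMIC_* helper macros below:
 *
 *   __ATOMIC_LIB__ &&  OUTOFLINE: emit the function bodies with external
 *				   linkage and EXPORT_SYMBOL() them
 *   __ATOMIC_LIB__ && !OUTOFLINE: emit nothing; callers use the inline
 *				   versions instead
 *  !__ATOMIC_LIB__ &&  OUTOFLINE: emit extern declarations only
 *  !__ATOMIC_LIB__ && !OUTOFLINE: emit the function bodies static inline
 */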

/*
 * Note on the 64 bit inline asm variants...
 *
 * CSTD is a conditional instruction and needs a constrained memory reference.
 * Normally 'U' provides the correct constraints for conditional instructions,
 * and this is used for the 32 bit version; however, 'U' does not appear to
 * work for 64 bit values (gcc-4.9).
 *
 * The exact constraint is that conditional instructions cannot deal with an
 * immediate displacement in the memory reference, so we read the address
 * through a volatile cast into a local variable in order to ensure we
 * _have_ to compute the correct address without displacement. This allows us
 * to use the regular 'm' constraint for the memory address.
 *
 * Furthermore, the %Ln operand, which prints the low word register (r+1),
 * really only works for registers; this means we cannot allow immediate
 * values for the 64 bit versions the way we do for the 32 bit ones.
 */
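
/*
 * Usage sketch (illustrative; the wrapper below is an assumption about the
 * callers, not part of this file): the macros that follow generate
 * functions such as __atomic_add_return() and __atomic32_fetch_or(), which
 * the arch's atomic.h is expected to wrap roughly as:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return __atomic_add_return(i, &v->counter);
 *	}
 */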

#ifndef ATOMIC_OP_RETURN
#define ATOMIC_OP_RETURN(op)						\
ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
{									\
	int val;							\
									\
	asm volatile(							\
	    "0:						\n"		\
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */	\
	    "	ckeq		icc3,cc7		\n"	/* set CC7 on Z */	\
	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */	\
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */	\
	    "   "#op"%I2	%1,%2,%1		\n"		\
	    "	cst.p		%1,%M0		,cc3,#1	\n"		\
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */	\
	    "	beq		icc3,#0,0b		\n"	/* loop if the store was nullified */	\
	    : "+U"(*v), "=&r"(val)					\
	    : "NPr"(i)							\
	    : "memory", "cc7", "cc3", "icc3"				\
	    );								\
									\
	return val;							\
}									\
ATOMIC_EXPORT(__atomic_##op##_return);					\
									\
ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
{									\
	long long *__v = READ_ONCE(v);	/* force a displacement-free address */	\
	long long val;							\
									\
	asm volatile(							\
	    "0:						\n"		\
	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */	\
	    "	ckeq		icc3,cc7		\n"	/* set CC7 on Z */	\
	    "	ldd.p		%M0,%1			\n"	/* LDD.P/ORCR must be atomic */	\
	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */	\
	    "   "#op"cc		%L1,%L2,%L1,icc0	\n"	/* low words; carry out in ICC0 */	\
	    "   "#op"x		%1,%2,%1,icc0		\n"	/* high words; carry in from ICC0 */	\
	    "	cstd.p		%1,%M0		,cc3,#1	\n"		\
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */	\
	    "	beq		icc3,#0,0b		\n"	/* loop if the store was nullified */	\
	    : "+m"(*__v), "=&e"(val)					\
	    : "e"(i)							\
	    : "memory", "cc7", "cc3", "icc0", "icc3"			\
	    );								\
									\
	return val;							\
}									\
ATOMIC_EXPORT(__atomic64_##op##_return);
#endif
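
/*
 * Note: the __atomic{,64}_##op##_return() functions above return the new
 * value; the __atomic{32,64}_fetch_##op() functions below return the old
 * value, as the atomic_fetch_*() API requires.
 */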

#ifndef ATOMIC_FETCH_OP
#define ATOMIC_FETCH_OP(op)						\
ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
{									\
	int old, tmp;							\
									\
	asm volatile(							\
		"0:						\n"	\
		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */	\
		"	ckeq		icc3,cc7		\n"	/* set CC7 on Z */	\
		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */	\
		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */	\
		"	"#op"%I3	%1,%3,%2		\n"	\
		"	cst.p		%2,%M0		,cc3,#1	\n"	\
		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */	\
		"	beq		icc3,#0,0b		\n"	/* loop if the store was nullified */	\
		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
		: "NPr"(i)						\
		: "memory", "cc7", "cc3", "icc3"			\
		);							\
									\
	return old;							\
}									\
ATOMIC_EXPORT(__atomic32_fetch_##op);					\
									\
ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
{									\
	long long *__v = READ_ONCE(v);	/* force a displacement-free address */	\
	long long old, tmp;						\
									\
	asm volatile(							\
		"0:						\n"	\
		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */	\
		"	ckeq		icc3,cc7		\n"	/* set CC7 on Z */	\
		"	ldd.p		%M0,%1			\n"	/* LDD.P/ORCR must be atomic */	\
		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */	\
		"	"#op"		%L1,%L3,%L2		\n"	/* low words (bitwise: no carry) */	\
		"	"#op"		%1,%3,%2		\n"	/* high words */	\
		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */	\
		"	beq		icc3,#0,0b		\n"	/* loop if the store was nullified */	\
		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
		: "e"(i)						\
		: "memory", "cc7", "cc3", "icc3"			\
		);							\
									\
	return old;							\
}									\
ATOMIC_EXPORT(__atomic64_fetch_##op);
#endif
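
/*
 * Instantiate the operations.  Note that ATOMIC_FETCH_OP is only suitable
 * for carry-free (bitwise) ops here, since its 64 bit variant does not
 * propagate carry between the low and high words.
 */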

ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_QUALS
#undef ATOMIC_EXPORT
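
/*
 * For reference, a minimal sketch of the out-of-line library translation
 * unit that consumes this file twice (the path and exact contents are an
 * assumption, modelled on arch/frv/lib/atomic-lib.c): the first inclusion
 * via <asm/atomic.h> yields the extern declarations, the second, with
 * __ATOMIC_LIB__ defined, emits and exports the function bodies:
 *
 *	#include <linux/export.h>
 *	#include <asm/atomic.h>
 *
 *	#define __ATOMIC_LIB__
 *
 *	#include <asm/atomic_defs.h>
 */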