/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

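/* Grab the lock with ldstub, Sparc's atomic load-store-unsigned-byte:
 * it reads the addressed byte and writes 0xff back in a single atomic
 * bus transaction.  As a pseudocode sketch (illustrative only, with
 * ldstub() standing in for the instruction):
 *
 *	while (ldstub(lock) != 0)		// old value 0 => now ours
 *		while (*(volatile u8 *)lock)	// spin with plain loads
 *			;			// until it looks free
 *
 * The inner read-only loop lives in .subsection 2 so the hot path is
 * straight-line code, and spinning with plain loads avoids hammering
 * the bus with atomic operations while the lock is held. */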
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

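/* One-shot version of the above: ldstub leaves the old byte value in
 * "result", so zero means the lock was free and is now held by us. */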
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

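/* Unlock is a plain byte store of zero; the "memory" clobber acts as
 * a compiler barrier so critical-section accesses are not reordered
 * past the release. */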
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. A writer that acquires wlock while the
 * counter is non-zero must release the lock and retry until both
 * the counter and wlock are zero.
 *
 * Unfortunately the 24-bit counter limits this scheme to
 * ~16,000,000 CPUs.
 */
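/* An illustrative C view of that word (hypothetical helper macros,
 * not used by the real code):
 *
 *	#define RW_WLOCK(rw)	(((volatile __u8 *)&(rw)->lock)[3])
 *	#define RW_COUNT(rw)	((rw)->lock >> 8)
 *
 * Sparc is big-endian, so byte 3 of the word is the low-order wlock
 * byte - that is what "ldstub [%g1 + 3]" grabs below.  The routines
 * that follow pass the lock address to out-of-line ___rw_* assembly
 * helpers in %g1, with the caller's %o7 stashed in %g4 so the helper
 * can restore it on return. */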
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

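/* The ___rw_read_enter helper briefly holds the wlock byte while it
 * bumps the counter; interrupts are masked around the call so an irq
 * handler on this cpu cannot spin on that byte and deadlock against
 * the interrupted update. */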
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;	/* mark the entire word locked */
}

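/* A single 32-bit store of zero drops the reader count and the wlock
 * byte at once. */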
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

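/* Writer try-lock: grab the wlock byte with ldstub, then check that
 * no readers hold the 24-bit count.  If they do, back out by clearing
 * the byte we just set and report failure. */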
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

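/* Lock-free queries: a read lock is possible while the wlock byte is
 * clear; a write lock only while the entire word (count and wlock) is
 * zero. */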
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */