/*
 * include/asm-xtensa/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

/*
 * spinlock
 *
 * There is at most one owner of a spinlock.  Unlike rwlocks (see below),
 * there are no different types of spinlock owners.
 *
 * When trying to obtain a spinlock, the lock function "spins", or
 * busy-waits, until the lock is obtained.  While spinning, presumably the
 * current owner will soon give up the spinlock, making it available to
 * others.  Use the trylock functions to avoid spinning forever.
 *
 * possible values:
 *
 *    0         nobody owns the spinlock
 *    1         somebody owns the spinlock
 */

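/*
 * A minimal C sketch (illustrative only, not part of this header) of the
 * acquire protocol implemented by the S32C1I-based assembly below.  S32C1I
 * atomically stores its register operand to memory only when the memory
 * word equals the SCOMPARE1 special register, and always returns the old
 * memory value; the GCC __sync_val_compare_and_swap() builtin stands in
 * for that behaviour here, and the function name is hypothetical.
 */
#if 0	/* never compiled, for illustration only */
static inline void spinlock_acquire_model(volatile unsigned int *slock)
{
	unsigned int old;

	do {
		/* try to change 0 -> 1; "old" receives the previous value */
		old = __sync_val_compare_and_swap(slock, 0, 1);
		/* non-zero means somebody else already owns the lock */
	} while (old != 0);
}
#endif
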
#define arch_spin_is_locked(x) ((x)->slock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * S32C1I stores 1 to slock only if slock == SCOMPARE1 (0, i.e. free)
	 * and always returns the old value; retry while that value is
	 * non-zero, i.e. while somebody else owns the lock.
	 */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* Single S32C1I attempt: it succeeds only if the old value was 0. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * S32RI is a store with release semantics: prior memory accesses
	 * complete before the lock word is cleared.
	 */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}

/*
 * rwlock
 *
 * Read-write locks are really a more flexible kind of spinlock.  They allow
 * multiple readers but only one writer.  Write ownership is exclusive
 * (i.e., all other readers and writers are blocked from ownership while
 * there is a write owner).  These rwlocks are unfair to writers: writers
 * can be starved by readers for an indefinite time.
 *
 * possible values:
 *
 *   0          nobody owns the rwlock
 *  >0          one or more readers own the rwlock
 *                (the positive value is the actual number of readers)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */

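/*
 * A minimal C sketch (illustrative only, not part of this header) of the
 * lock-word encoding described above: the sign bit (0x80000000) marks
 * exclusive writer ownership, so a negative value means "write locked" and
 * any positive value is the current reader count.  The helper names are
 * hypothetical.
 */
#if 0	/* never compiled, for illustration only */
static inline int rwlock_write_locked_model(unsigned int lockval)
{
	return (lockval & 0x80000000u) != 0;	/* the asm below tests this with bltz */
}

static inline unsigned int rwlock_reader_count_model(unsigned int lockval)
{
	return rwlock_write_locked_model(lockval) ? 0 : lockval;
}
#endif
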
#define arch_write_can_lock(x)  ((x)->lock == 0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/*
	 * Try to install the writer bit (0x80000000); the S32C1I succeeds
	 * only if the lock word was 0 (no readers, no writer).
	 */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Single attempt to install the writer bit; see arch_write_lock(). */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Clear the lock word with a release store (S32RI). */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	/*
	 * Spin while a writer holds the lock (negative value), then try to
	 * bump the reader count with S32C1I; retry if the word changed
	 * between the load and the store.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       bltz    %1, 1b\n"
			"       wsr     %1, scompare1\n"
			"       addi    %0, %1, 1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	/*
	 * result = old + 1; bail out early if that would be negative (a
	 * writer holds the lock).  After the S32C1I, subtracting the
	 * expected old value leaves 0 exactly when the compare-and-store
	 * succeeded.
	 */
	__asm__ __volatile__(
			"       l32i    %1, %2, 0\n"
			"       addi    %0, %1, 1\n"
			"       bltz    %0, 1f\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	/*
	 * Atomically decrement the reader count; retry if the word changed
	 * between the load and the S32C1I.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       addi    %0, %1, -1\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#endif	/* _XTENSA_SPINLOCK_H */