/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous; we only need the one
	 * after the lock to provide the ACQUIRE semantics.
	 * However, doing the "right" thing was regressing hackbench,
	 * so keep this, pending further investigation.
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure loads/stores after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barriers,
	 * thus we need the full all-all barrier
	 */
	smp_mb();
}
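
/*
 * Illustration only (not part of the kernel API): the EX based loop
 * above behaves roughly like the following C, assuming a hypothetical
 * atomic-exchange helper xchg_word():
 *
 *	do {
 *		got = xchg_word(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__);
 *	} while (got == __ARCH_SPIN_LOCK_LOCKED__);
 *
 * i.e. keep swapping LOCKED into @slock until the value read back is
 * UNLOCKED, meaning this CPU is the one that actually acquired the lock.
 */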

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	/* Swap LOCKED into slock once; we own the lock iff the old value was UNLOCKED */
	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}
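
/*
 * Illustration only: callers normally reach this through the generic
 * spin_trylock() wrapper and fall back when the lock is busy
 * (lock name is hypothetical):
 *
 *	if (spin_trylock(&my_lock)) {
 *		... short critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... lock busy, take a slow path ...
 *	}
 */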

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions available on ARCv2,
	 * a full barrier is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * Superfluous, but kept for now; see the pairing version in
	 * arch_spin_lock() above
	 */
	smp_mb();
}
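
/*
 * A minimal usage sketch: kernel code uses the generic <linux/spinlock.h>
 * wrappers rather than the arch_spin_* primitives directly (lock name
 * is hypothetical):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section: the smp_mb() pairs above provide the
 *	    ACQUIRE on lock and RELEASE on unlock ...
 *	spin_unlock(&my_lock);
 */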

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking: writers can be starved indefinitely by reader(s).
 */
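
/*
 * Counter convention used by the routines below (all updates of
 * @counter are serialized by @lock_mutex, so no atomics are needed):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__	held by (UNLOCKED - counter) readers
 *	counter == 0				held exclusively by one writer
 */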

/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * Zero means a writer holds the lock exclusively; deny the reader.
	 * Otherwise grant the lock to the first/subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	smp_mb();
	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer. Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));

	/*
	 * Note: arch_spin_unlock() above already ends with a full
	 * smp_mb() before we return
	 */
	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}
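
/*
 * A minimal usage sketch via the generic rwlock wrappers (names are
 * hypothetical):
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);
 *	... multiple readers may hold this concurrently ...
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	... exclusive access ...
 *	write_unlock(&my_rwlock);
 */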

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */