/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

#include <linux/compiler.h>

/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000
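
/*
 * A sketch of the lock word layout implied by the masks above (bit 16
 * is not covered by any of them):
 *
 *    31             17  16  15  14              0
 *   +-----------------+---+---+------------------+
 *   |     current     | - |ovf|       next       |
 *   +-----------------+---+---+------------------+
 *
 * Bits [14:0] hold the "next" ticket, bit 15 catches the carry when
 * "next" wraps, and bits [31:17] hold the "current" ticket.
 */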

/*
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline u32 arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

/*
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline u32 arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;
}

/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
	u32 val = READ_ONCE(lock->lock);
	return arch_spin_current(val) != arch_spin_next(val);
}
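
/*
 * Worked example: if "current" is 3 and "next" is 5, ticket 3 holds
 * the lock and ticket 4 is still waiting, so the lock is locked; once
 * both fields read 5, no ticket is outstanding and the lock is free.
 */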

/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
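
/*
 * An observation on wrap-around, derived from the field layout above:
 * because "current" occupies the top bits of the word, the carry out
 * of bit 31 when it increments past 0x7fff is simply discarded by
 * 32-bit arithmetic, so "current" wraps for free.  The "next" field
 * gets no such help, which is why arch_spin_lock() below must handle
 * __ARCH_SPIN_NEXT_OVERFLOW explicitly.
 */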

void arch_spin_unlock_wait(arch_spinlock_t *lock);

void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);

/*
 * Grab the "next" ticket number and bump it atomically.
 * If the current ticket is not ours, go to the slow path.
 * We also take the slow path if the "next" value overflows.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = __insn_fetchadd4(&lock->lock, 1);
	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
	if (unlikely(arch_spin_current(val) != ticket))
		arch_spin_lock_slow(lock, ticket);
}
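
/*
 * Illustrative two-CPU trace, assuming a fresh lock word of 0:
 * CPU A's fetchadd returns 0, so ticket == 0 == current and A owns
 * the lock (the word is now 1).  CPU B's fetchadd returns 1, so
 * ticket == 1 but current == 0, and B enters arch_spin_lock_slow().
 * When A unlocks, the word becomes 0x20002: current == 1 == B's
 * ticket, so B now owns the lock.
 */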

/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers.
 */
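
/*
 * A sketch of the rwlock word layout implied by the operations below:
 * bit 31 is the write-lock bit, and bits [30:0] count the active
 * readers, with each arch_read_lock() adding 1 and each
 * arch_read_unlock() subtracting 1.
 */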

#define __WRITE_LOCK_BIT (1 << 31)

static inline int arch_write_val_locked(int val)
{
	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
}

/**
 * read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(rw->lock);
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;
}

extern void __read_lock_failed(arch_rwlock_t *rw);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
	if (unlikely(arch_write_val_locked(val)))
		__read_lock_failed(rw);
}
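
/*
 * Note (an interpretation of the fetchaddgez semantics): the
 * increment only commits when the result stays nonnegative, i.e.
 * when no writer holds the lock.  A failed attempt therefore leaves
 * the word untouched, which is also why arch_read_trylock() below
 * needs no undo path.
 */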

extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (unlikely(val != 0))
		__write_lock_failed(rw, val);
}
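
/*
 * Note: fetchor returns the previous word, so a nonzero value means
 * readers and/or a writer already hold the lock.  Our fetchor may
 * have set the write bit even though we lost the race; the slow path
 * is handed the old value, presumably so it can sort that out while
 * backing off (compare the undo in arch_write_trylock() below).
 */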

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_fetchadd4(&rw->lock, -1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (likely(val == 0))
		return 1;
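	/*
	 * The lock was already held.  If only readers hold it, our
	 * fetchor still set the write bit and we must back it out; if
	 * another writer holds it, the bit was already set and our
	 * fetchor changed nothing, so there is nothing to undo.
	 */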
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */