This source file includes the following definitions:
- arch_spin_unlock
- arch_spin_trylock
- arch_spin_lock
- arch_read_lock
- arch_write_lock
- arch_read_trylock
- arch_write_trylock
- arch_read_unlock
- arch_write_unlock
#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>

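/*
 * Simple test-and-set spin lock operations.  These provide no
 * fairness guarantees.
 */
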
#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

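/* Lock release: a plain store of zero with release ordering is enough. */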
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

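/*
 * One attempt to take the lock: atomically swap 1 into ->lock and
 * succeed iff the old value was 0.  The acquire barrier orders the
 * critical section after the successful swap.
 */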
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

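/*
 * Test-and-test-and-set: spin read-only while the lock looks held, so
 * waiters do not bounce the cache line with atomics, then race for the
 * lock with a trylock once it looks free.
 */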
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}
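
/*
 * Reader/writer locks: ->lock counts active readers, or holds -1 while
 * a writer owns the lock.  Readers spin while the count is negative.
 */
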
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

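/*
 * Writers wait for the lock word to reach zero (no readers, no writer),
 * then try to install -1 to claim exclusive ownership.
 */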
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

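/*
 * Non-blocking reader acquire: bail out immediately if a writer holds
 * the lock (negative count); loop back only on a spurious SC failure.
 */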
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

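/*
 * Non-blocking writer acquire: fail unless the lock word is currently
 * zero, i.e. there are no readers and no writer.
 */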
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

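/*
 * Dropping a reader is an atomic add of -1; the release barrier keeps
 * the critical section ordered before the decrement.
 */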
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		RISCV_RELEASE_BARRIER
		"	amoadd.w x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

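/* The writer owns the lock exclusively, so a release store of zero suffices. */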
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */
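
For readers outside the kernel, here is a minimal user-space sketch of the
same test-and-test-and-set algorithm, written against C11 <stdatomic.h>.
The toy_* names are hypothetical, not kernel API; this is an illustration
of the technique, not a drop-in replacement.

#include <stdatomic.h>

typedef struct {
	atomic_int lock;
} toy_spinlock_t;

static int toy_spin_trylock(toy_spinlock_t *l)
{
	/* Swap in 1; the lock was free iff the old value was 0.
	 * memory_order_acquire plays the role of RISCV_ACQUIRE_BARRIER. */
	return atomic_exchange_explicit(&l->lock, 1,
					memory_order_acquire) == 0;
}

static void toy_spin_lock(toy_spinlock_t *l)
{
	for (;;) {
		/* Read-only spin first, as arch_spin_lock does above. */
		if (atomic_load_explicit(&l->lock, memory_order_relaxed))
			continue;
		if (toy_spin_trylock(l))
			return;
	}
}

static void toy_spin_unlock(toy_spinlock_t *l)
{
	/* Analogue of smp_store_release(&lock->lock, 0). */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}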