This source file includes the following definitions:
- queued_read_lock_slowpath
- queued_write_lock_slowpath
1
2
3
4
5
6
7
8
9 #include <linux/smp.h>
10 #include <linux/bug.h>
11 #include <linux/cpumask.h>
12 #include <linux/percpu.h>
13 #include <linux/hardirq.h>
14 #include <linux/spinlock.h>
15 #include <asm/qrwlock.h>
16
17
18
19
20
/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 *
 * Readers reach here when the fast path failed, i.e. a writer holds or
 * is waiting for the lock.
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting.
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
	/* Undo the reader bias taken by the failed fast path. */
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue.
	 */
	arch_spin_lock(&lock->wait_lock);
	/* Re-take the reader bias now that we are at the queue head. */
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head.
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
57
58
59
60
61
/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 *
 * Writers reach here when the fast-path cmpxchg failed, i.e. readers
 * and/or another writer are present.
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue. */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present. */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending. */
	atomic_add(_QW_WAITING, &lock->cnts);

	/*
	 * When no more readers or writers, set the locked flag.
	 *
	 * The acquire ordering is provided by atomic_cond_read_acquire();
	 * the cmpxchg that claims the lock can therefore be relaxed.
	 */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);