This source file includes the following definitions:
- down
- down_interruptible
- down_killable
- down_trylock
- down_timeout
- up
- __down_common
- __down
- __down_interruptible
- __down_killable
- __down_timeout
- __up
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
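
/*
 * Example (editor's sketch, not part of the kernel source): serialising a
 * shared resource with down()/up().  foo_reset is an assumed name and the
 * semaphore is assumed to be initialised elsewhere with sema_init(sem, 1).
 * Because down() sleeps uninterruptibly, down_interruptible() or
 * down_killable() is usually preferred.
 */
static void foo_reset(struct semaphore *sem)
{
	down(sem);		/* sleeps until the current holder calls up() */
	/* ... exclusive access to the shared resource ... */
	up(sem);
}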

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
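
/*
 * Example (editor's sketch, not part of the kernel source): the usual
 * pattern is to propagate the -EINTR return from down_interruptible() so
 * that a signal can abort the wait.  foo_do_work is an assumed name.
 */
static int foo_do_work(struct semaphore *sem)
{
	int ret;

	ret = down_interruptible(sem);
	if (ret)
		return ret;	/* -EINTR: interrupted by a signal */
	/* ... critical section ... */
	up(sem);
	return 0;
}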

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
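
/*
 * Example (editor's sketch, not part of the kernel source): down_killable()
 * behaves like down_interruptible(), except that only a fatal signal wakes
 * the sleeper, which suits long waits that ordinary signals should not
 * abort.  foo_flush is an assumed name.
 */
static int foo_flush(struct semaphore *sem)
{
	if (down_killable(sem))
		return -EINTR;	/* the task is being killed */
	/* ... flush pending work ... */
	up(sem);
	return 0;
}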

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
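
/*
 * Example (editor's sketch, not part of the kernel source): down_trylock()
 * returns 0 on success and 1 on failure, the opposite of spin_trylock() and
 * mutex_trylock().  Because it never sleeps, it may be used from atomic
 * context.  foo_try_work is an assumed name.
 */
static bool foo_try_work(struct semaphore *sem)
{
	if (down_trylock(sem))
		return false;	/* semaphore not available; do not block */
	/* ... quick, non-blocking work ... */
	up(sem);
	return true;
}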

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
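
/*
 * Example (editor's sketch, not part of the kernel source): down_timeout()
 * takes the timeout in jiffies and returns -ETIME when it expires, so
 * callers typically convert from milliseconds with msecs_to_jiffies().
 * foo_wait_idle is an assumed name.
 */
static int foo_wait_idle(struct semaphore *sem)
{
	int ret;

	ret = down_timeout(sem, msecs_to_jiffies(100));
	if (ret)
		return ret;	/* -ETIME: not released within ~100 ms */
	/* ... semaphore held ... */
	up(sem);
	return 0;
}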

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
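
/*
 * Example (editor's sketch, not part of the kernel source): because up() may
 * be called from any context, a semaphore initialised to 0 can signal a
 * sleeping thread from an interrupt handler.  foo_irq and foo_wait_event are
 * assumed names; irqreturn_t comes from <linux/interrupt.h>.
 */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct semaphore *sem = data;

	up(sem);		/* wake the thread sleeping in down() */
	return IRQ_HANDLED;
}

static void foo_wait_event(struct semaphore *sem)
{
	down(sem);		/* sleep until foo_irq() signals */
	/* ... handle the event ... */
}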

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}