This source file includes the following definitions:
- percpu_count_ptr
- percpu_ref_init
- percpu_ref_exit
- percpu_ref_call_confirm_rcu
- percpu_ref_switch_to_atomic_rcu
- percpu_ref_noop_confirm_switch
- __percpu_ref_switch_to_atomic
- __percpu_ref_switch_to_percpu
- __percpu_ref_switch_mode
- percpu_ref_switch_to_atomic
- percpu_ref_switch_to_atomic_sync
- percpu_ref_switch_to_percpu
- percpu_ref_kill_and_confirm
- percpu_ref_reinit
- percpu_ref_resurrect

#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu_ref starts life as a set of per-cpu counters: get/put only touch
 * the local CPU's counter, so individual counters may wrap, but their sum is
 * always the true reference count.  Detecting the count hitting zero on
 * every put would require global synchronization, so that work is deferred:
 * the user holds an initial reference and calls percpu_ref_kill() before
 * dropping it, which switches the ref to a single atomic_long_t via RCU.
 * While the per-cpu counters are being summed, the atomic counter is kept
 * away from zero by PERCPU_COUNT_BIAS, which is subtracted again once the
 * sum has been folded in.
 */
#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

/* strip the flag bits to recover the per-cpu counter pointer */
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  Unless @flags contains PERCPU_REF_INIT_ATOMIC or
 * PERCPU_REF_INIT_DEAD, @ref starts out in percpu mode with a refcount of 1.
 * Note that @release must not sleep - it may be called from RCU callback
 * context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    unsigned int flags, gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;

        ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
        ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
                ref->allow_reinit = true;
        } else {
                start_count += PERCPU_COUNT_BIAS;
        }

        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        else
                start_count++;

        atomic_long_set(&ref->count, start_count);

        ref->release = release;
        ref->confirm_switch = NULL;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
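
/*
 * Illustrative sketch only, not part of the original file: one common way a
 * caller might embed and initialize a percpu_ref.  struct my_obj,
 * my_obj_release() and my_obj_create() are hypothetical names; kzalloc()/
 * kfree() would additionally need <linux/slab.h> and the completion needs
 * <linux/completion.h>.  Guarded with #if 0 so it is never built.
 */
#if 0
struct my_obj {
        struct percpu_ref ref;
        struct completion kill_done;    /* used by the shutdown sketch below */
        /* ... caller data ... */
};

/* runs once the last reference is dropped; must not sleep */
static void my_obj_release(struct percpu_ref *ref)
{
        struct my_obj *obj = container_of(ref, struct my_obj, ref);

        kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        init_completion(&obj->kill_done);
        /* starts in percpu mode, holding the initial reference */
        if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
                kfree(obj);
                return NULL;
        }
        return obj;
}
#endif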

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or an init failure path
 * where percpu_ref_init() succeeded but a later part of the initialization
 * failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                /* non-NULL confirm_switch indicates switching in progress */
                WARN_ON_ONCE(ref->confirm_switch);
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

        ref->confirm_switch(ref);
        ref->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        if (!ref->allow_reinit)
                percpu_ref_exit(ref);

        /* drop the extra reference taken in __percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %ld percpu %ld",
                 atomic_long_read(&ref->count), (long)count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the
         * sum to &ref->count; since gets could be happening on one cpu while
         * puts happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - thus
         * the bias value, which we need to subtract here.
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

        WARN_ONCE(atomic_long_read(&ref->count) <= 0,
                  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
                  ref->release, atomic_long_read(&ref->count));

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
                if (confirm_switch)
                        confirm_switch(ref);
                return;
        }

        /* switching from percpu to atomic */
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

        /*
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress.  Use a noop one if unspecified.
         */
        ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

        percpu_ref_get(ref);    /* put after confirmation */
        call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);

        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;

        if (WARN_ON_ONCE(!ref->allow_reinit))
                return;

        atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

        /*
         * Restore per-cpu operation.  smp_store_release() is paired with
         * READ_ONCE() in __ref_is_percpu() and guarantees that the zeroing
         * is visible to all percpu accesses which can see the following
         * __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
{
        lockdep_assert_held(&percpu_ref_switch_lock);

        /*
         * If the previous ATOMIC switching hasn't finished yet, wait for
         * its completion.  If the caller ensures that ATOMIC switching
         * isn't in progress, this function can be called from any context.
         */
        wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
                            percpu_ref_switch_lock);

        if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will be
 * collected into the main atomic counter.  On completion, when all CPUs are
 * guaranteed to be in atomic mode, @confirm_switch, which may not block, is
 * invoked.  This function can safely be mixed with the get/put, kill and
 * reinit operations; @ref stays in atomic mode across kill/reinit cycles
 * until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of switching
 * to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the switch to
 * complete.  The caller must ensure that no other thread switches it back
 * to percpu mode in the meantime.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic(ref, NULL);
        wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
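
/*
 * Illustrative sketch only (hypothetical my_obj helpers from the earlier
 * sketch): forcing the ref into atomic mode around an operation that needs
 * it, then letting it go back to percpu mode afterwards.
 */
#if 0
static void my_obj_quiesce(struct my_obj *obj)
{
        /* returns only once all CPUs are guaranteed to be in atomic mode */
        percpu_ref_switch_to_atomic_sync(&obj->ref);
}

static void my_obj_resume(struct my_obj *obj)
{
        /* clears force_atomic; a ref that has been killed stays atomic */
        percpu_ref_switch_to_percpu(&obj->ref);
}
#endif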

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref back to percpu mode.  This reverses the effect of
 * percpu_ref_switch_to_atomic() but also takes the DEAD flag into account:
 * a killed ref stays in atomic mode until it is reinitialized.  This
 * function can safely be mixed with the get/put, kill and reinit
 * operations.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of switching
 * to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs, at which point all
 * further invocations of percpu_ref_tryget_live() will fail.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
                  "%s called more than once on %ps!", __func__, ref->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
        percpu_ref_put(ref);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
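
/*
 * Illustrative sketch only, continuing the hypothetical struct my_obj from
 * the earlier sketch: a typical shutdown path.  The confirmation callback
 * runs once no CPU can take a new reference via percpu_ref_tryget_live();
 * the release callback runs later, when the last reference is dropped.
 */
#if 0
static void my_obj_confirm_kill(struct percpu_ref *ref)
{
        struct my_obj *obj = container_of(ref, struct my_obj, ref);

        complete(&obj->kill_done);      /* may not block here */
}

static void my_obj_shutdown(struct my_obj *obj)
{
        /* drops the initial reference and marks the ref DEAD */
        percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
        wait_for_completion(&obj->kill_done);
        /* my_obj_release() will free obj once all remaining gets are put */
}
#endif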

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill()
 * was called.  @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
        percpu_ref_get(ref);
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
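
/*
 * Illustrative sketch only (hypothetical my_obj helper): undoing a kill.
 * This is legal only while my_obj_release() has not yet run, i.e. while the
 * caller can still guarantee an outstanding reference.
 */
#if 0
static void my_obj_cancel_shutdown(struct my_obj *obj)
{
        /* clears the DEAD flag and re-takes the initial reference */
        percpu_ref_resurrect(&obj->ref);
}
#endif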