This source file includes the following definitions:
- page_pool_init
- page_pool_create
- __page_pool_get_cached
- __page_pool_alloc_pages_slow
- page_pool_alloc_pages
- page_pool_inflight
- __page_pool_clean_page
- page_pool_unmap_page
- __page_pool_return_page
- __page_pool_recycle_into_ring
- __page_pool_recycle_direct
- __page_pool_put_page
- __page_pool_empty_ring
- page_pool_free
- page_pool_scrub
- page_pool_release
- page_pool_release_retry
- page_pool_use_xdp_mem
- page_pool_destroy
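
For orientation, a minimal lifecycle sketch of this API from a driver's point
of view (hypothetical driver code; the page_pool calls and parameter fields
are the real ones defined below and in <net/page_pool.h>):

        struct page_pool_params pp_params = {
                .order          = 0,                    /* one page per frame */
                .flags          = PP_FLAG_DMA_MAP,      /* pool manages DMA mapping */
                .pool_size      = 256,                  /* ptr_ring size, assumed */
                .nid            = NUMA_NO_NODE,
                .dev            = &pdev->dev,           /* assumed device pointer */
                .dma_dir        = DMA_FROM_DEVICE,
        };
        struct page_pool *pool = page_pool_create(&pp_params);
        struct page *page = page_pool_alloc_pages(pool, GFP_KERNEL);
        /* ... use page for RX DMA, then recycle or return it ... */
        __page_pool_put_page(pool, page, false);
        page_pool_destroy(pool);
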
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
{
        unsigned int ring_qsize = 1024; /* Default */

        memcpy(&pool->p, params, sizeof(pool->p));

        /* Validate only known flags were used */
        if (pool->p.flags & ~(PP_FLAG_ALL))
                return -EINVAL;

        if (pool->p.pool_size)
                ring_qsize = pool->p.pool_size;

        /* Sanity limit on memory that can be pinned down */
        if (ring_qsize > 32768)
                return -E2BIG;

        /* DMA direction must be DMA_FROM_DEVICE or DMA_BIDIRECTIONAL;
         * DMA_BIDIRECTIONAL covers pages also used for DMA transmit,
         * e.g. the XDP_TX use-case.
         */
        if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
            (pool->p.dma_dir != DMA_BIDIRECTIONAL))
                return -EINVAL;

        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;

        atomic_set(&pool->pages_state_release_cnt, 0);

        /* The driver that calls page_pool_create() is also expected
         * to call page_pool_destroy(), hence the initial reference.
         */
        refcount_set(&pool->user_cnt, 1);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                get_device(pool->p.dev);

        return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
        struct page_pool *pool;
        int err;

        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        err = page_pool_init(pool, params);
        if (err < 0) {
                pr_warn("%s() gave up with errno %d\n", __func__, err);
                kfree(pool);
                return ERR_PTR(err);
        }

        return pool;
}
EXPORT_SYMBOL(page_pool_create);
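
/* Usage note (illustrative, not part of this file): the return value is
 * ERR_PTR-encoded and never NULL, so callers check it with IS_ERR():
 *
 *      pool = page_pool_create(&pp_params);
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);   (-EINVAL, -E2BIG or -ENOMEM from init)
 */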

/* Fast-path: try the per-pool caches before hitting the page allocator */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
        struct ptr_ring *r = &pool->ring;
        bool refill = false;
        struct page *page;

        /* Test for safe-context, caller should provide this guarantee */
        if (likely(in_serving_softirq())) {
                if (likely(pool->alloc.count)) {
                        /* Fast-path */
                        page = pool->alloc.cache[--pool->alloc.count];
                        return page;
                }
                refill = true;
        }

        /* Quicker fallback, avoid locks when ring is empty */
        if (__ptr_ring_empty(r))
                return NULL;

        /* Slow-path: get page from locked ring queue,
         * refilling the alloc array if requested.
         */
        spin_lock(&r->consumer_lock);
        page = __ptr_ring_consume(r);
        if (refill)
                pool->alloc.count = __ptr_ring_consume_batched(r,
                                                        pool->alloc.cache,
                                                        PP_ALLOC_CACHE_REFILL);
        spin_unlock(&r->consumer_lock);
        return page;
}

/* Slow-path: both caches were empty */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                                                 gfp_t _gfp)
{
        struct page *page;
        gfp_t gfp = _gfp;
        dma_addr_t dma;

        /* We could always set __GFP_COMP, and avoid this branch, as
         * prep_new_page() can handle order-0 with __GFP_COMP.
         */
        if (pool->p.order)
                gfp |= __GFP_COMP;

        /* Cache was empty, do real allocation */
        page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
        if (!page)
                return NULL;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                goto skip_dma_map;

        /* Setup DMA mapping: use the 'struct page' area for storing the
         * DMA address, since dma_addr_t can be either 32 or 64 bits and
         * does not always fit in page->private (e.g. a 32-bit CPU with
         * 64-bit DMA).  The mapping is kept for the lifetime of the page,
         * until it leaves the pool.
         */
        dma = dma_map_page_attrs(pool->p.dev, page, 0,
                                 (PAGE_SIZE << pool->p.order),
                                 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(pool->p.dev, dma)) {
                put_page(page);
                return NULL;
        }
        page->dma_addr = dma;

skip_dma_map:
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;

        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

        /* A freshly allocated page should/must have refcnt 1 */
        return page;
}

/* For using page_pool to replace alloc_pages() API calls, while
 * providing a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
        struct page *page;

        /* Fast-path: get a page from the caches */
        page = __page_pool_get_cached(pool);
        if (page)
                return page;

        /* Slow-path: caches empty, do real allocation */
        page = __page_pool_alloc_pages_slow(pool, gfp);
        return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
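
/* Usage sketch (illustrative, not part of this file): allocating an RX
 * buffer, e.g. when refilling a receive ring.  GFP_ATOMIC is an assumed
 * choice for atomic/softirq context:
 *
 *      struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
 *      if (!page)
 *              return -ENOMEM;
 *      dma = page->dma_addr;   (valid when pool uses PP_FLAG_DMA_MAP)
 */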

/* Calculate distance between two u32 values, valid if the distance is
 * below 2^(31), i.e. the counters are allowed to wrap around.
 */
#define _distance(a, b) (s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
        u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
        u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
        s32 inflight;

        inflight = _distance(hold_cnt, release_cnt);

        trace_page_pool_inflight(pool, inflight, hold_cnt, release_cnt);
        WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

        return inflight;
}
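
/* Worked example of the wrap-safe arithmetic above (illustrative values):
 * with hold_cnt = 0x00000001 and release_cnt = 0xffffffff, the unsigned
 * subtraction yields 0x00000002, so the s32 cast reports 2 pages still
 * in-flight even though hold_cnt has numerically wrapped past release_cnt.
 */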

/* Cleanup page_pool state from page */
static void __page_pool_clean_page(struct page_pool *pool,
                                   struct page *page)
{
        dma_addr_t dma;
        int count;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                goto skip_dma_unmap;

        dma = page->dma_addr;
        /* DMA unmap */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
        page->dma_addr = 0;
skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */
        count = atomic_inc_return(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
}

/* Unmap the page and clean our state */
void page_pool_unmap_page(struct page_pool *pool, struct page *page)
{
        /* When the page is unmapped, this implies the page will not be
         * returned to the page_pool.
         */
        __page_pool_clean_page(pool, page);
}
EXPORT_SYMBOL(page_pool_unmap_page);
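
/* Usage sketch (illustrative, not part of this file): a driver that hands
 * the page up the stack and gives up on recycling it calls this first, so
 * the pool's DMA mapping and in-flight accounting are released:
 *
 *      page_pool_unmap_page(rxq->page_pool, page);   (rxq is assumed driver state)
 */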

/* Return a page to the page allocator, cleaning up our state */
static void __page_pool_return_page(struct page_pool *pool, struct page *page)
{
        __page_pool_clean_page(pool, page);

        put_page(page);
        /* An optimization would be to call __free_pages(page, pool->p.order)
         * knowing the page is not part of the page-cache (thus avoiding a
         * __page_cache_release() call).
         */
}

static bool __page_pool_recycle_into_ring(struct page_pool *pool,
                                          struct page *page)
{
        int ret;
        /* BH protection not needed if current is serving softirq */
        if (in_serving_softirq())
                ret = ptr_ring_produce(&pool->ring, page);
        else
                ret = ptr_ring_produce_bh(&pool->ring, page);

        return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache, e.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool __page_pool_recycle_direct(struct page *page,
                                       struct page_pool *pool)
{
        if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
                return false;

        /* Caller MUST have verified/know (page_ref_count(page) == 1) */
        pool->alloc.cache[pool->alloc.count++] = page;
        return true;
}

void __page_pool_put_page(struct page_pool *pool,
                          struct page *page, bool allow_direct)
{
        /* This allocator is optimized for the XDP mode that uses
         * one frame per page, but it has fallbacks that act like the
         * regular page allocator APIs.
         *
         * refcnt == 1 means page_pool owns the page, and can recycle it.
         */
        if (likely(page_ref_count(page) == 1)) {
                /* Recycle into the softirq-local alloc cache if allowed */
                if (allow_direct && in_serving_softirq())
                        if (__page_pool_recycle_direct(page, pool))
                                return;

                if (!__page_pool_recycle_into_ring(pool, page)) {
                        /* Cache full, fallback to freeing the page */
                        __page_pool_return_page(pool, page);
                }
                return;
        }
        /* Fallback/non-XDP mode: the API user has an elevated refcnt.
         * Drivers doing refcnt-based recycling tricks still work, but the
         * pool gives up ownership here: the DMA mapping is released and
         * only the user's put_page() references remain.
         */
        __page_pool_clean_page(pool, page);
        put_page(page);
}
EXPORT_SYMBOL(__page_pool_put_page);
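
/* Usage sketch (illustrative, not part of this file): recycling from a
 * driver's NAPI poll loop, where softirq context makes the lock-free
 * direct path legal, versus returning from any other context:
 *
 *      __page_pool_put_page(pool, page, true);         (NAPI/softirq only)
 *      __page_pool_put_page(pool, page, false);        (any context)
 */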

static void __page_pool_empty_ring(struct page_pool *pool)
{
        struct page *page;

        /* Empty the recycle ring */
        while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, page_ref_count(page));

                __page_pool_return_page(pool, page);
        }
}

static void page_pool_free(struct page_pool *pool)
{
        if (pool->disconnect)
                pool->disconnect(pool);

        ptr_ring_cleanup(&pool->ring, NULL);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                put_device(pool->p.dev);

        kfree(pool);
}

static void page_pool_scrub(struct page_pool *pool)
{
        struct page *page;

        /* Empty alloc cache, assume caller made sure this is
         * no longer in use, and page_pool_alloc_pages() cannot be
         * called concurrently.
         */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                __page_pool_return_page(pool, page);
        }

        /* No more consumers should exist, but producers could still
         * be in-flight.
         */
        __page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
        int inflight;

        page_pool_scrub(pool);
        inflight = page_pool_inflight(pool);
        if (!inflight)
                page_pool_free(pool);

        return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
        struct delayed_work *dwq = to_delayed_work(wq);
        struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
        int inflight;

        inflight = page_pool_release(pool);
        if (!inflight)
                return;

        /* Periodic warning while pages remain in-flight */
        if (time_after_eq(jiffies, pool->defer_warn)) {
                int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

                pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
                        __func__, inflight, sec);
                pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
        }

        /* Still not ready to be disconnected, retry later */
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
        refcount_inc(&pool->user_cnt);
        pool->disconnect = disconnect;
}
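
/* Caller sketch (illustrative, not part of this file): the XDP memory-model
 * core takes the extra user reference and installs its disconnect callback
 * when a driver registers the pool; the callback name below is assumed:
 *
 *      page_pool_use_xdp_mem(pool, my_mem_disconnect);
 */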

void page_pool_destroy(struct page_pool *pool)
{
        if (!pool)
                return;

        if (!page_pool_put(pool))
                return;

        if (!page_pool_release(pool))
                return;

        /* In-flight pages remain: retry from a work-queue until all
         * outstanding pages have been returned.
         */
        pool->defer_start = jiffies;
        pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;

        INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
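
/* Teardown sketch (illustrative, not part of this file): a driver stops its
 * RX path first so no new pages are handed out, then destroys the pool; if
 * packets are still in-flight, the delayed work above retries the release:
 *
 *      mydrv_stop_rx(priv);            (hypothetical driver helper)
 *      page_pool_destroy(priv->page_pool);
 *      priv->page_pool = NULL;
 */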