root/include/net/page_pool.h

DEFINITIONS

This source file includes the following definitions.
  1. page_pool_dev_alloc_pages
  2. page_pool_get_dma_dir
  3. page_pool_destroy
  4. page_pool_use_xdp_mem
  5. page_pool_put_page
  6. page_pool_recycle_direct
  7. page_pool_release_page
  8. page_pool_get_dma_addr
  9. is_page_pool_compiled_in
  10. page_pool_put

/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *      Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but it has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users
 * know when it is safe to deallocate a page_pool object.  Thus, API
 * users must make sure to call page_pool_release_page() when a page
 * is "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in the case of an elevated refcnt,
 * it will release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
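
/* A minimal sketch of the flow described above; "rxq" and its
 * page_pool member are hypothetical driver-side names, not part of
 * this API:
 *
 *        struct page *page;
 *
 *        page = page_pool_dev_alloc_pages(rxq->page_pool);
 *        if (!page)
 *                goto no_mem;
 *        ...
 *        page_pool_put_page(rxq->page_pool, page, false);
 */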
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>

#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
#define PP_FLAG_ALL     PP_FLAG_DMA_MAP

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case, as
 * XDP_DROP allows the opportunity to recycle objects directly into
 * this array, since it shares the same softirq/NAPI protection.  If
 * the cache is already full (or partly full), then XDP_DROP recycles
 * would have to take a slower code path.
 */
#define PP_ALLOC_CACHE_SIZE     128
#define PP_ALLOC_CACHE_REFILL   64
struct pp_alloc_cache {
        u32 count;
        void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
        unsigned int    flags;
        unsigned int    order;
        unsigned int    pool_size;
        int             nid;  /* NUMA node id to allocate pages from */
        struct device   *dev; /* device, for DMA pre-mapping purposes */
        enum dma_data_direction dma_dir; /* DMA mapping direction */
};
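
/* A sketch of typical parameter setup, assuming a driver with a PCI
 * device ("pdev") and a 256-entry RX ring; these names and sizes are
 * illustrative assumptions, not requirements of this API:
 *
 *        struct page_pool_params pp_params = {
 *                .flags          = PP_FLAG_DMA_MAP,
 *                .order          = 0,
 *                .pool_size      = 256,
 *                .nid            = NUMA_NO_NODE,
 *                .dev            = &pdev->dev,
 *                .dma_dir        = DMA_FROM_DEVICE,
 *        };
 *        struct page_pool *pool = page_pool_create(&pp_params);
 *
 *        if (IS_ERR(pool))
 *                return PTR_ERR(pool);
 */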

struct page_pool {
        struct page_pool_params p;

        struct delayed_work release_dw;
        void (*disconnect)(void *);
        unsigned long defer_start;
        unsigned long defer_warn;

        u32 pages_state_hold_cnt;

        /*
         * Data structure for allocation side
         *
         * The driver's allocation side usually already performs some
         * kind of resource protection.  Piggyback on this protection,
         * and require the driver to protect the allocation side.
         *
         * For NIC drivers this means allocating a page_pool per
         * RX-queue, as the RX-queue is already protected by
         * softirq/BH scheduling and napi_schedule.  The NAPI schedule
         * guarantees that a single napi_struct will only be scheduled
         * on a single CPU (see napi_schedule).
         */
        struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

        /* Data structure for storing recycled pages.
         *
         * Returning/freeing pages is more complicated
         * synchronization-wise, because frees can happen on remote
         * CPUs, with no association with the allocation resource.
         *
         * Use ptr_ring, as it separates consumer and producer
         * efficiently, in a way that doesn't bounce cache-lines.
         *
         * TODO: Implement bulk return pages into this structure.
         */
        struct ptr_ring ring;

        atomic_t pages_state_release_cnt;

        /* A page_pool is strictly tied to a single RX-queue, being
         * protected by NAPI, due to the above pp_alloc_cache.  This
         * refcnt serves to simplify drivers' error handling.
         */
        refcount_t user_cnt;
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

        return page_pool_alloc_pages(pool, gfp);
}
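
/* Sketch of allocating a receive buffer in the driver's refill path;
 * "rxq" is a hypothetical driver structure.  On failure the driver
 * would typically stop refilling and retry on the next NAPI poll:
 *
 *        struct page *page = page_pool_dev_alloc_pages(rxq->page_pool);
 *
 *        if (!page)
 *                break;
 */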

/* Get the stored DMA direction.  A driver might decide to store this
 * locally and avoid the extra cache line from page_pool to determine
 * the direction.
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
        return pool->p.dma_dir;
}
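
/* Following the note above, a driver can read the direction once at
 * setup time and keep it in its own RX-queue structure; the
 * "rxq->dma_dir" field is a hypothetical example:
 *
 *        rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 */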

struct page_pool *page_pool_create(const struct page_pool_params *params);

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
                                         void (*disconnect)(void *))
{
}
#endif
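
/* Teardown sketch for a driver-owned pool ("rxq" is hypothetical);
 * per the in-flight accounting described in the DOC section, the
 * actual release may be deferred until outstanding pages return:
 *
 *        page_pool_destroy(rxq->page_pool);
 */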

/* Never call this directly, use helpers below */
void __page_pool_put_page(struct page_pool *pool,
                          struct page *page, bool allow_direct);

static inline void page_pool_put_page(struct page_pool *pool,
                                      struct page *page, bool allow_direct)
{
        /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
         * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
         */
#ifdef CONFIG_PAGE_POOL
        __page_pool_put_page(pool, page, allow_direct);
#endif
}
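
/* Sketch: returning a page from outside the NAPI/softirq context that
 * owns the pool, where direct recycling into the alloc cache is not
 * allowed:
 *
 *        page_pool_put_page(pool, page, false);
 */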
/* Very limited use-cases allow recycle direct */
static inline void page_pool_recycle_direct(struct page_pool *pool,
                                            struct page *page)
{
        __page_pool_put_page(pool, page, true);
}
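
/* Sketch of the XDP_DROP case mentioned in the pp_alloc_cache comment,
 * where direct recycling is safe because the code runs in the NAPI
 * context that owns the pool ("rxq" is hypothetical):
 *
 *        case XDP_DROP:
 *                page_pool_recycle_direct(rxq->page_pool, page);
 *                break;
 */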

/* Disconnects a page from a page_pool.  API users can have a need to
 * disconnect a page, to allow it to be used as a regular page (that
 * will eventually be returned to the normal page-allocator via
 * put_page).
 */
void page_pool_unmap_page(struct page_pool *pool, struct page *page);
static inline void page_pool_release_page(struct page_pool *pool,
                                          struct page *page)
{
#ifdef CONFIG_PAGE_POOL
        page_pool_unmap_page(pool, page);
#endif
}
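
/* Sketch: disconnecting a page that leaves the pool, e.g. when it is
 * attached to an SKB and handed to the network stack; "rxq", "skb",
 * "offset" and "len" are hypothetical driver-side values:
 *
 *        page_pool_release_page(rxq->page_pool, page);
 *        skb_add_rx_frag(skb, 0, page, offset, len, PAGE_SIZE);
 */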

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
        return page->dma_addr;
}
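
/* Sketch: using the stored DMA address to make received data visible
 * to the CPU before parsing it; "len" is a hypothetical packet length:
 *
 *        dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *        dma_sync_single_for_cpu(pool->p.dev, dma, len,
 *                                page_pool_get_dma_dir(pool));
 */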

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
        return true;
#else
        return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
        return refcount_dec_and_test(&pool->user_cnt);
}
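
/* Sketch of the user_cnt semantics: the pool starts with one reference
 * from page_pool_create(), each teardown path drops its reference via
 * page_pool_put(), and only the caller that sees it return true may
 * actually free the pool (a hedged sketch, not the exact teardown
 * implementation):
 *
 *        if (page_pool_put(pool))
 *                ... last user gone, safe to free the pool ...
 */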

#endif /* _NET_PAGE_POOL_H */
