root/tools/lib/bpf/xsk.h


DEFINITIONS

This source file includes the following definitions.
  1. xsk_ring_prod__fill_addr
  2. xsk_ring_cons__comp_addr
  3. xsk_ring_prod__tx_desc
  4. xsk_ring_cons__rx_desc
  5. xsk_ring_prod__needs_wakeup
  6. xsk_prod_nb_free
  7. xsk_cons_nb_avail
  8. xsk_ring_prod__reserve
  9. xsk_ring_prod__submit
  10. xsk_ring_cons__peek
  11. xsk_ring_cons__release
  12. xsk_umem__get_data
  13. xsk_umem__extract_addr
  14. xsk_umem__extract_offset
  15. xsk_umem__add_offset_to_addr

/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __LIBBPF_XSK_H
#define __LIBBPF_XSK_H

#include <stdio.h>
#include <stdint.h>
#include <linux/if_xdp.h>

#include "libbpf.h"
#include "libbpf_util.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
        __u32 cached_prod; \
        __u32 cached_cons; \
        __u32 mask; \
        __u32 size; \
        __u32 *producer; \
        __u32 *consumer; \
        void *ring; \
        __u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

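/* Illustrative note (not part of the original header): all four rings
 * (fill, completion, RX, TX) are power-of-two sized, so mask == size - 1
 * and a free-running 32-bit index is mapped to a slot with idx & mask.
 * For example, with size = 2048 (mask = 2047), indices 0, 2048 and 4096
 * all land in slot 0; the accessors below rely on this wrap-around.
 */
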
struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
                                              __u32 idx)
{
        __u64 *addrs = (__u64 *)fill->ring;

        return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
        const __u64 *addrs = (const __u64 *)comp->ring;

        return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
                                                      __u32 idx)
{
        struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

        return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
        const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

        return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
        return *r->flags & XDP_RING_NEED_WAKEUP;
}

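/* Illustrative sketch (not part of the original header): when the socket
 * is bound with the XDP_USE_NEED_WAKEUP flag, the application should
 * kick the kernel only when the flag above is set, e.g. after queueing
 * TX descriptors:
 *
 *      if (xsk_ring_prod__needs_wakeup(&tx))
 *              sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * For the fill ring, the conventional kick is a poll() or recvfrom() on
 * the same socket fd.
 */
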
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
        __u32 free_entries = r->cached_cons - r->cached_prod;

        if (free_entries >= nb)
                return free_entries;

        /* Refresh the local tail pointer.
         * cached_cons is r->size bigger than the real consumer pointer so
         * that this addition can be avoided in the more frequently
         * executed code that computes free_entries at the beginning of
         * this function. Without this optimization it would have been
         * free_entries = r->cached_cons - r->cached_prod + r->size.
         */
        r->cached_cons = *r->consumer + r->size;

        return r->cached_cons - r->cached_prod;
}

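/* Worked example (illustrative): with size = 8, *consumer = 10 and
 * cached_prod = 14, the refresh sets cached_cons = 10 + 8 = 18, so
 * free_entries = 18 - 14 = 4, i.e. size - (prod - cons). Unsigned
 * 32-bit wrap-around keeps this correct even when the indices overflow.
 */
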
static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
        __u32 entries = r->cached_prod - r->cached_cons;

        if (entries == 0) {
                r->cached_prod = *r->producer;
                entries = r->cached_prod - r->cached_cons;
        }

        return (entries > nb) ? nb : entries;
}

static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
                                            size_t nb, __u32 *idx)
{
        if (xsk_prod_nb_free(prod, nb) < nb)
                return 0;

        *idx = prod->cached_prod;
        prod->cached_prod += nb;

        return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
{
        /* Make sure everything has been written to the ring before indicating
         * this to the kernel by writing the producer pointer.
         */
        libbpf_smp_wmb();

        *prod->producer += nb;
}

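/* Illustrative sketch (not part of the original header): producing to
 * the fill ring follows a reserve/write/submit pattern. The batch size
 * and frame layout below are example values.
 *
 *      __u32 idx;
 *
 *      if (xsk_ring_prod__reserve(&fill, 64, &idx) == 64) {
 *              for (__u32 i = 0; i < 64; i++)
 *                      *xsk_ring_prod__fill_addr(&fill, idx + i) =
 *                              (__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *              xsk_ring_prod__submit(&fill, 64);
 *      }
 *
 * TX works the same way, using xsk_ring_prod__tx_desc() to fill in the
 * addr and len fields of each descriptor.
 */
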
static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
                                         size_t nb, __u32 *idx)
{
        size_t entries = xsk_cons_nb_avail(cons, nb);

        if (entries > 0) {
                /* Make sure we do not speculatively read the data before
                 * we have received the packet buffers from the ring.
                 */
                libbpf_smp_rmb();

                *idx = cons->cached_cons;
                cons->cached_cons += entries;
        }

        return entries;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
{
        /* Make sure data has been read before indicating we are done
         * with the entries by updating the consumer pointer.
         */
        libbpf_smp_rwmb();

        *cons->consumer += nb;
}

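/* Illustrative sketch (not part of the original header): a minimal RX
 * loop pairs peek with release; umem_area is assumed to be the buffer
 * that was registered with the umem.
 *
 *      __u32 idx;
 *      size_t rcvd = xsk_ring_cons__peek(&rx, 32, &idx);
 *
 *      for (size_t i = 0; i < rcvd; i++) {
 *              const struct xdp_desc *desc =
 *                      xsk_ring_cons__rx_desc(&rx, idx + i);
 *              void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *              ... process desc->len bytes at pkt ...
 *      }
 *      if (rcvd)
 *              xsk_ring_cons__release(&rx, rcvd);
 */
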
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
        return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
        return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
        return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
        return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}

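/* Illustrative note (not part of the original header): in unaligned
 * chunk mode (XDP_UMEM_UNALIGNED_CHUNK_FLAG) a descriptor address packs
 * a packet offset into its upper 16 bits (XSK_UNALIGNED_BUF_OFFSET_SHIFT
 * is 48 in linux/if_xdp.h). For example, addr = (64ULL << 48) | 4096
 * decodes to base address 4096 and offset 64, and
 * xsk_umem__add_offset_to_addr() yields 4160, the byte position to pass
 * to xsk_umem__get_data().
 */
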
LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
        __u32 fill_size;
        __u32 comp_size;
        __u32 frame_size;
        __u32 frame_headroom;
        __u32 flags;
};

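/* Illustrative sketch (not part of the original header): overriding one
 * field while keeping the documented defaults for the rest. Passing NULL
 * to xsk_umem__create() instead selects all defaults.
 *
 *      struct xsk_umem_config cfg = {
 *              .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *              .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *              .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *              .frame_headroom = 256,
 *              .flags = XSK_UMEM__DEFAULT_FLAGS,
 *      };
 */
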
/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
        __u32 rx_size;
        __u32 tx_size;
        __u32 libbpf_flags;
        __u32 xdp_flags;
        __u16 bind_flags;
};

/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
                                void *umem_area, __u64 size,
                                struct xsk_ring_prod *fill,
                                struct xsk_ring_cons *comp,
                                const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
                                       void *umem_area, __u64 size,
                                       struct xsk_ring_prod *fill,
                                       struct xsk_ring_cons *comp,
                                       const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
                                       void *umem_area, __u64 size,
                                       struct xsk_ring_prod *fill,
                                       struct xsk_ring_cons *comp,
                                       const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
                                  const char *ifname, __u32 queue_id,
                                  struct xsk_umem *umem,
                                  struct xsk_ring_cons *rx,
                                  struct xsk_ring_prod *tx,
                                  const struct xsk_socket_config *config);

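/* Illustrative end-to-end sketch (not part of the original header); the
 * interface name, queue id and buffer size are example values, and error
 * handling is omitted for brevity. xsk_umem__create() expects
 * page-aligned memory, which mmap() provides.
 *
 *      struct xsk_ring_prod fill, tx;
 *      struct xsk_ring_cons comp, rx;
 *      struct xsk_umem *umem;
 *      struct xsk_socket *xsk;
 *      size_t sz = XSK_RING_PROD__DEFAULT_NUM_DESCS *
 *                  XSK_UMEM__DEFAULT_FRAME_SIZE;
 *      void *bufs = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *      xsk_umem__create(&umem, bufs, sz, &fill, &comp, NULL);
 *      xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 *      ...
 *      xsk_socket__delete(xsk);
 *      xsk_umem__delete(umem);
 *
 * Teardown order matters: sockets must be deleted before their umem, or
 * xsk_umem__delete() returns -EBUSY (see below).
 */
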
/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_XSK_H */
