xdp 118 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c struct xdp_buff xdp;
xdp 136 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp.data_hard_start = *data_ptr - offset;
xdp 137 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp.data = *data_ptr;
xdp 138 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp_set_data_meta_invalid(&xdp);
xdp 139 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp.data_end = *data_ptr + *len;
xdp 140 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp.rxq = &rxr->xdp_rxq;
xdp 141 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c orig_data = xdp.data;
xdp 144 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 154 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c *len = xdp.data_end - xdp.data;
xdp 155 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c if (orig_data != xdp.data) {
xdp 156 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c offset = xdp.data - xdp.data_hard_start;
xdp 157 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c *data_ptr = xdp.data_hard_start + offset;
xdp 193 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
xdp 235 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c struct xdp_frame *xdp = frames[i];
xdp 239 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp_return_frame_rx_napi(xdp);
xdp 244 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
xdp 248 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp_return_frame_rx_napi(xdp);
xdp 252 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
xdp 323 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 328 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c switch (xdp->command) {
xdp 330 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c rc = bnxt_xdp_set(bp, xdp->prog);
xdp 333 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0;
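
The bnxt entries above walk the receive-path pattern that almost every driver in this listing repeats: wrap the DMA buffer in a struct xdp_buff, run the attached program, then fold any head/tail moves the program made back into the driver's own bookkeeping. A condensed, hedged sketch of that pattern follows; the function name and the headroom/len plumbing are illustrative, not bnxt's:

#include <linux/filter.h>
#include <net/xdp.h>

/* Sketch only: mirrors the xdp_buff setup seen in bnxt_xdp.c above. */
static u32 my_run_xdp(struct bpf_prog *xdp_prog, struct xdp_rxq_info *rxq,
		      void *hard_start, unsigned int *headroom,
		      unsigned int *len)
{
	struct xdp_buff xdp;
	void *orig_data;
	u32 act;

	xdp.data_hard_start = hard_start;
	xdp.data = hard_start + *headroom;
	xdp_set_data_meta_invalid(&xdp);	/* driver has no metadata support */
	xdp.data_end = xdp.data + *len;
	xdp.rxq = rxq;
	orig_data = xdp.data;

	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* The program may have moved data/data_end via the adjust helpers. */
	*len = xdp.data_end - xdp.data;
	if (xdp.data != orig_data)
		*headroom = xdp.data - xdp.data_hard_start;

	return act;
}
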
xdp 20 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
xdp 534 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct xdp_buff xdp;
xdp 551 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp.data_hard_start = page_address(page);
xdp 552 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp.data = (void *)cpu_addr;
xdp 553 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp_set_data_meta_invalid(&xdp);
xdp 554 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp.data_end = xdp.data + len;
xdp 555 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp.rxq = &rq->xdp_rxq;
xdp 556 drivers/net/ethernet/cavium/thunder/nicvf_main.c orig_data = xdp.data;
xdp 559 drivers/net/ethernet/cavium/thunder/nicvf_main.c action = bpf_prog_run_xdp(prog, &xdp);
xdp 562 drivers/net/ethernet/cavium/thunder/nicvf_main.c len = xdp.data_end - xdp.data;
xdp 564 drivers/net/ethernet/cavium/thunder/nicvf_main.c if (orig_data != xdp.data) {
xdp 565 drivers/net/ethernet/cavium/thunder/nicvf_main.c offset = orig_data - xdp.data;
xdp 585 drivers/net/ethernet/cavium/thunder/nicvf_main.c *skb = build_skb(xdp.data,
xdp 593 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
xdp 1900 drivers/net/ethernet/cavium/thunder/nicvf_main.c static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
xdp 1911 drivers/net/ethernet/cavium/thunder/nicvf_main.c switch (xdp->command) {
xdp 1913 drivers/net/ethernet/cavium/thunder/nicvf_main.c return nicvf_xdp_setup(nic, xdp->prog);
xdp 1915 drivers/net/ethernet/cavium/thunder/nicvf_main.c xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
xdp 1638 drivers/net/ethernet/cavium/thunder/nicvf_queues.c u64 buf_addr, bool xdp)
xdp 1643 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (xdp) {
xdp 1663 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct cqe_rx_t *cqe_rx, bool xdp)
xdp 1700 drivers/net/ethernet/cavium/thunder/nicvf_queues.c phys_addr, xdp);
xdp 1710 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
xdp 350 drivers/net/ethernet/cavium/thunder/nicvf_queues.h struct cqe_rx_t *cqe_rx, bool xdp);
xdp 226 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
xdp 227 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
xdp 231 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.drop_bufs,
xdp 232 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.drop_cnt)) == -EBUSY)
xdp 236 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
xdp 237 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->buf_count -= ch->xdp.drop_cnt;
xdp 240 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.drop_cnt = 0;
xdp 283 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct xdp_buff xdp;
xdp 291 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_prog = READ_ONCE(ch->xdp.prog);
xdp 295 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data = vaddr + dpaa2_fd_get_offset(fd);
xdp 296 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
xdp 297 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
xdp 298 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_set_data_meta_invalid(&xdp);
xdp 299 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.rxq = &ch->xdp_rxq;
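
The bnxt_xdp() and nicvf_xdp() references show the .ndo_bpf shape shared by every driver here: switch on xdp->command, install on XDP_SETUP_PROG, report the program id on XDP_QUERY_PROG. A minimal sketch, assuming a hypothetical my_priv holding the program pointer:

#include <linux/bpf.h>
#include <linux/netdevice.h>

struct my_priv {
	struct bpf_prog *xdp_prog;
};

static int my_xdp_setup(struct my_priv *priv, struct bpf_prog *prog)
{
	/* Swap the program; real drivers also reconfigure rings/MTU here. */
	struct bpf_prog *old = xchg(&priv->xdp_prog, prog);

	if (old)
		bpf_prog_put(old);
	return 0;
}

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return my_xdp_setup(priv, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
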
xdp 301 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 304 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dpaa2_fd_set_offset(fd, xdp.data - vaddr);
xdp 305 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
xdp 336 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp.data_hard_start = vaddr;
xdp 337 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
xdp 345 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.res |= xdp_act;
xdp 699 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
xdp 724 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_return_frame(swa->xdp.xdpf);
xdp 1124 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c ch->xdp.res = 0;
xdp 1183 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (ch->xdp.res & XDP_REDIRECT)
xdp 1807 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c old = xchg(&ch->xdp.prog, prog);
xdp 1829 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 1833 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c switch (xdp->command) {
xdp 1835 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c return setup_xdp(dev, xdp->prog);
xdp 1837 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
xdp 1884 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
xdp 1885 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c swa->xdp.xdpf = xdpf;
xdp 1888 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c swa->xdp.dma_size,
xdp 117 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h } xdp;
xdp 339 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h struct dpaa2_eth_ch_xdp xdp;
xdp 12816 drivers/net/ethernet/intel/i40e/i40e_main.c struct netdev_bpf *xdp)
xdp 12824 drivers/net/ethernet/intel/i40e/i40e_main.c switch (xdp->command) {
xdp 12826 drivers/net/ethernet/intel/i40e/i40e_main.c return i40e_xdp_setup(vsi, xdp->prog);
xdp 12828 drivers/net/ethernet/intel/i40e/i40e_main.c xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
xdp 12831 drivers/net/ethernet/intel/i40e/i40e_main.c return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
xdp 12832 drivers/net/ethernet/intel/i40e/i40e_main.c xdp->xsk.queue_id);
xdp 1996 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct xdp_buff *xdp)
xdp 1998 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int size = xdp->data_end - xdp->data;
xdp 2008 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(xdp->data);
xdp 2010 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(xdp->data + L1_CACHE_BYTES);
xdp 2038 drivers/net/ethernet/intel/i40e/i40e_txrx.c headlen = eth_get_headlen(skb->dev, xdp->data,
xdp 2042 drivers/net/ethernet/intel/i40e/i40e_txrx.c memcpy(__skb_put(skb, headlen), xdp->data,
xdp 2077 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct xdp_buff *xdp)
xdp 2079 drivers/net/ethernet/intel/i40e/i40e_txrx.c unsigned int metasize = xdp->data - xdp->data_meta;
xdp 2084 drivers/net/ethernet/intel/i40e/i40e_txrx.c SKB_DATA_ALIGN(xdp->data_end -
xdp 2085 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp->data_hard_start);
xdp 2094 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(xdp->data_meta);
xdp 2096 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetch(xdp->data_meta + L1_CACHE_BYTES);
xdp 2099 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = build_skb(xdp->data_hard_start, truesize);
xdp 2104 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 2105 drivers/net/ethernet/intel/i40e/i40e_txrx.c __skb_put(skb, xdp->data_end - xdp->data);
xdp 2181 drivers/net/ethernet/intel/i40e/i40e_txrx.c int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
xdp 2183 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
xdp 2197 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct xdp_buff *xdp)
xdp 2210 drivers/net/ethernet/intel/i40e/i40e_txrx.c prefetchw(xdp->data_hard_start); /* xdp_frame write */
xdp 2212 drivers/net/ethernet/intel/i40e/i40e_txrx.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 2218 drivers/net/ethernet/intel/i40e/i40e_txrx.c result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
xdp 2221 drivers/net/ethernet/intel/i40e/i40e_txrx.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
xdp 2336 drivers/net/ethernet/intel/i40e/i40e_txrx.c struct xdp_buff xdp;
xdp 2338 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 2386 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.data = page_address(rx_buffer->page) +
xdp 2388 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.data_meta = xdp.data;
xdp 2389 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.data_hard_start = xdp.data -
xdp 2391 drivers/net/ethernet/intel/i40e/i40e_txrx.c xdp.data_end = xdp.data + size;
xdp 2393 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_run_xdp(rx_ring, &xdp);
xdp 2410 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
xdp 2412 drivers/net/ethernet/intel/i40e/i40e_txrx.c skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
xdp 9 drivers/net/ethernet/intel/i40e/i40e_txrx_common.h int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
xdp 191 drivers/net/ethernet/intel/i40e/i40e_xsk.c static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
xdp 205 drivers/net/ethernet/intel/i40e/i40e_xsk.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 206 drivers/net/ethernet/intel/i40e/i40e_xsk.c offset = xdp->data - xdp->data_hard_start;
xdp 208 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
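
The i40e_txrx.c lines 2099-2105 above are the zero-copy-into-skb half of XDP_PASS: build_skb() wraps the already-mapped page, then skb_reserve()/__skb_put() replay the xdp_buff geometry. A hedged sketch of that step (assumes the driver kept data_meta valid, as i40e does):

#include <linux/skbuff.h>
#include <net/xdp.h>

static struct sk_buff *my_build_skb(struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int truesize =
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start); /* headroom */
	__skb_put(skb, xdp->data_end - xdp->data);          /* payload */
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}
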
xdp 215 drivers/net/ethernet/intel/i40e/i40e_xsk.c result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
xdp 218 drivers/net/ethernet/intel/i40e/i40e_xsk.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
xdp 488 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct xdp_buff *xdp)
xdp 490 drivers/net/ethernet/intel/i40e/i40e_xsk.c unsigned int metasize = xdp->data - xdp->data_meta;
xdp 491 drivers/net/ethernet/intel/i40e/i40e_xsk.c unsigned int datasize = xdp->data_end - xdp->data;
xdp 496 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp->data_end - xdp->data_hard_start,
xdp 501 drivers/net/ethernet/intel/i40e/i40e_xsk.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 502 drivers/net/ethernet/intel/i40e/i40e_xsk.c memcpy(__skb_put(skb, datasize), xdp->data, datasize);
xdp 537 drivers/net/ethernet/intel/i40e/i40e_xsk.c struct xdp_buff xdp;
xdp 539 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 577 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.data = bi->addr;
xdp 578 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.data_meta = xdp.data;
xdp 579 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
xdp 580 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.data_end = xdp.data + size;
xdp 581 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp.handle = bi->handle;
xdp 583 drivers/net/ethernet/intel/i40e/i40e_xsk.c xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
xdp 607 drivers/net/ethernet/intel/i40e/i40e_xsk.c skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
xdp 2087 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct xdp_buff *xdp,
xdp 2090 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int size = xdp->data_end - xdp->data;
xdp 2094 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
xdp 2095 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->data_hard_start);
xdp 2100 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetch(xdp->data);
xdp 2102 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetch(xdp->data + L1_CACHE_BYTES);
xdp 2130 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->data - page_address(rx_buffer->page),
xdp 2139 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->data, ALIGN(size, sizeof(long)));
xdp 2148 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct xdp_buff *xdp,
xdp 2151 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int metasize = xdp->data - xdp->data_meta;
xdp 2156 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c SKB_DATA_ALIGN(xdp->data_end -
xdp 2157 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->data_hard_start);
xdp 2166 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetch(xdp->data_meta);
xdp 2168 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetch(xdp->data_meta + L1_CACHE_BYTES);
xdp 2172 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = build_skb(xdp->data_hard_start, truesize);
xdp 2177 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 2178 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __skb_put(skb, xdp->data_end - xdp->data);
xdp 2198 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct xdp_buff *xdp)
xdp 2211 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c prefetchw(xdp->data_hard_start); /* xdp_frame write */
xdp 2213 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 2218 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdpf = convert_to_xdp_frame(xdp);
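
The i40e_xsk.c entries above (and the ixgbe_xsk.c ones below) add one step to the AF_XDP zero-copy variant: because the frame lives in user-mapped umem memory, any bpf_xdp_adjust_head() done by the program must be folded back into the umem handle before the descriptor is posted. A sketch under that assumption; the umem plumbing is illustrative:

#include <linux/filter.h>
#include <net/xdp_sock.h>

static u32 my_run_xdp_zc(struct bpf_prog *prog, struct xdp_umem *umem,
			 struct xdp_buff *xdp)
{
	u64 offset;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);
	/* Re-base the handle onto the (possibly moved) data pointer. */
	offset = xdp->data - xdp->data_hard_start;
	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);

	return act;
}
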
xdp 2226 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
xdp 2289 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct xdp_buff xdp;
xdp 2291 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 2320 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.data = page_address(rx_buffer->page) +
xdp 2322 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.data_meta = xdp.data;
xdp 2323 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.data_hard_start = xdp.data -
xdp 2325 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp.data_end = xdp.data + size;
xdp 2327 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb = ixgbe_run_xdp(adapter, rx_ring, &xdp);
xdp 2345 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c &xdp, rx_desc);
xdp 2348 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c &xdp, rx_desc);
xdp 10300 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 10304 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c switch (xdp->command) {
xdp 10306 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c return ixgbe_xdp_setup(dev, xdp->prog);
xdp 10308 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->prog_id = adapter->xdp_prog ?
xdp 10312 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
xdp 10313 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c xdp->xsk.queue_id);
xdp 144 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_buff *xdp)
xdp 155 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 156 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c offset = xdp->data - xdp->data_hard_start;
xdp 158 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
xdp 164 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdpf = convert_to_xdp_frame(xdp);
xdp 172 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
xdp 396 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_buff *xdp)
xdp 398 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c unsigned int metasize = xdp->data - xdp->data_meta;
xdp 399 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c unsigned int datasize = xdp->data_end - xdp->data;
xdp 404 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp->data_end - xdp->data_hard_start,
xdp 409 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 410 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c memcpy(__skb_put(skb, datasize), xdp->data, datasize);
xdp 437 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c struct xdp_buff xdp;
xdp 439 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 485 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.data = bi->addr;
xdp 486 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.data_meta = xdp.data;
xdp 487 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
xdp 488 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.data_end = xdp.data + size;
xdp 489 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp.handle = bi->handle;
xdp 491 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
xdp 510 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
xdp 858 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct xdp_buff *xdp,
xdp 861 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int size = xdp->data_end - xdp->data;
xdp 865 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
xdp 866 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp->data_hard_start);
xdp 872 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c prefetch(xdp->data);
xdp 874 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c prefetch(xdp->data + L1_CACHE_BYTES);
xdp 899 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c headlen = eth_get_headlen(skb->dev, xdp->data,
xdp 903 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c memcpy(__skb_put(skb, headlen), xdp->data,
xdp 910 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c (xdp->data + headlen) -
xdp 935 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct xdp_buff *xdp,
xdp 938 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c unsigned int metasize = xdp->data - xdp->data_meta;
xdp 943 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c SKB_DATA_ALIGN(xdp->data_end -
xdp 944 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp->data_hard_start);
xdp 953 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c prefetch(xdp->data_meta);
xdp 955 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c prefetch(xdp->data_meta + L1_CACHE_BYTES);
xdp 959 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = build_skb(xdp->data_hard_start, truesize);
xdp 964 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 965 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c __skb_put(skb, xdp->data_end - xdp->data);
xdp 984 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct xdp_buff *xdp)
xdp 992 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c len = xdp->data_end - xdp->data;
xdp 997 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE);
xdp 1007 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c tx_buffer->data = xdp->data;
xdp 1062 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct xdp_buff *xdp)
xdp 1075 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 1081 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
xdp 1124 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct xdp_buff xdp;
xdp 1126 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 1154 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.data = page_address(rx_buffer->page) +
xdp 1156 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.data_meta = xdp.data;
xdp 1157 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.data_hard_start = xdp.data -
xdp 1159 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp.data_end = xdp.data + size;
xdp 1161 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb = ixgbevf_run_xdp(adapter, rx_ring, &xdp);
xdp 1178 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c &xdp, rx_desc);
xdp 1181 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c &xdp, rx_desc);
xdp 4486 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 4490 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c switch (xdp->command) {
xdp 4492 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c return ixgbevf_xdp_setup(dev, xdp->prog);
xdp 4494 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xdp->prog_id = adapter->xdp_prog ?
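
Lines 992-1007 of ixgbevf_main.c above are the XDP_TX transmit half: the buffer the program bounced is DMA-mapped toward the device and queued on the dedicated XDP TX ring. A skeleton of that step; the descriptor write is device-specific and only hinted at:

#include <linux/dma-mapping.h>
#include <net/xdp.h>

static int my_xmit_xdp_ring(struct device *dev, struct xdp_buff *xdp)
{
	unsigned int len = xdp->data_end - xdp->data;
	dma_addr_t dma;

	dma = dma_map_single(dev, xdp->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* caller drops the packet */

	/* ... write a TX descriptor carrying (dma, len), bump the tail ... */
	return 0;
}
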
xdp 2900 drivers/net/ethernet/mellanox/mlx4/en_netdev.c static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 2902 drivers/net/ethernet/mellanox/mlx4/en_netdev.c switch (xdp->command) {
xdp 2904 drivers/net/ethernet/mellanox/mlx4/en_netdev.c return mlx4_xdp_set(dev, xdp->prog);
xdp 2906 drivers/net/ethernet/mellanox/mlx4/en_netdev.c xdp->prog_id = mlx4_xdp_query(dev);
xdp 673 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct xdp_buff xdp;
xdp 685 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp.rxq = &ring->xdp_rxq;
xdp 779 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp.data_hard_start = va - frags[0].page_offset;
xdp 780 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp.data = va;
xdp 781 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp_set_data_meta_invalid(&xdp);
xdp 782 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp.data_end = xdp.data + length;
xdp 783 drivers/net/ethernet/mellanox/mlx4/en_rx.c orig_data = xdp.data;
xdp 785 drivers/net/ethernet/mellanox/mlx4/en_rx.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 787 drivers/net/ethernet/mellanox/mlx4/en_rx.c length = xdp.data_end - xdp.data;
xdp 788 drivers/net/ethernet/mellanox/mlx4/en_rx.c if (xdp.data != orig_data) {
xdp 789 drivers/net/ethernet/mellanox/mlx4/en_rx.c frags[0].page_offset = xdp.data -
xdp 790 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp.data_hard_start;
xdp 791 drivers/net/ethernet/mellanox/mlx4/en_rx.c va = xdp.data;
xdp 713 drivers/net/ethernet/mellanox/mlx5/core/en.h bool xdp;
xdp 60 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_dma_info *di, struct xdp_buff *xdp)
xdp 67 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdpf = convert_to_xdp_frame(xdp);
xdp 74 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) {
xdp 126 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct xdp_buff xdp;
xdp 133 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.data = va + *rx_headroom;
xdp 134 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp_set_data_meta_invalid(&xdp);
xdp 135 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.data_end = xdp.data + *len;
xdp 136 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.data_hard_start = va;
xdp 138 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.handle = di->xsk.handle;
xdp 139 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.rxq = &rq->xdp_rxq;
xdp 141 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c act = bpf_prog_run_xdp(prog, &xdp);
xdp 143 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c u64 off = xdp.data - xdp.data_hard_start;
xdp 145 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
xdp 149 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c *rx_headroom = xdp.data - xdp.data_hard_start;
xdp 150 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c *len = xdp.data_end - xdp.data;
xdp 153 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp)))
xdp 159 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c err = xdp_do_redirect(rq->netdev, &xdp, prog);
xdp 910 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
xdp 1872 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
xdp 1887 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp) {
xdp 1908 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp)
xdp 1920 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp)
xdp 1942 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp)
xdp 1947 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (c->xdp)
xdp 1993 drivers/net/ethernet/mellanox/mlx5/core/en_main.c c->xdp = !!params->xdp_prog;
xdp 4522 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 4524 drivers/net/ethernet/mellanox/mlx5/core/en_main.c switch (xdp->command) {
xdp 4526 drivers/net/ethernet/mellanox/mlx5/core/en_main.c return mlx5e_xdp_set(dev, xdp->prog);
xdp 4528 drivers/net/ethernet/mellanox/mlx5/core/en_main.c xdp->prog_id = mlx5e_xdp_query(dev);
xdp 4531 drivers/net/ethernet/mellanox/mlx5/core/en_main.c return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
xdp 4532 drivers/net/ethernet/mellanox/mlx5/core/en_main.c xdp->xsk.queue_id);
xdp 131 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (c->xdp)
xdp 136 drivers/net/ethernet/netronome/nfp/nfp_app.h struct netdev_bpf *xdp);
xdp 620 drivers/net/ethernet/netronome/nfp/nfp_net.h struct xdp_attachment_info xdp;
xdp 1358 drivers/net/ethernet/netronome/nfp/nfp_net_common.c nfp_net_free_frag(void *frag, bool xdp)
xdp 1360 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!xdp)
xdp 1812 drivers/net/ethernet/netronome/nfp/nfp_net_common.c struct xdp_buff xdp;
xdp 1818 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.rxq = &rx_ring->xdp_rxq;
xdp 1911 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
xdp 1912 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.data = orig_data;
xdp 1913 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.data_meta = orig_data;
xdp 1914 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp.data_end = orig_data + pkt_len;
xdp 1916 drivers/net/ethernet/netronome/nfp/nfp_net_common.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 1918 drivers/net/ethernet/netronome/nfp/nfp_net_common.c pkt_len = xdp.data_end - xdp.data;
xdp 1919 drivers/net/ethernet/netronome/nfp/nfp_net_common.c pkt_off += xdp.data - orig_data;
xdp 1923 drivers/net/ethernet/netronome/nfp/nfp_net_common.c meta_len_xdp = xdp.data - xdp.data_meta;
xdp 3650 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
xdp 3655 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp_attachment_setup(&nn->xdp, bpf);
xdp 3673 drivers/net/ethernet/netronome/nfp/nfp_net_common.c xdp_attachment_setup(&nn->xdp, bpf);
xdp 3692 drivers/net/ethernet/netronome/nfp/nfp_net_common.c static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
xdp 3696 drivers/net/ethernet/netronome/nfp/nfp_net_common.c switch (xdp->command) {
xdp 3698 drivers/net/ethernet/netronome/nfp/nfp_net_common.c return nfp_net_xdp_setup_drv(nn, xdp);
xdp 3700 drivers/net/ethernet/netronome/nfp/nfp_net_common.c return nfp_net_xdp_setup_hw(nn, xdp);
xdp 3702 drivers/net/ethernet/netronome/nfp/nfp_net_common.c return xdp_attachment_query(&nn->xdp, xdp);
xdp 3704 drivers/net/ethernet/netronome/nfp/nfp_net_common.c return xdp_attachment_query(&nn->xdp_hw, xdp);
xdp 3706 drivers/net/ethernet/netronome/nfp/nfp_net_common.c return nfp_app_bpf(nn->app, nn, xdp);
xdp 150 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c struct dentry *queues, *tx, *rx, *xdp;
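
Unlike the hand-rolled program tracking in the drivers above, the nfp and netdevsim references use the shared xdp_attachment_* helpers to remember the attached program and its flags. A sketch of an ndo_bpf built on them; my_priv and the datapath reprogramming step are assumptions:

#include <linux/netdevice.h>
#include <net/xdp.h>

struct my_priv {
	struct xdp_attachment_info xdp;
};

static int my_setup(struct my_priv *priv, struct netdev_bpf *bpf)
{
	if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
		return -EBUSY;	/* flags conflict with current attachment */

	/* ... reprogram the datapath with bpf->prog here ... */
	xdp_attachment_setup(&priv->xdp, bpf);	/* records prog + flags */
	return 0;
}

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return my_setup(priv, bpf);
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}
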
xdp 168 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c xdp = debugfs_create_dir("xdp", queues);
xdp 174 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c debugfs_create_file(name, 0400, xdp,
xdp 433 drivers/net/ethernet/qlogic/qede/qede.h struct sw_tx_xdp *xdp;
xdp 552 drivers/net/ethernet/qlogic/qede/qede.h int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
xdp 1114 drivers/net/ethernet/qlogic/qede/qede_filter.c int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 1118 drivers/net/ethernet/qlogic/qede/qede_filter.c switch (xdp->command) {
xdp 1120 drivers/net/ethernet/qlogic/qede/qede_filter.c return qede_xdp_set(edev, xdp->prog);
xdp 1122 drivers/net/ethernet/qlogic/qede/qede_filter.c xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
xdp 365 drivers/net/ethernet/qlogic/qede/qede_fp.c txq->sw_tx_ring.xdp[idx].page = metadata->data;
xdp 366 drivers/net/ethernet/qlogic/qede/qede_fp.c txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
xdp 400 drivers/net/ethernet/qlogic/qede/qede_fp.c txq->sw_tx_ring.xdp[idx].mapping,
xdp 402 drivers/net/ethernet/qlogic/qede/qede_fp.c __free_page(txq->sw_tx_ring.xdp[idx].page);
xdp 1061 drivers/net/ethernet/qlogic/qede/qede_fp.c struct xdp_buff xdp;
xdp 1064 drivers/net/ethernet/qlogic/qede/qede_fp.c xdp.data_hard_start = page_address(bd->data);
xdp 1065 drivers/net/ethernet/qlogic/qede/qede_fp.c xdp.data = xdp.data_hard_start + *data_offset;
xdp 1066 drivers/net/ethernet/qlogic/qede/qede_fp.c xdp_set_data_meta_invalid(&xdp);
xdp 1067 drivers/net/ethernet/qlogic/qede/qede_fp.c xdp.data_end = xdp.data + *len;
xdp 1068 drivers/net/ethernet/qlogic/qede/qede_fp.c xdp.rxq = &rxq->xdp_rxq;
xdp 1075 drivers/net/ethernet/qlogic/qede/qede_fp.c act = bpf_prog_run_xdp(prog, &xdp);
xdp 1079 drivers/net/ethernet/qlogic/qede/qede_fp.c *data_offset = xdp.data - xdp.data_hard_start;
xdp 1080 drivers/net/ethernet/qlogic/qede/qede_fp.c *len = xdp.data_end - xdp.data;
xdp 1465 drivers/net/ethernet/qlogic/qede/qede_main.c kfree(txq->sw_tx_ring.xdp);
xdp 1483 drivers/net/ethernet/qlogic/qede/qede_main.c size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
xdp 1484 drivers/net/ethernet/qlogic/qede/qede_main.c txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
xdp 1485 drivers/net/ethernet/qlogic/qede/qede_main.c if (!txq->sw_tx_ring.xdp)
xdp 866 drivers/net/ethernet/socionext/netsec.c static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
xdp 869 drivers/net/ethernet/socionext/netsec.c struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
xdp 883 drivers/net/ethernet/socionext/netsec.c struct xdp_buff *xdp)
xdp 889 drivers/net/ethernet/socionext/netsec.c act = bpf_prog_run_xdp(prog, xdp);
xdp 896 drivers/net/ethernet/socionext/netsec.c ret = netsec_xdp_xmit_back(priv, xdp);
xdp 898 drivers/net/ethernet/socionext/netsec.c xdp_return_buff(xdp);
xdp 901 drivers/net/ethernet/socionext/netsec.c err = xdp_do_redirect(priv->ndev, xdp, prog);
xdp 906 drivers/net/ethernet/socionext/netsec.c xdp_return_buff(xdp);
xdp 917 drivers/net/ethernet/socionext/netsec.c xdp_return_buff(xdp);
xdp 948 drivers/net/ethernet/socionext/netsec.c struct xdp_buff xdp;
xdp 993 drivers/net/ethernet/socionext/netsec.c xdp.data_hard_start = desc->addr;
xdp 994 drivers/net/ethernet/socionext/netsec.c xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
xdp 995 drivers/net/ethernet/socionext/netsec.c xdp_set_data_meta_invalid(&xdp);
xdp 996 drivers/net/ethernet/socionext/netsec.c xdp.data_end = xdp.data + pkt_len;
xdp 997 drivers/net/ethernet/socionext/netsec.c xdp.rxq = &dring->xdp_rxq;
xdp 1000 drivers/net/ethernet/socionext/netsec.c xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
xdp 1023 drivers/net/ethernet/socionext/netsec.c skb_reserve(skb, xdp.data - xdp.data_hard_start);
xdp 1024 drivers/net/ethernet/socionext/netsec.c skb_put(skb, xdp.data_end - xdp.data);
xdp 1035 drivers/net/ethernet/socionext/netsec.c ndev->stats.rx_bytes += xdp.data_end - xdp.data;
xdp 1799 drivers/net/ethernet/socionext/netsec.c static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
xdp 1803 drivers/net/ethernet/socionext/netsec.c switch (xdp->command) {
xdp 1805 drivers/net/ethernet/socionext/netsec.c return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
xdp 1807 drivers/net/ethernet/socionext/netsec.c xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
xdp 488 drivers/net/ethernet/ti/cpsw.c static int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
xdp 506 drivers/net/ethernet/ti/cpsw.c act = bpf_prog_run_xdp(prog, xdp);
xdp 512 drivers/net/ethernet/ti/cpsw.c xdpf = convert_to_xdp_frame(xdp);
xdp 519 drivers/net/ethernet/ti/cpsw.c if (xdp_do_redirect(ndev, xdp, prog))
xdp 685 drivers/net/ethernet/ti/cpsw.c struct xdp_buff xdp;
xdp 724 drivers/net/ethernet/ti/cpsw.c xdp.data = pa + CPSW_HEADROOM +
xdp 726 drivers/net/ethernet/ti/cpsw.c xdp.data_end = xdp.data + len -
xdp 729 drivers/net/ethernet/ti/cpsw.c xdp.data = pa + CPSW_HEADROOM;
xdp 730 drivers/net/ethernet/ti/cpsw.c xdp.data_end = xdp.data + len;
xdp 733 drivers/net/ethernet/ti/cpsw.c xdp_set_data_meta_invalid(&xdp);
xdp 735 drivers/net/ethernet/ti/cpsw.c xdp.data_hard_start = pa;
xdp 736 drivers/net/ethernet/ti/cpsw.c xdp.rxq = &priv->xdp_rxq[ch];
xdp 738 drivers/net/ethernet/ti/cpsw.c ret = cpsw_run_xdp(priv, ch, &xdp, page);
xdp 743 drivers/net/ethernet/ti/cpsw.c len = xdp.data_end - xdp.data;
xdp 744 drivers/net/ethernet/ti/cpsw.c headroom = xdp.data - xdp.data_hard_start;
xdp 189 drivers/net/netdevsim/bpf.c struct xdp_attachment_info *xdp)
xdp 193 drivers/net/netdevsim/bpf.c if (!xdp_attachment_flags_ok(xdp, bpf))
xdp 211 drivers/net/netdevsim/bpf.c xdp_attachment_setup(xdp, bpf);
xdp 553 drivers/net/netdevsim/bpf.c return xdp_attachment_query(&ns->xdp, bpf);
xdp 561 drivers/net/netdevsim/bpf.c return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
xdp 647 drivers/net/netdevsim/bpf.c WARN_ON(ns->xdp.prog);
xdp 54 drivers/net/netdevsim/netdev.c if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
xdp 65 drivers/net/netdevsim/netdevsim.h struct xdp_attachment_info xdp;
xdp 1148 drivers/net/tap.c static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
xdp 1150 drivers/net/tap.c struct tun_xdp_hdr *hdr = xdp->data_hard_start;
xdp 1161 drivers/net/tap.c skb = build_skb(xdp->data_hard_start, buflen);
xdp 1167 drivers/net/tap.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 1168 drivers/net/tap.c skb_put(skb, xdp->data_end - xdp->data);
xdp 1215 drivers/net/tap.c struct xdp_buff *xdp;
xdp 1220 drivers/net/tap.c xdp = &((struct xdp_buff *)ctl->ptr)[i];
xdp 1221 drivers/net/tap.c tap_get_user_xdp(q, xdp);
xdp 1233 drivers/net/tun.c static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 1235 drivers/net/tun.c switch (xdp->command) {
xdp 1237 drivers/net/tun.c return tun_xdp_set(dev, xdp->prog, xdp->extack);
xdp 1239 drivers/net/tun.c xdp->prog_id = tun_xdp_query(dev);
xdp 1310 drivers/net/tun.c struct xdp_frame *xdp = frames[i];
xdp 1314 drivers/net/tun.c void *frame = tun_xdp_to_ptr(xdp);
xdp 1318 drivers/net/tun.c xdp_return_frame_rx_napi(xdp);
xdp 1331 drivers/net/tun.c static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
xdp 1333 drivers/net/tun.c struct xdp_frame *frame = convert_to_xdp_frame(xdp);
xdp 1624 drivers/net/tun.c struct xdp_buff *xdp, u32 act)
xdp 1630 drivers/net/tun.c err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
xdp 1635 drivers/net/tun.c err = tun_xdp_tx(tun->dev, xdp);
xdp 1703 drivers/net/tun.c struct xdp_buff xdp;
xdp 1706 drivers/net/tun.c xdp.data_hard_start = buf;
xdp 1707 drivers/net/tun.c xdp.data = buf + pad;
xdp 1708 drivers/net/tun.c xdp_set_data_meta_invalid(&xdp);
xdp 1709 drivers/net/tun.c xdp.data_end = xdp.data + len;
xdp 1710 drivers/net/tun.c xdp.rxq = &tfile->xdp_rxq;
xdp 1712 drivers/net/tun.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 1717 drivers/net/tun.c err = tun_xdp_act(tun, xdp_prog, &xdp, act);
xdp 1729 drivers/net/tun.c pad = xdp.data - xdp.data_hard_start;
xdp 1730 drivers/net/tun.c len = xdp.data_end - xdp.data;
xdp 2430 drivers/net/tun.c struct xdp_buff *xdp, int *flush,
xdp 2433 drivers/net/tun.c unsigned int datasize = xdp->data_end - xdp->data;
xdp 2434 drivers/net/tun.c struct tun_xdp_hdr *hdr = xdp->data_hard_start;
xdp 2451 drivers/net/tun.c xdp_set_data_meta_invalid(xdp);
xdp 2452 drivers/net/tun.c xdp->rxq = &tfile->xdp_rxq;
xdp 2454 drivers/net/tun.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 2455 drivers/net/tun.c err = tun_xdp_act(tun, xdp_prog, xdp, act);
xdp 2457 drivers/net/tun.c put_page(virt_to_head_page(xdp->data));
xdp 2470 drivers/net/tun.c page = virt_to_head_page(xdp->data);
xdp 2483 drivers/net/tun.c skb = build_skb(xdp->data_hard_start, buflen);
xdp 2489 drivers/net/tun.c skb_reserve(skb, xdp->data - xdp->data_hard_start);
xdp 2490 drivers/net/tun.c skb_put(skb, xdp->data_end - xdp->data);
xdp 2538 drivers/net/tun.c struct xdp_buff *xdp;
xdp 2554 drivers/net/tun.c xdp = &((struct xdp_buff *)ctl->ptr)[i];
xdp 2555 drivers/net/tun.c tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
xdp 229 drivers/net/veth.c struct veth_rq *rq, bool xdp)
xdp 231 drivers/net/veth.c return __dev_forward_skb(dev, skb) ?: xdp ?
xdp 491 drivers/net/veth.c static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
xdp 494 drivers/net/veth.c struct xdp_frame *frame = convert_to_xdp_frame(xdp);
xdp 523 drivers/net/veth.c struct xdp_buff xdp;
xdp 526 drivers/net/veth.c xdp.data_hard_start = hard_start;
xdp 527 drivers/net/veth.c xdp.data = frame->data;
xdp 528 drivers/net/veth.c xdp.data_end = frame->data + frame->len;
xdp 529 drivers/net/veth.c xdp.data_meta = frame->data - frame->metasize;
xdp 530 drivers/net/veth.c xdp.rxq = &rq->xdp_rxq;
xdp 532 drivers/net/veth.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 536 drivers/net/veth.c delta = frame->data - xdp.data;
xdp 537 drivers/net/veth.c len = xdp.data_end - xdp.data;
xdp 541 drivers/net/veth.c xdp.data_hard_start = head;
xdp 542 drivers/net/veth.c xdp.rxq->mem = frame->mem;
xdp 543 drivers/net/veth.c if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
xdp 553 drivers/net/veth.c xdp.data_hard_start = head;
xdp 554 drivers/net/veth.c xdp.rxq->mem = frame->mem;
xdp 555 drivers/net/veth.c if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
xdp 601 drivers/net/veth.c struct xdp_buff xdp;
xdp 654 drivers/net/veth.c xdp.data_hard_start = skb->head;
xdp 655 drivers/net/veth.c xdp.data = skb_mac_header(skb);
xdp 656 drivers/net/veth.c xdp.data_end = xdp.data + pktlen;
xdp 657 drivers/net/veth.c xdp.data_meta = xdp.data;
xdp 658 drivers/net/veth.c xdp.rxq = &rq->xdp_rxq;
xdp 659 drivers/net/veth.c orig_data = xdp.data;
xdp 660 drivers/net/veth.c orig_data_end = xdp.data_end;
xdp 662 drivers/net/veth.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 668 drivers/net/veth.c get_page(virt_to_page(xdp.data));
xdp 670 drivers/net/veth.c xdp.rxq->mem = rq->xdp_mem;
xdp 671 drivers/net/veth.c if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
xdp 679 drivers/net/veth.c get_page(virt_to_page(xdp.data));
xdp 681 drivers/net/veth.c xdp.rxq->mem = rq->xdp_mem;
xdp 682 drivers/net/veth.c if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
xdp 698 drivers/net/veth.c delta = orig_data - xdp.data;
xdp 705 drivers/net/veth.c off = xdp.data_end - orig_data_end;
xdp 710 drivers/net/veth.c metalen = xdp.data - xdp.data_meta;
xdp 721 drivers/net/veth.c page_frag_free(xdp.data);
xdp 1149 drivers/net/veth.c static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 1151 drivers/net/veth.c switch (xdp->command) {
xdp 1153 drivers/net/veth.c return veth_xdp_set(dev, xdp->prog, xdp->extack);
xdp 1155 drivers/net/veth.c xdp->prog_id = veth_xdp_query(dev);
xdp 656 drivers/net/virtio_net.c struct xdp_buff xdp;
xdp 684 drivers/net/virtio_net.c xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp 685 drivers/net/virtio_net.c xdp.data = xdp.data_hard_start + xdp_headroom;
xdp 686 drivers/net/virtio_net.c xdp_set_data_meta_invalid(&xdp);
xdp 687 drivers/net/virtio_net.c xdp.data_end = xdp.data + len;
xdp 688 drivers/net/virtio_net.c xdp.rxq = &rq->xdp_rxq;
xdp 689 drivers/net/virtio_net.c orig_data = xdp.data;
xdp 690 drivers/net/virtio_net.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 696 drivers/net/virtio_net.c delta = orig_data - xdp.data;
xdp 697 drivers/net/virtio_net.c len = xdp.data_end - xdp.data;
xdp 701 drivers/net/virtio_net.c xdpf = convert_to_xdp_frame(&xdp);
xdp 714 drivers/net/virtio_net.c err = xdp_do_redirect(dev, &xdp, xdp_prog);
xdp 805 drivers/net/virtio_net.c struct xdp_buff xdp;
xdp 840 drivers/net/virtio_net.c xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp 841 drivers/net/virtio_net.c xdp.data = data + vi->hdr_len;
xdp 842 drivers/net/virtio_net.c xdp_set_data_meta_invalid(&xdp);
xdp 843 drivers/net/virtio_net.c xdp.data_end = xdp.data + (len - vi->hdr_len);
xdp 844 drivers/net/virtio_net.c xdp.rxq = &rq->xdp_rxq;
xdp 846 drivers/net/virtio_net.c act = bpf_prog_run_xdp(xdp_prog, &xdp);
xdp 855 drivers/net/virtio_net.c offset = xdp.data -
xdp 861 drivers/net/virtio_net.c len = xdp.data_end - xdp.data + vi->hdr_len;
xdp 874 drivers/net/virtio_net.c xdpf = convert_to_xdp_frame(&xdp);
xdp 891 drivers/net/virtio_net.c err = xdp_do_redirect(dev, &xdp, xdp_prog);
xdp 2532 drivers/net/virtio_net.c static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
xdp 2534 drivers/net/virtio_net.c switch (xdp->command) {
xdp 2536 drivers/net/virtio_net.c return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
xdp 2538 drivers/net/virtio_net.c xdp->prog_id = virtnet_xdp_query(dev);
xdp 128 drivers/vhost/net.c struct xdp_buff *xdp;
xdp 467 drivers/vhost/net.c .ptr = nvq->xdp,
xdp 691 drivers/vhost/net.c struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
xdp 744 drivers/vhost/net.c xdp->data_hard_start = buf;
xdp 745 drivers/vhost/net.c xdp->data = buf + pad;
xdp 746 drivers/vhost/net.c xdp->data_end = xdp->data + len;
xdp 1279 drivers/vhost/net.c struct xdp_buff *xdp;
xdp 1300 drivers/vhost/net.c xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
xdp 1301 drivers/vhost/net.c if (!xdp) {
xdp 1307 drivers/vhost/net.c n->vqs[VHOST_NET_VQ_TX].xdp = xdp;
xdp 1407 drivers/vhost/net.c kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
xdp 723 include/linux/bpf.h int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
xdp 730 include/linux/bpf.h int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
xdp 822 include/linux/bpf.h int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
xdp 848 include/linux/bpf.h struct xdp_buff *xdp,
xdp 978 include/linux/bpf.h int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
xdp 989 include/linux/bpf.h static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
xdp 8 include/linux/bpf_types.h BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
xdp 697 include/linux/filter.h struct xdp_buff *xdp)
xdp 705 include/linux/filter.h return BPF_PROG_RUN(prog, xdp);
xdp 913 include/linux/filter.h struct xdp_buff *xdp, struct bpf_prog *prog);
xdp 915 include/linux/filter.h struct xdp_buff *xdp,
xdp 1440 include/linux/netdevice.h struct xdp_frame **xdp,
xdp 178 include/net/net_namespace.h struct netns_xdp xdp;
xdp 94 include/net/xdp.h struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
xdp 98 include/net/xdp.h struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
xdp 104 include/net/xdp.h if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY)
xdp 105 include/net/xdp.h return xdp_convert_zc_to_xdp_frame(xdp);
xdp 108 include/net/xdp.h headroom = xdp->data - xdp->data_hard_start;
xdp 109 include/net/xdp.h metasize = xdp->data - xdp->data_meta;
xdp 115 include/net/xdp.h xdp_frame = xdp->data_hard_start;
xdp 117 include/net/xdp.h xdp_frame->data = xdp->data;
xdp 118 include/net/xdp.h xdp_frame->len = xdp->data_end - xdp->data;
xdp 123 include/net/xdp.h xdp_frame->mem = xdp->rxq->mem;
xdp 130 include/net/xdp.h void xdp_return_buff(struct xdp_buff *xdp);
xdp 160 include/net/xdp.h xdp_set_data_meta_invalid(struct xdp_buff *xdp)
xdp 162 include/net/xdp.h xdp->data_meta = xdp->data + 1;
xdp 166 include/net/xdp.h xdp_data_meta_unsupported(const struct xdp_buff *xdp)
xdp 168 include/net/xdp.h return unlikely(xdp->data_meta > xdp->data);
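
convert_to_xdp_frame() (include/net/xdp.h, just above) is what every XDP_TX/XDP_REDIRECT path in this listing calls: it writes a struct xdp_frame into the buffer's own headroom, so no allocation is needed, and returns NULL when the headroom cannot hold it. Typical use, sketched; my_enqueue is a stand-in for the driver's ring code:

#include <net/xdp.h>

static int my_xdp_tx(struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return -EOVERFLOW;	/* headroom too small for the frame */

	/* ... my_enqueue(xdpf); on failure call xdp_return_frame(xdpf) ... */
	return 0;
}
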
xdp 111 include/net/xdp_sock.h int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
xdp 112 include/net/xdp_sock.h int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
xdp 222 include/net/xdp_sock.h static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
xdp 227 include/net/xdp_sock.h static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
xdp 3 include/trace/events/xdp.h #define TRACE_SYSTEM xdp
xdp 31 include/trace/events/xdp.h const struct bpf_prog *xdp, u32 act),
xdp 33 include/trace/events/xdp.h TP_ARGS(dev, xdp, act),
xdp 42 include/trace/events/xdp.h __entry->prog_id = xdp->aux->id;
xdp 85 include/trace/events/xdp.h const struct bpf_prog *xdp,
xdp 89 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
xdp 102 include/trace/events/xdp.h __entry->prog_id = xdp->aux->id;
xdp 120 include/trace/events/xdp.h const struct bpf_prog *xdp,
xdp 123 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
xdp 128 include/trace/events/xdp.h const struct bpf_prog *xdp,
xdp 131 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
xdp 134 include/trace/events/xdp.h #define _trace_xdp_redirect(dev, xdp, to) \
xdp 135 include/trace/events/xdp.h trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
xdp 137 include/trace/events/xdp.h #define _trace_xdp_redirect_err(dev, xdp, to, err) \
xdp 138 include/trace/events/xdp.h trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
xdp 142 include/trace/events/xdp.h const struct bpf_prog *xdp,
xdp 145 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
xdp 157 include/trace/events/xdp.h const struct bpf_prog *xdp,
xdp 160 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
xdp 182 include/trace/events/xdp.h #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
xdp 183 include/trace/events/xdp.h trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
xdp 186 include/trace/events/xdp.h #define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
xdp 187 include/trace/events/xdp.h trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
xdp 668 kernel/bpf/cpumap.c int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
xdp 673 kernel/bpf/cpumap.c xdpf = convert_to_xdp_frame(xdp);
xdp 461 kernel/bpf/devmap.c int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
xdp 471 kernel/bpf/devmap.c err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
xdp 475 kernel/bpf/devmap.c xdpf = convert_to_xdp_frame(xdp);
xdp 175 kernel/bpf/xskmap.c int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
xdp 182 kernel/bpf/xskmap.c err = xsk_rcv(xs, xdp);
xdp 348 net/bpf/test_run.c struct xdp_buff xdp = {};
xdp 360 net/bpf/test_run.c xdp.data_hard_start = data;
xdp 361 net/bpf/test_run.c xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
xdp 362 net/bpf/test_run.c xdp.data_meta = xdp.data;
xdp 363 net/bpf/test_run.c xdp.data_end = xdp.data + size;
xdp 366 net/bpf/test_run.c xdp.rxq = &rxqueue->xdp_rxq;
xdp 368 net/bpf/test_run.c ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
xdp 371 net/bpf/test_run.c if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
xdp 372 net/bpf/test_run.c xdp.data_end != xdp.data + size)
xdp 373 net/bpf/test_run.c size = xdp.data_end - xdp.data;
xdp 374 net/bpf/test_run.c ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
xdp 4228 net/core/dev.c struct xdp_buff *xdp,
xdp 4271 net/core/dev.c xdp->data = skb->data - mac_len;
xdp 4272 net/core/dev.c xdp->data_meta = xdp->data;
xdp 4273 net/core/dev.c xdp->data_end = xdp->data + hlen;
xdp 4274 net/core/dev.c xdp->data_hard_start = skb->data - skb_headroom(skb);
xdp 4275 net/core/dev.c orig_data_end = xdp->data_end;
xdp 4276 net/core/dev.c orig_data = xdp->data;
xdp 4277 net/core/dev.c eth = (struct ethhdr *)xdp->data;
xdp 4282 net/core/dev.c xdp->rxq = &rxqueue->xdp_rxq;
xdp 4284 net/core/dev.c act = bpf_prog_run_xdp(xdp_prog, xdp);
xdp 4287 net/core/dev.c off = xdp->data - orig_data;
xdp 4301 net/core/dev.c off = orig_data_end - xdp->data_end;
xdp 4303 net/core/dev.c skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
xdp 4309 net/core/dev.c eth = (struct ethhdr *)xdp->data;
xdp 4322 net/core/dev.c metalen = xdp->data - xdp->data_meta;
xdp 4372 net/core/dev.c struct xdp_buff xdp;
xdp 4376 net/core/dev.c act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
xdp 4381 net/core/dev.c &xdp, xdp_prog);
xdp 5072 net/core/dev.c static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
xdp 5075 net/core/dev.c struct bpf_prog *new = xdp->prog;
xdp 5078 net/core/dev.c switch (xdp->command) {
xdp 5094 net/core/dev.c xdp->prog_id = old ? old->aux->id : 0;
xdp 8312 net/core/dev.c struct netdev_bpf xdp;
xdp 8317 net/core/dev.c memset(&xdp, 0, sizeof(xdp));
xdp 8318 net/core/dev.c xdp.command = cmd;
xdp 8321 net/core/dev.c WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
xdp 8323 net/core/dev.c return xdp.prog_id;
xdp 8330 net/core/dev.c struct netdev_bpf xdp;
xdp 8332 net/core/dev.c memset(&xdp, 0, sizeof(xdp));
xdp 8334 net/core/dev.c xdp.command = XDP_SETUP_PROG_HW;
xdp 8336 net/core/dev.c xdp.command = XDP_SETUP_PROG;
xdp 8337 net/core/dev.c xdp.extack = extack;
xdp 8338 net/core/dev.c xdp.flags = flags;
xdp 8339 net/core/dev.c xdp.prog = prog;
xdp 8341 net/core/dev.c return bpf_op(dev, &xdp);
xdp 8346 net/core/dev.c struct netdev_bpf xdp;
xdp 8357 net/core/dev.c memset(&xdp, 0, sizeof(xdp));
xdp 8358 net/core/dev.c xdp.command = XDP_QUERY_PROG;
xdp 8359 net/core/dev.c WARN_ON(ndo_bpf(dev, &xdp));
xdp 8360 net/core/dev.c if (xdp.prog_id)
xdp 8361 net/core/dev.c WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
xdp 8365 net/core/dev.c memset(&xdp, 0, sizeof(xdp));
xdp 8366 net/core/dev.c xdp.command = XDP_QUERY_PROG_HW;
xdp 8367 net/core/dev.c if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
xdp 8368 net/core/dev.c WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
xdp 3376 net/core/filter.c static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
xdp 3378 net/core/filter.c return xdp_data_meta_unsupported(xdp) ? 0 :
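
The net/core/dev.c lines 4271-4322 above are generic XDP: the xdp_buff is synthesized from an skb, and after the program runs the skb must be resynced to whatever the program did. A condensed sketch of that resync, following the same off computations; error paths omitted:

#include <linux/skbuff.h>
#include <net/xdp.h>

static void my_sync_skb(struct sk_buff *skb, struct xdp_buff *xdp,
			void *orig_data, void *orig_data_end)
{
	int off;

	off = xdp->data - orig_data;		/* bpf_xdp_adjust_head() */
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else
			__skb_push(skb, -off);
		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	off = orig_data_end - xdp->data_end;	/* bpf_xdp_adjust_tail() */
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len -= off;
	}
}
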
xdp 3379 net/core/filter.c xdp->data - xdp->data_meta;
xdp 3382 net/core/filter.c BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
xdp 3384 net/core/filter.c void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
xdp 3385 net/core/filter.c unsigned long metalen = xdp_get_metalen(xdp);
xdp 3387 net/core/filter.c void *data = xdp->data + offset;
xdp 3390 net/core/filter.c data > xdp->data_end - ETH_HLEN))
xdp 3394 net/core/filter.c memmove(xdp->data_meta + offset,
xdp 3395 net/core/filter.c xdp->data_meta, metalen);
xdp 3396 net/core/filter.c xdp->data_meta += offset;
xdp 3397 net/core/filter.c xdp->data = data;
xdp 3410 net/core/filter.c BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
xdp 3412 net/core/filter.c void *data_end = xdp->data_end + offset;
xdp 3418 net/core/filter.c if (unlikely(data_end < xdp->data + ETH_HLEN))
xdp 3421 net/core/filter.c xdp->data_end = data_end;
xdp 3434 net/core/filter.c BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
xdp 3436 net/core/filter.c void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
xdp 3437 net/core/filter.c void *meta = xdp->data_meta + offset;
xdp 3438 net/core/filter.c unsigned long metalen = xdp->data - meta;
xdp 3440 net/core/filter.c if (xdp_data_meta_unsupported(xdp))
xdp 3443 net/core/filter.c meta > xdp->data))
xdp 3449 net/core/filter.c xdp->data_meta = meta;
xdp 3464 net/core/filter.c struct xdp_buff *xdp,
xdp 3474 net/core/filter.c err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
xdp 3478 net/core/filter.c xdpf = convert_to_xdp_frame(xdp);
xdp 3489 net/core/filter.c xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
xdp 3503 net/core/filter.c err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
xdp 3516 net/core/filter.c struct xdp_buff *xdp,
xdp 3526 net/core/filter.c err = dev_map_enqueue(dst, xdp, dev_rx);
xdp 3534 net/core/filter.c err = cpu_map_enqueue(rcpu, xdp, dev_rx);
xdp 3542 net/core/filter.c err = __xsk_map_redirect(map, xdp, xs);
xdp 3609 net/core/filter.c static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
xdp 3624 net/core/filter.c err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
xdp 3636 net/core/filter.c int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
xdp 3643 net/core/filter.c return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
xdp 3645 net/core/filter.c return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
xdp 3651 net/core/filter.c struct xdp_buff *xdp,
xdp 3674 net/core/filter.c err = xsk_generic_rcv(xs, xdp);
xdp 3692 net/core/filter.c struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
xdp 3701 net/core/filter.c return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
xdp 4138 net/core/filter.c BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
xdp 4145 net/core/filter.c if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
xdp 4148 net/core/filter.c return bpf_event_output(map, flags, meta, meta_size, xdp->data,
xdp 1416 net/core/rtnetlink.c struct nlattr *xdp;
xdp 1421 net/core/rtnetlink.c xdp = nla_nest_start_noflag(skb, IFLA_XDP);
xdp 1422 net/core/rtnetlink.c if (!xdp)
xdp 1450 net/core/rtnetlink.c nla_nest_end(skb, xdp);
xdp 1454 net/core/rtnetlink.c nla_nest_cancel(skb, xdp);
xdp 2698 net/core/rtnetlink.c struct nlattr *xdp[IFLA_XDP_MAX + 1];
xdp 2701 net/core/rtnetlink.c err = nla_parse_nested_deprecated(xdp, IFLA_XDP_MAX,
xdp 2707 net/core/rtnetlink.c if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
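
Those bpf_xdp_adjust_* implementations have a program-side contract that the samples below all follow: after any adjust call, xdp->data and xdp->data_end must be reloaded and re-checked before the packet is touched, or the verifier rejects the program. A minimal BPF-side sketch; the header layout and section name are illustrative:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

SEC("xdp")
int my_push_hdr(struct xdp_md *xdp)
{
	void *data, *data_end;
	struct ethhdr *eth;

	/* Grow headroom by one Ethernet header. */
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ethhdr)))
		return XDP_DROP;

	data = (void *)(long)xdp->data;		/* pointers moved: reload */
	data_end = (void *)(long)xdp->data_end;
	eth = data;
	if ((void *)(eth + 1) > data_end)	/* and re-validate bounds */
		return XDP_DROP;

	/* ... fill the new outer header ... */
	return XDP_TX;
}
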
xdp 2712 net/core/rtnetlink.c if (xdp[IFLA_XDP_FLAGS]) {
xdp 2713 net/core/rtnetlink.c xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
xdp 2724 net/core/rtnetlink.c if (xdp[IFLA_XDP_FD]) {
xdp 2726 net/core/rtnetlink.c nla_get_s32(xdp[IFLA_XDP_FD]),
xdp 415 net/core/xdp.c void xdp_return_buff(struct xdp_buff *xdp)
xdp 417 net/core/xdp.c __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
xdp 467 net/core/xdp.c struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
xdp 475 net/core/xdp.c metasize = xdp_data_meta_unsupported(xdp) ? 0 :
xdp 476 net/core/xdp.c xdp->data - xdp->data_meta;
xdp 477 net/core/xdp.c totsize = xdp->data_end - xdp->data + metasize;
xdp 491 net/core/xdp.c data_to_copy = metasize ? xdp->data_meta : xdp->data;
xdp 500 net/core/xdp.c xdp_return_buff(xdp);
xdp 142 net/xdp/xsk.c static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
xdp 156 net/xdp/xsk.c if (unlikely(xdp_data_meta_unsupported(xdp))) {
xdp 157 net/xdp/xsk.c from_buf = xdp->data;
xdp 160 net/xdp/xsk.c from_buf = xdp->data_meta;
xdp 161 net/xdp/xsk.c metalen = xdp->data - xdp->data_meta;
xdp 172 net/xdp/xsk.c xdp_return_buff(xdp);
xdp 180 net/xdp/xsk.c static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
xdp 182 net/xdp/xsk.c int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
xdp 200 net/xdp/xsk.c int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
xdp 207 net/xdp/xsk.c if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
xdp 210 net/xdp/xsk.c len = xdp->data_end - xdp->data;
xdp 212 net/xdp/xsk.c return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
xdp 213 net/xdp/xsk.c __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
xdp 222 net/xdp/xsk.c int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
xdp 224 net/xdp/xsk.c u32 metalen = xdp->data - xdp->data_meta;
xdp 225 net/xdp/xsk.c u32 len = xdp->data_end - xdp->data;
xdp 233 net/xdp/xsk.c if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
xdp 246 net/xdp/xsk.c memcpy(buffer, xdp->data_meta, len + metalen);
xdp 550 net/xdp/xsk.c mutex_lock(&net->xdp.lock);
xdp 552 net/xdp/xsk.c mutex_unlock(&net->xdp.lock);
xdp 1016 net/xdp/xsk.c mutex_lock(&net->xdp.lock);
xdp 1017 net/xdp/xsk.c sk_for_each(sk, &net->xdp.list) {
xdp 1033 net/xdp/xsk.c mutex_unlock(&net->xdp.lock);
xdp 1118 net/xdp/xsk.c mutex_lock(&net->xdp.lock);
xdp 1119 net/xdp/xsk.c sk_add_node_rcu(sk, &net->xdp.list);
xdp 1120 net/xdp/xsk.c mutex_unlock(&net->xdp.lock);
xdp 1141 net/xdp/xsk.c mutex_init(&net->xdp.lock);
xdp 1142 net/xdp/xsk.c INIT_HLIST_HEAD(&net->xdp.list);
xdp 1148 net/xdp/xsk.c WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
xdp 138 net/xdp/xsk_diag.c mutex_lock(&net->xdp.lock);
xdp 140 net/xdp/xsk_diag.c sk_for_each(sk, &net->xdp.list) {
xdp 156 net/xdp/xsk_diag.c mutex_unlock(&net->xdp.lock);
xdp 67 samples/bpf/xdp_adjust_tail_kern.c static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)
xdp 71 samples/bpf/xdp_adjust_tail_kern.c if (bpf_xdp_adjust_head(xdp, 0 - headroom))
xdp 73 samples/bpf/xdp_adjust_tail_kern.c void *data = (void *)(long)xdp->data;
xdp 74 samples/bpf/xdp_adjust_tail_kern.c void *data_end = (void *)(long)xdp->data_end;
xdp 117 samples/bpf/xdp_adjust_tail_kern.c static __always_inline int handle_ipv4(struct xdp_md *xdp)
xdp 119 samples/bpf/xdp_adjust_tail_kern.c void *data_end = (void *)(long)xdp->data_end;
xdp 120 samples/bpf/xdp_adjust_tail_kern.c void *data = (void *)(long)xdp->data;
xdp 126 samples/bpf/xdp_adjust_tail_kern.c if (bpf_xdp_adjust_tail(xdp, 0 - offset))
xdp 77 samples/bpf/xdp_tx_iptunnel_kern.c static __always_inline int handle_ipv4(struct xdp_md *xdp)
xdp 79 samples/bpf/xdp_tx_iptunnel_kern.c void *data_end = (void *)(long)xdp->data_end;
xdp 80 samples/bpf/xdp_tx_iptunnel_kern.c void *data = (void *)(long)xdp->data;
xdp 112 samples/bpf/xdp_tx_iptunnel_kern.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
xdp 115 samples/bpf/xdp_tx_iptunnel_kern.c data = (void *)(long)xdp->data;
xdp 116 samples/bpf/xdp_tx_iptunnel_kern.c data_end = (void *)(long)xdp->data_end;
xdp 152 samples/bpf/xdp_tx_iptunnel_kern.c static __always_inline int handle_ipv6(struct xdp_md *xdp)
xdp 154 samples/bpf/xdp_tx_iptunnel_kern.c void *data_end = (void *)(long)xdp->data_end;
xdp 155 samples/bpf/xdp_tx_iptunnel_kern.c void *data = (void *)(long)xdp->data;
xdp 184 samples/bpf/xdp_tx_iptunnel_kern.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
xdp 187 samples/bpf/xdp_tx_iptunnel_kern.c data = (void *)(long)xdp->data;
xdp 188 samples/bpf/xdp_tx_iptunnel_kern.c data_end = (void *)(long)xdp->data_end;
xdp 216 samples/bpf/xdp_tx_iptunnel_kern.c int _xdp_tx_iptunnel(struct xdp_md *xdp)
xdp 218 samples/bpf/xdp_tx_iptunnel_kern.c void *data_end = (void *)(long)xdp->data_end;
xdp 219 samples/bpf/xdp_tx_iptunnel_kern.c void *data = (void *)(long)xdp->data;
xdp 229 samples/bpf/xdp_tx_iptunnel_kern.c return handle_ipv4(xdp);
xdp 232 samples/bpf/xdp_tx_iptunnel_kern.c return handle_ipv6(xdp);
xdp 4455 tools/lib/bpf/libbpf.c BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
xdp 15 tools/testing/selftests/bpf/progs/test_adjust_tail.c int _xdp_adjust_tail(struct xdp_md *xdp)
xdp 17 tools/testing/selftests/bpf/progs/test_adjust_tail.c void *data_end = (void *)(long)xdp->data_end;
xdp 18 tools/testing/selftests/bpf/progs/test_adjust_tail.c void *data = (void *)(long)xdp->data;
xdp 25 tools/testing/selftests/bpf/progs/test_adjust_tail.c if (bpf_xdp_adjust_tail(xdp, 0 - offset))
xdp 80 tools/testing/selftests/bpf/progs/test_xdp.c static __always_inline int handle_ipv4(struct xdp_md *xdp)
xdp 82 tools/testing/selftests/bpf/progs/test_xdp.c void *data_end = (void *)(long)xdp->data_end;
xdp 83 tools/testing/selftests/bpf/progs/test_xdp.c void *data = (void *)(long)xdp->data;
xdp 113 tools/testing/selftests/bpf/progs/test_xdp.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
xdp 116 tools/testing/selftests/bpf/progs/test_xdp.c data = (void *)(long)xdp->data;
xdp 117 tools/testing/selftests/bpf/progs/test_xdp.c data_end = (void *)(long)xdp->data_end;
xdp 153 tools/testing/selftests/bpf/progs/test_xdp.c static __always_inline int handle_ipv6(struct xdp_md *xdp)
xdp 155 tools/testing/selftests/bpf/progs/test_xdp.c void *data_end = (void *)(long)xdp->data_end;
xdp 156 tools/testing/selftests/bpf/progs/test_xdp.c void *data = (void *)(long)xdp->data;
xdp 183 tools/testing/selftests/bpf/progs/test_xdp.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
xdp 186 tools/testing/selftests/bpf/progs/test_xdp.c data = (void *)(long)xdp->data;
xdp 187 tools/testing/selftests/bpf/progs/test_xdp.c data_end = (void *)(long)xdp->data_end;
xdp 214 tools/testing/selftests/bpf/progs/test_xdp.c int _xdp_tx_iptunnel(struct xdp_md *xdp)
xdp 216 tools/testing/selftests/bpf/progs/test_xdp.c void *data_end = (void *)(long)xdp->data_end;
xdp 217 tools/testing/selftests/bpf/progs/test_xdp.c void *data = (void *)(long)xdp->data;
xdp 227 tools/testing/selftests/bpf/progs/test_xdp.c return handle_ipv4(xdp);
xdp 230 tools/testing/selftests/bpf/progs/test_xdp.c return handle_ipv6(xdp);
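xdp_tx_iptunnel_kern.c and test_xdp.c share the encapsulation idiom visible in the hits above: bpf_xdp_adjust_head() with a negative offset to grow headroom, followed by a mandatory reload of data/data_end. A minimal sketch of that idiom (program name invented; MAC/IP field setup and checksum handling elided):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_encap_v4(struct xdp_md *ctx)
{
	struct ethhdr *new_eth;
	struct iphdr *iph;
	void *data, *data_end;

	/* Grow headroom by one outer IP header. */
	if (bpf_xdp_adjust_head(ctx, 0 - (int)sizeof(struct iphdr)))
		return XDP_DROP;

	/* The helper invalidated all packet pointers: reload both, as
	 * every indexed caller does right after the adjust. */
	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;
	new_eth = data;
	iph = data + sizeof(*new_eth);
	if ((void *)(iph + 1) > data_end)
		return XDP_DROP;

	/* ... rebuild Ethernet header, fill the outer iphdr, fix csum ... */
	new_eth->h_proto = bpf_htons(ETH_P_IP);
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";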
xdp 76 tools/testing/selftests/bpf/progs/test_xdp_loop.c static __always_inline int handle_ipv4(struct xdp_md *xdp)
xdp 78 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data_end = (void *)(long)xdp->data_end;
xdp 79 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data = (void *)(long)xdp->data;
xdp 109 tools/testing/selftests/bpf/progs/test_xdp_loop.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
xdp 112 tools/testing/selftests/bpf/progs/test_xdp_loop.c data = (void *)(long)xdp->data;
xdp 113 tools/testing/selftests/bpf/progs/test_xdp_loop.c data_end = (void *)(long)xdp->data_end;
xdp 149 tools/testing/selftests/bpf/progs/test_xdp_loop.c static __always_inline int handle_ipv6(struct xdp_md *xdp)
xdp 151 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data_end = (void *)(long)xdp->data_end;
xdp 152 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data = (void *)(long)xdp->data;
xdp 179 tools/testing/selftests/bpf/progs/test_xdp_loop.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
xdp 182 tools/testing/selftests/bpf/progs/test_xdp_loop.c data = (void *)(long)xdp->data;
xdp 183 tools/testing/selftests/bpf/progs/test_xdp_loop.c data_end = (void *)(long)xdp->data_end;
xdp 210 tools/testing/selftests/bpf/progs/test_xdp_loop.c int _xdp_tx_iptunnel(struct xdp_md *xdp)
xdp 212 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data_end = (void *)(long)xdp->data_end;
xdp 213 tools/testing/selftests/bpf/progs/test_xdp_loop.c void *data = (void *)(long)xdp->data;
xdp 223 tools/testing/selftests/bpf/progs/test_xdp_loop.c return handle_ipv4(xdp);
xdp 226 tools/testing/selftests/bpf/progs/test_xdp_loop.c return handle_ipv6(xdp);
xdp 278 tools/testing/selftests/bpf/progs/test_xdp_noinline.c bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
xdp 289 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
xdp 291 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data = (void *)(long)xdp->data;
xdp 292 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data_end = (void *)(long)xdp->data_end;
xdp 321 tools/testing/selftests/bpf/progs/test_xdp_noinline.c bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
xdp 337 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
xdp 339 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data = (void *)(long)xdp->data;
xdp 340 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data_end = (void *)(long)xdp->data_end;
xdp 369 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
xdp 375 tools/testing/selftests/bpf/progs/test_xdp_noinline.c bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
xdp 388 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
xdp 390 tools/testing/selftests/bpf/progs/test_xdp_noinline.c *data = (void *)(long)xdp->data;
xdp 391 tools/testing/selftests/bpf/progs/test_xdp_noinline.c *data_end = (void *)(long)xdp->data_end;
xdp 396 tools/testing/selftests/bpf/progs/test_xdp_noinline.c bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
xdp 406 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
xdp 408 tools/testing/selftests/bpf/progs/test_xdp_noinline.c *data = (void *)(long)xdp->data;
xdp 409 tools/testing/selftests/bpf/progs/test_xdp_noinline.c *data_end = (void *)(long)xdp->data_end;
xdp 692 tools/testing/selftests/bpf/progs/test_xdp_noinline.c bool is_ipv6, struct xdp_md *xdp)
xdp 779 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
xdp 782 tools/testing/selftests/bpf/progs/test_xdp_noinline.c if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
xdp 792 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data = (void *)(long)xdp->data;
xdp 793 tools/testing/selftests/bpf/progs/test_xdp_noinline.c data_end = (void *)(long)xdp->data_end;
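test_xdp_noinline.c pairs the encap helpers with decap_v4()/decap_v6(), which go the opposite way: a positive bpf_xdp_adjust_head() offset shaves the outer header off the front of the frame. A hedged sketch of the direction of that offset (program name invented; the real decap also preserves the Ethernet header across the shift):

#include <linux/bpf.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_decap_v4(struct xdp_md *xdp)
{
	void *data, *data_end;

	/* A positive offset moves data forward, discarding one outer
	 * iphdr's worth of bytes. */
	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
		return XDP_DROP;

	/* Reload and re-check, exactly as the indexed decap helpers do
	 * through their data/data_end out-parameters. */
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	if (data + 1 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";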
xdp 18 tools/testing/selftests/bpf/progs/test_xdp_redirect.c int xdp_redirect_to_111(struct xdp_md *xdp)
xdp 23 tools/testing/selftests/bpf/progs/test_xdp_redirect.c int xdp_redirect_to_222(struct xdp_md *xdp)
xdp 14 tools/testing/selftests/bpf/progs/xdp_redirect_map.c int xdp_redirect_map_0(struct xdp_md *xdp)
xdp 20 tools/testing/selftests/bpf/progs/xdp_redirect_map.c int xdp_redirect_map_1(struct xdp_md *xdp)
xdp 26 tools/testing/selftests/bpf/progs/xdp_redirect_map.c int xdp_redirect_map_2(struct xdp_md *xdp)
xdp 7 tools/testing/selftests/bpf/progs/xdp_tx.c int xdp_tx(struct xdp_md *xdp)
xdp 81 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c static int get_map_fd_by_prog_id(int prog_id, bool *xdp)
xdp 108 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c *xdp = info.type == BPF_PROG_TYPE_XDP;
xdp 119 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c static int run_test(int server_fd, int results_fd, bool xdp)
xdp 170 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c if (xdp && value_gen == 0) {
xdp 210 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c bool xdp;
xdp 217 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
xdp 242 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c if (run_test(server, results, xdp))
xdp 245 tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c if (run_test(server_v6, results, xdp))
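The final block covers the trivial forwarding selftests: xdp_tx.c simply returns XDP_TX, while xdp_redirect_map.c steers frames through a devmap with bpf_redirect_map(). A sketch in the same spirit (map layout, slot choice and names are assumptions, not the selftests' own):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_port0(struct xdp_md *xdp)
{
	/* Redirect out whatever ifindex userspace stored in slot 0;
	 * with flags 0, a failed redirect ends in a drop. */
	return bpf_redirect_map(&tx_ports, 0, 0);
}

SEC("xdp")
int xdp_mirror(struct xdp_md *xdp)
{
	return XDP_TX;	/* bounce the frame back out the receiving interface */
}

char _license[] SEC("license") = "GPL";

Userspace would then populate tx_ports via bpf_map_update_elem() and attach the program, much as test_tcp_check_syncookie_user.c locates its map from a program id in get_map_fd_by_prog_id() above.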