This source file includes the following definitions:
- mlx5e_xsk_update_rx_wakeup
#ifndef __MLX5_EN_XSK_RX_H__
#define __MLX5_EN_XSK_RX_H__

#include "en.h"
#include <net/xdp_sock.h>

bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count);
int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
                              struct mlx5e_dma_info *dma_info);
void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
                            struct mlx5e_dma_info *dma_info);
void mlx5e_xsk_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
                                                    struct mlx5e_mpw_info *wi,
                                                    u16 cqe_bcnt,
                                                    u32 head_offset,
                                                    u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
                                              struct mlx5_cqe64 *cqe,
                                              struct mlx5e_wqe_frag_info *wi,
                                              u32 cqe_bcnt);

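/* Update the XSK RX need_wakeup state after a descriptor allocation pass.
 * If the UMEM does not use the need_wakeup flag, the allocation error is
 * simply propagated so the caller keeps retrying. Otherwise the RX
 * need_wakeup flag is set on allocation failure (user space must kick the
 * driver) or cleared on success, and false is returned.
 */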
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
	if (!xsk_umem_uses_need_wakeup(rq->umem))
		return alloc_err;

	if (unlikely(alloc_err))
		xsk_set_rx_need_wakeup(rq->umem);
	else
		xsk_clear_rx_need_wakeup(rq->umem);

	return false;
}

#endif /* __MLX5_EN_XSK_RX_H__ */
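
For context, the boolean returned by mlx5e_xsk_update_rx_wakeup() feeds back into the RX refill decision. The sketch below is a simplified, hypothetical caller, not the driver's actual refill code: the function name mlx5e_xsk_refill_rx_sketch, the budget loop, and the assumption that mlx5e_xsk_page_alloc_umem() returns non-zero on failure are all illustrative, shown only to clarify how an AF_XDP-aware refill path might combine an allocation failure with the need_wakeup handshake.

/* Hypothetical sketch of a caller; relies only on the declarations in
 * "en/xsk/rx.h" above. Names and loop structure are illustrative.
 */
#include "en.h"
#include "en/xsk/rx.h"

static bool mlx5e_xsk_refill_rx_sketch(struct mlx5e_rq *rq, int budget)
{
	bool alloc_err = false;
	int i;

	for (i = 0; i < budget; i++) {
		struct mlx5e_dma_info di;

		/* Stop refilling as soon as the UMEM fill ring runs dry. */
		if (mlx5e_xsk_page_alloc_umem(rq, &di)) {
			alloc_err = true;
			break;
		}
		/* ... post di to the RQ (omitted in this sketch) ... */
	}

	/* With need_wakeup in use, a failed allocation arms the flag so user
	 * space wakes the driver later; the return value tells the caller
	 * whether it should keep polling for free frames itself.
	 */
	return mlx5e_xsk_update_rx_wakeup(rq, alloc_err);
}

The point of the need_wakeup optimization is that the driver can stop busy-polling an empty fill ring: once the flag is set, user space is expected to explicitly wake the driver (for example via poll()), which is why the helper returns false after arming the flag.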