This source file includes the following definitions.
- i40e_xsk_umem_dma_map
- i40e_xsk_umem_dma_unmap
- i40e_xsk_umem_enable
- i40e_xsk_umem_disable
- i40e_xsk_umem_setup
- i40e_run_xdp_zc
- i40e_alloc_buffer_zc
- i40e_alloc_buffer_slow_zc
- __i40e_alloc_rx_buffers_zc
- i40e_alloc_rx_buffers_zc
- i40e_alloc_rx_buffers_fast_zc
- i40e_get_rx_buffer_zc
- i40e_reuse_rx_buffer_zc
- i40e_zca_free
- i40e_construct_skb_zc
- i40e_inc_ntc
- i40e_clean_rx_irq_zc
- i40e_xmit_zc
- i40e_clean_xdp_tx_buffer
- i40e_clean_xdp_tx_irq
- i40e_xsk_wakeup
- i40e_xsk_clean_rx_ring
- i40e_xsk_clean_tx_ring
- i40e_xsk_any_rx_ring_enabled
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
/**
 * i40e_xsk_umem_dma_map - DMA map all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA map
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i, j;
	dma_addr_t dma;

	dev = &pf->pdev->dev;
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -1;
}
/**
 * i40e_xsk_umem_dma_unmap - DMA unmap all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA unmap
 **/
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = &pf->pdev->dev;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}
/**
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM
 * @qid: Rx ring to associate UMEM to
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = i40e_xsk_umem_dma_map(vsi, umem);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

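		/* Kick start the NAPI context so that receiving will start */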
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_umem_disable - Disable/dissociate a UMEM from a certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to dissociate UMEM from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem *umem;
	bool if_running;
	int err;

	umem = xdp_get_umem_from_qid(netdev, qid);
	if (!umem)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	i40e_xsk_umem_dma_unmap(vsi, umem);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_umem_setup - Enable/disable a UMEM for a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM (from)to
 *
 * This function enables or disables a UMEM to a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
			u16 qid)
{
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
}
/**
 * i40e_run_xdp_zc - Execute an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u64 offset;
	u32 act;

	rcu_read_lock();
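	/* NB! xdp_prog will always be !NULL here, since this zero-copy
	 * path is only enabled when an XDP program is attached.
	 */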
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	offset = xdp->data - xdp->data_hard_start;

	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
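		/* fall through */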
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
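		/* fall through -- handle aborted action case */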
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}
/**
 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from the
 * fill queue, or via the recycle mechanism found in the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);

	xsk_umem_discard_addr(umem);
	return true;
}
/**
 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from the
 * fill queue, or via the reuse queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);

	xsk_umem_discard_addr_rq(umem);
	return true;
}
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];
	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			goto no_buffers;
		}

		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		rx_desc->wb.qword1.status_error_len = 0;
		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}
/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the reuse queue
 * or fill queue and places them on the Rx descriptor ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
}
/**
 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill queue
 * and places them on the Rx descriptor ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
}
/**
 * i40e_get_rx_buffer_zc - Return the current Rx buffer
 * @rx_ring: Rx ring
 * @size: The size of the rx buffer (read from descriptor)
 *
 * This function returns the current, received Rx buffer, and also
 * does DMA synchronization for the CPU.
 *
 * Returns the received Rx buffer
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
{
	struct i40e_rx_buffer *bi;

	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];

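	/* we are reusing so sync this buffer for CPU use */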
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}
/**
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
 * @rx_ring: Rx ring
 * @old_bi: The Rx buffer to recycle
 *
 * This function recycles a finished Rx buffer, and places it on the
 * recycle queue (next_to_alloc).
 **/
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
{
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	u16 nta = rx_ring->next_to_alloc;

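	/* update, and store next to alloc */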
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

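	/* transfer the DMA mapping, address and handle to the new buffer */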
	new_bi->dma = old_bi->dma;
	new_bi->addr = old_bi->addr;
	new_bi->handle = old_bi->handle;

	old_bi->addr = NULL;
}
/**
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
 **/
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
					    rx_ring->xsk_umem->headroom);
}
/**
 * i40e_construct_skb_zc - Create an skb from a zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

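	/* allocate an skb large enough to hold the received frame */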
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	i40e_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}
/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}
/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u64 qword;

		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

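		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */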
		dma_rmb();

		bi = i40e_clean_programming_status(rx_ring, rx_desc,
						   qword);
		if (unlikely(bi)) {
			i40e_reuse_rx_buffer_zc(rx_ring, bi);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_get_rx_buffer_zc(rx_ring, size);
		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
			} else {
				i40e_reuse_rx_buffer_zc(rx_ring, bi);
			}

			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

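		/* XDP_PASS path: build an skb around the buffer and hand
		 * the frame to the network stack.
		 */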
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}
/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
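		/* Request an interrupt for the last frame and bump tail ptr. */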
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}
/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
{
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
		goto out_xmit;
	} else if (frames_ready > budget) {
		completed_frames = budget;
		work_done = false;
	} else {
		completed_frames = frames_ready;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;
		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);

	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
}
/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

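	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */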
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}
/**
 * i40e_xsk_clean_rx_ring - Clean the XDP Rx ring on shutdown
 * @rx_ring: ring to be cleaned
 **/
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->addr)
			continue;

		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
		rx_bi->addr = NULL;
	}
}
/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}
/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP UMEM attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xdp_get_umem_from_qid(netdev, i))
			return true;
	}

	return false;
}