This source file includes the following definitions:
- ice_unmap_and_free_tx_buf
- txring_txq
- ice_clean_tx_ring
- ice_free_tx_ring
- ice_clean_tx_irq
- ice_setup_tx_ring
- ice_clean_rx_ring
- ice_free_rx_ring
- ice_setup_rx_ring
- ice_release_rx_desc
- ice_alloc_mapped_page
- ice_alloc_rx_bufs
- ice_page_is_reserved
- ice_rx_buf_adjust_pg_offset
- ice_can_reuse_rx_page
- ice_add_rx_frag
- ice_reuse_rx_page
- ice_get_rx_buf
- ice_construct_skb
- ice_put_rx_buf
- ice_cleanup_headers
- ice_test_staterr
- ice_is_non_eop
- ice_ptype_to_htype
- ice_rx_hash
- ice_rx_csum
- ice_process_skb_fields
- ice_receive_skb
- ice_clean_rx_irq
- ice_adjust_itr_by_size_and_speed
- ice_update_itr
- ice_buildreg_itr
- ice_update_ena_itr
- ice_set_wb_on_itr
- ice_napi_poll
- build_ctob
- __ice_maybe_stop_tx
- ice_maybe_stop_tx
- ice_tx_map
- ice_tx_csum
- ice_tx_prepare_vlan_flags
- ice_tso
- ice_txd_use_count
- ice_xmit_desc_count
- __ice_chk_linearize
- ice_chk_linearize
- ice_xmit_frame_ring
- ice_start_xmit
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include "ice.h"
9 #include "ice_dcb_lib.h"
10
11 #define ICE_RX_HDR_SIZE 256
12
/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
18 static void
19 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
20 {
21 if (tx_buf->skb) {
22 dev_kfree_skb_any(tx_buf->skb);
23 if (dma_unmap_len(tx_buf, len))
24 dma_unmap_single(ring->dev,
25 dma_unmap_addr(tx_buf, dma),
26 dma_unmap_len(tx_buf, len),
27 DMA_TO_DEVICE);
28 } else if (dma_unmap_len(tx_buf, len)) {
29 dma_unmap_page(ring->dev,
30 dma_unmap_addr(tx_buf, dma),
31 dma_unmap_len(tx_buf, len),
32 DMA_TO_DEVICE);
33 }
34
35 tx_buf->next_to_watch = NULL;
36 tx_buf->skb = NULL;
37 dma_unmap_len_set(tx_buf, len, 0);
38
39 }
40
41 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
42 {
43 return netdev_get_tx_queue(ring->netdev, ring->q_index);
44 }
45
/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
50 void ice_clean_tx_ring(struct ice_ring *tx_ring)
51 {
52 u16 i;
53
54
55 if (!tx_ring->tx_buf)
56 return;
57
58
59 for (i = 0; i < tx_ring->count; i++)
60 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
61
62 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
63
64
65 memset(tx_ring->desc, 0, tx_ring->size);
66
67 tx_ring->next_to_use = 0;
68 tx_ring->next_to_clean = 0;
69
70 if (!tx_ring->netdev)
71 return;
72
73
74 netdev_tx_reset_queue(txring_txq(tx_ring));
75 }
76
/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
83 void ice_free_tx_ring(struct ice_ring *tx_ring)
84 {
85 ice_clean_tx_ring(tx_ring);
86 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
87 tx_ring->tx_buf = NULL;
88
89 if (tx_ring->desc) {
90 dmam_free_coherent(tx_ring->dev, tx_ring->size,
91 tx_ring->desc, tx_ring->dma);
92 tx_ring->desc = NULL;
93 }
94 }
95
/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
103 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
104 {
105 unsigned int total_bytes = 0, total_pkts = 0;
106 unsigned int budget = ICE_DFLT_IRQ_WORK;
107 struct ice_vsi *vsi = tx_ring->vsi;
108 s16 i = tx_ring->next_to_clean;
109 struct ice_tx_desc *tx_desc;
110 struct ice_tx_buf *tx_buf;
111
112 tx_buf = &tx_ring->tx_buf[i];
113 tx_desc = ICE_TX_DESC(tx_ring, i);
114 i -= tx_ring->count;
115
116 prefetch(&vsi->state);
117
118 do {
119 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
120
121
122 if (!eop_desc)
123 break;
124
125 smp_rmb();
126
127
128 if (!(eop_desc->cmd_type_offset_bsz &
129 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
130 break;
131
132
133 tx_buf->next_to_watch = NULL;
134
135
136 total_bytes += tx_buf->bytecount;
137 total_pkts += tx_buf->gso_segs;
138
139
140 napi_consume_skb(tx_buf->skb, napi_budget);
141
142
143 dma_unmap_single(tx_ring->dev,
144 dma_unmap_addr(tx_buf, dma),
145 dma_unmap_len(tx_buf, len),
146 DMA_TO_DEVICE);
147
148
149 tx_buf->skb = NULL;
150 dma_unmap_len_set(tx_buf, len, 0);
151
152
153 while (tx_desc != eop_desc) {
154 tx_buf++;
155 tx_desc++;
156 i++;
157 if (unlikely(!i)) {
158 i -= tx_ring->count;
159 tx_buf = tx_ring->tx_buf;
160 tx_desc = ICE_TX_DESC(tx_ring, 0);
161 }
162
163
164 if (dma_unmap_len(tx_buf, len)) {
165 dma_unmap_page(tx_ring->dev,
166 dma_unmap_addr(tx_buf, dma),
167 dma_unmap_len(tx_buf, len),
168 DMA_TO_DEVICE);
169 dma_unmap_len_set(tx_buf, len, 0);
170 }
171 }
172
173
174 tx_buf++;
175 tx_desc++;
176 i++;
177 if (unlikely(!i)) {
178 i -= tx_ring->count;
179 tx_buf = tx_ring->tx_buf;
180 tx_desc = ICE_TX_DESC(tx_ring, 0);
181 }
182
183 prefetch(tx_desc);
184
185
186 budget--;
187 } while (likely(budget));
188
189 i += tx_ring->count;
190 tx_ring->next_to_clean = i;
191 u64_stats_update_begin(&tx_ring->syncp);
192 tx_ring->stats.bytes += total_bytes;
193 tx_ring->stats.pkts += total_pkts;
194 u64_stats_update_end(&tx_ring->syncp);
195 tx_ring->q_vector->tx.total_bytes += total_bytes;
196 tx_ring->q_vector->tx.total_pkts += total_pkts;
197
198 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
199 total_bytes);
200
201 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
202 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
203 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
204
205
206
207 smp_mb();
208 if (__netif_subqueue_stopped(tx_ring->netdev,
209 tx_ring->q_index) &&
210 !test_bit(__ICE_DOWN, vsi->state)) {
211 netif_wake_subqueue(tx_ring->netdev,
212 tx_ring->q_index);
213 ++tx_ring->tx_stats.restart_q;
214 }
215 }
216
217 return !!budget;
218 }
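/* A worked example of the index bookkeeping above (values are purely
 * illustrative): with count = 256 and next_to_clean = 250, i starts at
 * 250 - 256 = -6. Each completed buffer increments i; when i hits 0 the
 * ring has wrapped, so i is rewound to -256 while tx_buf/tx_desc are
 * reset to the start of the ring. The final "i += tx_ring->count"
 * converts the offset back to a valid 0..count-1 index, which keeps the
 * hot loop free of modulo operations.
 */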
219
/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
226 int ice_setup_tx_ring(struct ice_ring *tx_ring)
227 {
228 struct device *dev = tx_ring->dev;
229
230 if (!dev)
231 return -ENOMEM;
232
233
234 WARN_ON(tx_ring->tx_buf);
235 tx_ring->tx_buf =
236 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
237 GFP_KERNEL);
238 if (!tx_ring->tx_buf)
239 return -ENOMEM;
240
241
242 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
243 PAGE_SIZE);
244 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
245 GFP_KERNEL);
246 if (!tx_ring->desc) {
247 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
248 tx_ring->size);
249 goto err;
250 }
251
252 tx_ring->next_to_use = 0;
253 tx_ring->next_to_clean = 0;
254 tx_ring->tx_stats.prev_pkt = -1;
255 return 0;
256
257 err:
258 devm_kfree(dev, tx_ring->tx_buf);
259 tx_ring->tx_buf = NULL;
260 return -ENOMEM;
261 }
262
/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
267 void ice_clean_rx_ring(struct ice_ring *rx_ring)
268 {
269 struct device *dev = rx_ring->dev;
270 u16 i;
271
272
273 if (!rx_ring->rx_buf)
274 return;
275
276
277 for (i = 0; i < rx_ring->count; i++) {
278 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
279
280 if (rx_buf->skb) {
281 dev_kfree_skb(rx_buf->skb);
282 rx_buf->skb = NULL;
283 }
284 if (!rx_buf->page)
285 continue;
286
287
288
289
290 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
291 rx_buf->page_offset,
292 ICE_RXBUF_2048, DMA_FROM_DEVICE);
293
294
295 dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
296 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
297 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
298
299 rx_buf->page = NULL;
300 rx_buf->page_offset = 0;
301 }
302
303 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
304
305
306 memset(rx_ring->desc, 0, rx_ring->size);
307
308 rx_ring->next_to_alloc = 0;
309 rx_ring->next_to_clean = 0;
310 rx_ring->next_to_use = 0;
311 }
312
/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
319 void ice_free_rx_ring(struct ice_ring *rx_ring)
320 {
321 ice_clean_rx_ring(rx_ring);
322 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
323 rx_ring->rx_buf = NULL;
324
325 if (rx_ring->desc) {
326 dmam_free_coherent(rx_ring->dev, rx_ring->size,
327 rx_ring->desc, rx_ring->dma);
328 rx_ring->desc = NULL;
329 }
330 }
331
/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx descriptor ring to set up
 *
 * Return 0 on success, negative on error
 */
338 int ice_setup_rx_ring(struct ice_ring *rx_ring)
339 {
340 struct device *dev = rx_ring->dev;
341
342 if (!dev)
343 return -ENOMEM;
344
345
346 WARN_ON(rx_ring->rx_buf);
347 rx_ring->rx_buf =
348 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
349 GFP_KERNEL);
350 if (!rx_ring->rx_buf)
351 return -ENOMEM;
352
353
354 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
355 PAGE_SIZE);
356 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
357 GFP_KERNEL);
358 if (!rx_ring->desc) {
359 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
360 rx_ring->size);
361 goto err;
362 }
363
364 rx_ring->next_to_use = 0;
365 rx_ring->next_to_clean = 0;
366 return 0;
367
368 err:
369 devm_kfree(dev, rx_ring->rx_buf);
370 rx_ring->rx_buf = NULL;
371 return -ENOMEM;
372 }
373
/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
379 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
380 {
381 u16 prev_ntu = rx_ring->next_to_use;
382
383 rx_ring->next_to_use = val;
384
385
386 rx_ring->next_to_alloc = val;
387
388
389
390
391
392
393 val &= ~0x7;
394 if (prev_ntu != val) {
395
396
397
398
399
400 wmb();
401 writel(val, rx_ring->tail);
402 }
403 }
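/* Illustrative note on the tail update above: "val &= ~0x7" rounds the
 * new tail down to an 8-descriptor boundary, so QRX_TAIL is effectively
 * bumped in eight-descriptor strides (e.g. next_to_use moving from 5 to
 * 13 writes a tail of 8). The wmb() guarantees the descriptor writes
 * done in ice_alloc_rx_bufs() are visible to the device before the
 * doorbell write that hands the descriptors over.
 */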
404
/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
413 static bool
414 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
415 {
416 struct page *page = bi->page;
417 dma_addr_t dma;
418
419
420 if (likely(page)) {
421 rx_ring->rx_stats.page_reuse_count++;
422 return true;
423 }
424
425
426 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
427 if (unlikely(!page)) {
428 rx_ring->rx_stats.alloc_page_failed++;
429 return false;
430 }
431
432
433 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
434 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
435
436
437
438
439 if (dma_mapping_error(rx_ring->dev, dma)) {
440 __free_pages(page, 0);
441 rx_ring->rx_stats.alloc_page_failed++;
442 return false;
443 }
444
445 bi->dma = dma;
446 bi->page = page;
447 bi->page_offset = 0;
448 page_ref_add(page, USHRT_MAX - 1);
449 bi->pagecnt_bias = USHRT_MAX;
450
451 return true;
452 }
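/* Note on the refcount handling above: page_ref_add(page, USHRT_MAX - 1)
 * takes a large number of references up front and records them in
 * pagecnt_bias. The hot path can then "give" references to the stack by
 * decrementing the local bias instead of touching the atomic page
 * refcount for every received frame; any unused bias is returned in one
 * go via __page_frag_cache_drain() when the page is finally released.
 */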
453
/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail.
 * Returning true signals to the caller that not all cleaned_count buffers
 * were replaced and there is more work to do.
 *
 * Each refilled descriptor gets the DMA address of its (possibly recycled)
 * page written back, and the tail register is bumped at most once per call
 * via ice_release_rx_desc().
 */
467 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
468 {
469 union ice_32b_rx_flex_desc *rx_desc;
470 u16 ntu = rx_ring->next_to_use;
471 struct ice_rx_buf *bi;
472
473
474 if (!rx_ring->netdev || !cleaned_count)
475 return false;
476
477
478 rx_desc = ICE_RX_DESC(rx_ring, ntu);
479 bi = &rx_ring->rx_buf[ntu];
480
481 do {
482
483 if (!ice_alloc_mapped_page(rx_ring, bi))
484 break;
485
486
487 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
488 bi->page_offset,
489 ICE_RXBUF_2048,
490 DMA_FROM_DEVICE);
491
492
493
494
495 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
496
497 rx_desc++;
498 bi++;
499 ntu++;
500 if (unlikely(ntu == rx_ring->count)) {
501 rx_desc = ICE_RX_DESC(rx_ring, 0);
502 bi = rx_ring->rx_buf;
503 ntu = 0;
504 }
505
506
507 rx_desc->wb.status_error0 = 0;
508
509 cleaned_count--;
510 } while (cleaned_count);
511
512 if (rx_ring->next_to_use != ntu)
513 ice_release_rx_desc(rx_ring, ntu);
514
515 return !!cleaned_count;
516 }
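/* Return-value convention for the allocator above: true means the ring
 * could not be completely refilled (an allocation failed mid-way), so
 * the caller reports the remaining work by returning its full NAPI
 * budget; false means all cleaned_count buffers were replaced. Clearing
 * status_error0 of the next descriptor keeps a stale DD bit from being
 * mistaken for a completed write-back on the following pass.
 */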
517
518
519
520
521
522 static bool ice_page_is_reserved(struct page *page)
523 {
524 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
525 }
526
527
528
529
530
531
532
533
534
535
536
537 static void
538 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
539 {
540 #if (PAGE_SIZE < 8192)
541
542 rx_buf->page_offset ^= size;
543 #else
544
545 rx_buf->page_offset += size;
546 #endif
547 }
548
549
550
551
552
553
554
555
556
557
558 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
559 {
560 #if (PAGE_SIZE >= 8192)
561 unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
562 #endif
563 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
564 struct page *page = rx_buf->page;
565
566
567 if (unlikely(ice_page_is_reserved(page)))
568 return false;
569
570 #if (PAGE_SIZE < 8192)
571
572 if (unlikely((page_count(page) - pagecnt_bias) > 1))
573 return false;
574 #else
575 if (rx_buf->page_offset > last_offset)
576 return false;
577 #endif
578
579
580
581
582
583 if (unlikely(pagecnt_bias == 1)) {
584 page_ref_add(page, USHRT_MAX - 1);
585 rx_buf->pagecnt_bias = USHRT_MAX;
586 }
587
588 return true;
589 }
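/* Page-reuse model, summarized (a sketch of the logic above, assuming
 * the 2048-byte ICE_RXBUF_2048 half-page buffers used elsewhere in this
 * file): on 4K-page systems the buffer offset is XOR-flipped between the
 * two halves of the page, so one half can be handed to the stack while
 * the other is refilled; the page is recycled only while the driver
 * holds the sole reference (page_count - pagecnt_bias <= 1). On larger
 * pages the offset advances linearly and reuse stops once less than one
 * buffer of space remains. Remote-NUMA or pfmemalloc pages are never
 * recycled.
 */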
590
591
592
593
594
595
596
597
598
599
600
601 static void
602 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
603 unsigned int size)
604 {
605 #if (PAGE_SIZE >= 8192)
606 unsigned int truesize = SKB_DATA_ALIGN(size);
607 #else
608 unsigned int truesize = ICE_RXBUF_2048;
609 #endif
610
611 if (!size)
612 return;
613 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
614 rx_buf->page_offset, size, truesize);
615
616
617 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
618 }
619
620
621
622
623
624
625
626
627 static void
628 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
629 {
630 u16 nta = rx_ring->next_to_alloc;
631 struct ice_rx_buf *new_buf;
632
633 new_buf = &rx_ring->rx_buf[nta];
634
635
636 nta++;
637 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
638
639
640
641
642
643 new_buf->dma = old_buf->dma;
644 new_buf->page = old_buf->page;
645 new_buf->page_offset = old_buf->page_offset;
646 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
647 }
648
649
650
651
652
653
654
655
656
657
658 static struct ice_rx_buf *
659 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
660 const unsigned int size)
661 {
662 struct ice_rx_buf *rx_buf;
663
664 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
665 prefetchw(rx_buf->page);
666 *skb = rx_buf->skb;
667
668 if (!size)
669 return rx_buf;
670
671 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
672 rx_buf->page_offset, size,
673 DMA_FROM_DEVICE);
674
675
676 rx_buf->pagecnt_bias--;
677
678 return rx_buf;
679 }
680
681
682
683
684
685
686
687
688
689
690
691 static struct sk_buff *
692 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
693 unsigned int size)
694 {
695 void *va = page_address(rx_buf->page) + rx_buf->page_offset;
696 unsigned int headlen;
697 struct sk_buff *skb;
698
699
700 prefetch(va);
701 #if L1_CACHE_BYTES < 128
702 prefetch((u8 *)va + L1_CACHE_BYTES);
703 #endif
704
705
706 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
707 GFP_ATOMIC | __GFP_NOWARN);
708 if (unlikely(!skb))
709 return NULL;
710
711 skb_record_rx_queue(skb, rx_ring->q_index);
712
713 headlen = size;
714 if (headlen > ICE_RX_HDR_SIZE)
715 headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
716
717
718 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
719
720
721 size -= headlen;
722 if (size) {
723 #if (PAGE_SIZE >= 8192)
724 unsigned int truesize = SKB_DATA_ALIGN(size);
725 #else
726 unsigned int truesize = ICE_RXBUF_2048;
727 #endif
728 skb_add_rx_frag(skb, 0, rx_buf->page,
729 rx_buf->page_offset + headlen, size, truesize);
730
731 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
732 } else {
733
734
735
736
737 rx_buf->pagecnt_bias++;
738 }
739
740 return skb;
741 }
742
743
744
745
746
747
748
749
750
751 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
752 {
753 if (!rx_buf)
754 return;
755
756 if (ice_can_reuse_rx_page(rx_buf)) {
757
758 ice_reuse_rx_page(rx_ring, rx_buf);
759 rx_ring->rx_stats.page_reuse_count++;
760 } else {
761
762 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
763 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
764 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
765 }
766
767
768 rx_buf->page = NULL;
769 rx_buf->skb = NULL;
770 }
771
772
773
774
775
776
777
778
779
780
781
782
783
784 static bool ice_cleanup_headers(struct sk_buff *skb)
785 {
786
787 if (eth_skb_pad(skb))
788 return true;
789
790 return false;
791 }
792
793
794
795
796
797
798
799
800
801
802
803 static bool
804 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
805 {
806 return !!(rx_desc->wb.status_error0 &
807 cpu_to_le16(stat_err_bits));
808 }
809
810
811
812
813
814
815
816
817
818
819
820
821 static bool
822 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
823 struct sk_buff *skb)
824 {
825 u32 ntc = rx_ring->next_to_clean + 1;
826
827
828 ntc = (ntc < rx_ring->count) ? ntc : 0;
829 rx_ring->next_to_clean = ntc;
830
831 prefetch(ICE_RX_DESC(rx_ring, ntc));
832
833
834 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
835 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
836 return false;
837
838
839 rx_ring->rx_buf[ntc].skb = skb;
840 rx_ring->rx_stats.non_eop_descs++;
841
842 return true;
843 }
844
845
846
847
848
849
850
851 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
852 {
853 return PKT_HASH_TYPE_NONE;
854 }
855
856
857
858
859
860
861
862
863 static void
864 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
865 struct sk_buff *skb, u8 rx_ptype)
866 {
867 struct ice_32b_rx_flex_desc_nic *nic_mdid;
868 u32 hash;
869
870 if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
871 return;
872
873 if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
874 return;
875
876 nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
877 hash = le32_to_cpu(nic_mdid->rss_hash);
878 skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
879 }
880
881
882
883
884
885
886
887
888
889
890 static void
891 ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
892 union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
893 {
894 struct ice_rx_ptype_decoded decoded;
895 u32 rx_error, rx_status;
896 bool ipv4, ipv6;
897
898 rx_status = le16_to_cpu(rx_desc->wb.status_error0);
899 rx_error = rx_status;
900
901 decoded = ice_decode_rx_desc_ptype(ptype);
902
903
904 skb->ip_summed = CHECKSUM_NONE;
905 skb_checksum_none_assert(skb);
906
907
908 if (!(ring->netdev->features & NETIF_F_RXCSUM))
909 return;
910
911
912 if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
913 return;
914
915 if (!(decoded.known && decoded.outer_ip))
916 return;
917
918 ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
919 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
920 ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
921 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
922
923 if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
924 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
925 goto checksum_fail;
926 else if (ipv6 && (rx_status &
927 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
928 goto checksum_fail;
929
930
931
932
933 if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
934 goto checksum_fail;
935
936
937 switch (decoded.inner_prot) {
938 case ICE_RX_PTYPE_INNER_PROT_TCP:
939 case ICE_RX_PTYPE_INNER_PROT_UDP:
940 case ICE_RX_PTYPE_INNER_PROT_SCTP:
941 skb->ip_summed = CHECKSUM_UNNECESSARY;
942 default:
943 break;
944 }
945 return;
946
947 checksum_fail:
948 ring->vsi->back->hw_csum_rx_error++;
949 }
950
951
952
953
954
955
956
957
958
959
960
961
962 static void
963 ice_process_skb_fields(struct ice_ring *rx_ring,
964 union ice_32b_rx_flex_desc *rx_desc,
965 struct sk_buff *skb, u8 ptype)
966 {
967 ice_rx_hash(rx_ring, rx_desc, skb, ptype);
968
969
970 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
971
972 ice_rx_csum(rx_ring, skb, rx_desc, ptype);
973 }
974
975
976
977
978
979
980
981
982
983
984 static void
985 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
986 {
987 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
988 (vlan_tag & VLAN_VID_MASK))
989 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
990 napi_gro_receive(&rx_ring->q_vector->napi, skb);
991 }
992
/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
1005 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1006 {
1007 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1008 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1009 bool failure;
1010
1011
1012 while (likely(total_rx_pkts < (unsigned int)budget)) {
1013 union ice_32b_rx_flex_desc *rx_desc;
1014 struct ice_rx_buf *rx_buf;
1015 struct sk_buff *skb;
1016 unsigned int size;
1017 u16 stat_err_bits;
1018 u16 vlan_tag = 0;
1019 u8 rx_ptype;
1020
1021
1022 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1023
1024
1025
1026
1027
1028
1029 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1030 if (!ice_test_staterr(rx_desc, stat_err_bits))
1031 break;
1032
1033
1034
1035
1036
1037 dma_rmb();
1038
1039 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1040 ICE_RX_FLX_DESC_PKT_LEN_M;
1041
1042
1043 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1044
1045 if (skb)
1046 ice_add_rx_frag(rx_buf, skb, size);
1047 else
1048 skb = ice_construct_skb(rx_ring, rx_buf, size);
1049
1050
1051 if (!skb) {
1052 rx_ring->rx_stats.alloc_buf_failed++;
1053 if (rx_buf)
1054 rx_buf->pagecnt_bias++;
1055 break;
1056 }
1057
1058 ice_put_rx_buf(rx_ring, rx_buf);
1059 cleaned_count++;
1060
1061
1062 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1063 continue;
1064
1065 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1066 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1067 dev_kfree_skb_any(skb);
1068 continue;
1069 }
1070
1071 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1072 if (ice_test_staterr(rx_desc, stat_err_bits))
1073 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1074
1075
1076
1077
1078 if (ice_cleanup_headers(skb)) {
1079 skb = NULL;
1080 continue;
1081 }
1082
1083
1084 total_rx_bytes += skb->len;
1085
1086
1087 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1088 ICE_RX_FLEX_DESC_PTYPE_M;
1089
1090 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1091
1092
1093 ice_receive_skb(rx_ring, skb, vlan_tag);
1094
1095
1096 total_rx_pkts++;
1097 }
1098
1099
1100 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1101
1102
1103 u64_stats_update_begin(&rx_ring->syncp);
1104 rx_ring->stats.pkts += total_rx_pkts;
1105 rx_ring->stats.bytes += total_rx_bytes;
1106 u64_stats_update_end(&rx_ring->syncp);
1107 rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1108 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1109
1110
1111 return failure ? budget : (int)total_rx_pkts;
1112 }
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137 static unsigned int
1138 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1139 unsigned int avg_pkt_size,
1140 unsigned int itr)
1141 {
1142 switch (port_info->phy.link_info.link_speed) {
1143 case ICE_AQ_LINK_SPEED_100GB:
1144 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1145 avg_pkt_size + 640);
1146 break;
1147 case ICE_AQ_LINK_SPEED_50GB:
1148 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1149 avg_pkt_size + 640);
1150 break;
1151 case ICE_AQ_LINK_SPEED_40GB:
1152 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1153 avg_pkt_size + 640);
1154 break;
1155 case ICE_AQ_LINK_SPEED_25GB:
1156 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1157 avg_pkt_size + 640);
1158 break;
1159 case ICE_AQ_LINK_SPEED_20GB:
1160 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1161 avg_pkt_size + 640);
1162 break;
1163 case ICE_AQ_LINK_SPEED_10GB:
1164
1165 default:
1166 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1167 avg_pkt_size + 640);
1168 break;
1169 }
1170
1171 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1172 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1173 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1174 }
1175
1176 return itr;
1177 }
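/* Rough numerical sketch of the adjustment above (illustrative values):
 * each branch adds roughly the wire time per packet in usecs, with the
 * multiplier scaling inversely with link speed (17 at 100G up to 170 at
 * 10G and below). For a 10G link and avg_pkt_size = 60 bytes this adds
 * DIV_ROUND_UP(170 * (60 + 24), 60 + 640) = DIV_ROUND_UP(14280, 700) =
 * 21 usecs. The final check re-clamps the usec portion to
 * ICE_ITR_ADAPTIVE_MAX_USECS while preserving the latency flag bit.
 */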
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192 static void
1193 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1194 {
1195 unsigned long next_update = jiffies;
1196 unsigned int packets, bytes, itr;
1197 bool container_is_rx;
1198
1199 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1200 return;
1201
1202
1203
1204
1205
1206
1207
1208 if (q_vector->itr_countdown) {
1209 itr = rc->target_itr;
1210 goto clear_counts;
1211 }
1212
1213 container_is_rx = (&q_vector->rx == rc);
1214
1215
1216
1217 itr = container_is_rx ?
1218 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1219 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1220
1221
1222
1223
1224
1225
1226 if (time_after(next_update, rc->next_update))
1227 goto clear_counts;
1228
1229 prefetch(q_vector->vsi->port_info);
1230
1231 packets = rc->total_pkts;
1232 bytes = rc->total_bytes;
1233
1234 if (container_is_rx) {
1235
1236
1237
1238
1239
1240 if (packets && packets < 4 && bytes < 9000 &&
1241 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1242 itr = ICE_ITR_ADAPTIVE_LATENCY;
1243 goto adjust_by_size_and_speed;
1244 }
1245 } else if (packets < 4) {
1246
1247
1248
1249
1250
1251 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1252 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1253 ICE_ITR_ADAPTIVE_MAX_USECS)
1254 goto clear_counts;
1255 } else if (packets > 32) {
1256
1257
1258
1259 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1260 }
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270 if (packets < 56) {
1271 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1272 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1273 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1274 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1275 }
1276 goto clear_counts;
1277 }
1278
1279 if (packets <= 256) {
1280 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1281 itr &= ICE_ITR_MASK;
1282
1283
1284
1285
1286
1287 if (packets <= 112)
1288 goto clear_counts;
1289
1290
1291
1292
1293
1294
1295 itr >>= 1;
1296 itr &= ICE_ITR_MASK;
1297 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1298 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1299
1300 goto clear_counts;
1301 }
1302
1303
1304
1305
1306
1307
1308
1309 itr = ICE_ITR_ADAPTIVE_BULK;
1310
1311 adjust_by_size_and_speed:
1312
1313
1314 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1315 bytes / packets, itr);
1316
1317 clear_counts:
1318
1319 rc->target_itr = itr;
1320
1321
1322 rc->next_update = next_update + 1;
1323
1324 rc->total_bytes = 0;
1325 rc->total_pkts = 0;
1326 }
1327
1328
1329
1330
1331
1332
1333 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1334 {
1335
1336
1337
1338
1339
1340
1341
1342 itr &= ICE_ITR_MASK;
1343
1344 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1345 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1346 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1347 }
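/* Register layout note for the helper above: INTENA re-enables the
 * interrupt and CLEARPBA clears the pending bit array, while the ITR
 * index and interval are packed into their GLINT_DYN_CTL fields. The
 * shift by (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) converts the
 * microsecond value into hardware interval units; assuming the 2-usec
 * ITR granularity used by this driver family (the constant is defined
 * outside this file), that amounts to halving the usec value before it
 * is placed in the register.
 */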
1348
1349
1350
1351
1352
1353
1354
1355
1356 #define ITR_COUNTDOWN_START 3
1357
1358
1359
1360
1361
1362 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1363 {
1364 struct ice_ring_container *tx = &q_vector->tx;
1365 struct ice_ring_container *rx = &q_vector->rx;
1366 struct ice_vsi *vsi = q_vector->vsi;
1367 u32 itr_val;
1368
1369
1370
1371
1372
1373 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1374 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1375 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1376
1377 rx->target_itr = rx->itr_setting;
1378
1379 rx->current_itr = ICE_WB_ON_ITR_USECS |
1380 (rx->itr_setting & ICE_ITR_DYNAMIC);
1381
1382 q_vector->itr_countdown = 0;
1383 return;
1384 }
1385
1386
1387 ice_update_itr(q_vector, tx);
1388 ice_update_itr(q_vector, rx);
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398 if (rx->target_itr < rx->current_itr) {
1399
1400 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1401 rx->current_itr = rx->target_itr;
1402 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1403 } else if ((tx->target_itr < tx->current_itr) ||
1404 ((rx->target_itr - rx->current_itr) <
1405 (tx->target_itr - tx->current_itr))) {
1406
1407
1408
1409 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1410 tx->current_itr = tx->target_itr;
1411 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1412 } else if (rx->current_itr != rx->target_itr) {
1413
1414 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1415 rx->current_itr = rx->target_itr;
1416 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1417 } else {
1418
1419 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1420 if (q_vector->itr_countdown)
1421 q_vector->itr_countdown--;
1422 }
1423
1424 if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1425 wr32(&q_vector->vsi->back->hw,
1426 GLINT_DYN_CTL(q_vector->reg_idx),
1427 itr_val);
1428 }
1429
/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write back completed descriptors even when
 * interrupts are disabled (for example while the stack is busy polling).
 * Without WB_ON_ITR, descriptors that do not fill a complete cache line
 * might not be written back until the next interrupt fires.
 *
 * Setting WB_ON_ITR on both the Rx and Tx ITR indexes of this vector makes
 * the hardware flush write-backs on ITR expiry instead. itr_countdown is
 * set to ICE_IN_WB_ON_ITR_MODE so that ice_update_ena_itr() knows to
 * restore the configured ITR values when interrupts are re-enabled.
 */
1444 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1445 {
1446 struct ice_vsi *vsi = q_vector->vsi;
1447
1448
1449 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1450 return;
1451
1452 if (q_vector->num_ring_rx)
1453 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1454 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1455 ICE_RX_ITR));
1456
1457 if (q_vector->num_ring_tx)
1458 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1459 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1460 ICE_TX_ITR));
1461
1462 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1463 }
1464
/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
1474 int ice_napi_poll(struct napi_struct *napi, int budget)
1475 {
1476 struct ice_q_vector *q_vector =
1477 container_of(napi, struct ice_q_vector, napi);
1478 bool clean_complete = true;
1479 struct ice_ring *ring;
1480 int budget_per_ring;
1481 int work_done = 0;
1482
1483
1484
1485
1486 ice_for_each_ring(ring, q_vector->tx)
1487 if (!ice_clean_tx_irq(ring, budget))
1488 clean_complete = false;
1489
1490
1491 if (unlikely(budget <= 0))
1492 return budget;
1493
1494
1495 if (unlikely(q_vector->num_ring_rx > 1))
1496
1497
1498
1499
1500 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1501 else
1502
1503 budget_per_ring = budget;
1504
1505 ice_for_each_ring(ring, q_vector->rx) {
1506 int cleaned;
1507
1508 cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1509 work_done += cleaned;
1510
1511 if (cleaned >= budget_per_ring)
1512 clean_complete = false;
1513 }
1514
1515
1516 if (!clean_complete)
1517 return budget;
1518
1519
1520
1521
1522 if (likely(napi_complete_done(napi, work_done)))
1523 ice_update_ena_itr(q_vector);
1524 else
1525 ice_set_wb_on_itr(q_vector);
1526
1527 return min_t(int, work_done, budget - 1);
1528 }
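/* Budget handling in the poll routine above: the Tx rings are always
 * cleaned with the full budget, the Rx budget is split evenly across the
 * q_vector's Rx rings (never below 1), and returning the full budget
 * keeps the vector in polling mode. When everything is clean, returning
 * at most budget - 1 via min_t() signals completion to the NAPI core,
 * and the interrupt is re-armed only if napi_complete_done() succeeded;
 * otherwise write-back-on-ITR is used so descriptors still get written
 * back while busy polling keeps interrupts masked.
 */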
1529
1530
1531 static __le64
1532 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1533 {
1534 return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1535 (td_cmd << ICE_TXD_QW1_CMD_S) |
1536 (td_offset << ICE_TXD_QW1_OFFSET_S) |
1537 ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1538 (td_tag << ICE_TXD_QW1_L2TAG1_S));
1539 }
1540
1541
1542
1543
1544
1545
1546
1547
1548 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1549 {
1550 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1551
1552 smp_mb();
1553
1554
1555 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1556 return -EBUSY;
1557
1558
1559 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1560 ++tx_ring->tx_stats.restart_q;
1561 return 0;
1562 }
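/* Stop/restart ordering in the helper above: the subqueue is stopped
 * first, then smp_mb() makes that state visible before the free-count
 * is re-read. This pairs with the barrier in ice_clean_tx_irq() so that
 * either this path sees the descriptors freed by a concurrent clean and
 * restarts itself, or the clean path sees the stopped queue and wakes it
 * once enough descriptors (TX_WAKE_THRESHOLD) become available.
 */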
1563
1564
1565
1566
1567
1568
1569
1570
1571 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1572 {
1573 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1574 return 0;
1575
1576 return __ice_maybe_stop_tx(tx_ring, size);
1577 }
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589 static void
1590 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1591 struct ice_tx_offload_params *off)
1592 {
1593 u64 td_offset, td_tag, td_cmd;
1594 u16 i = tx_ring->next_to_use;
1595 skb_frag_t *frag;
1596 unsigned int data_len, size;
1597 struct ice_tx_desc *tx_desc;
1598 struct ice_tx_buf *tx_buf;
1599 struct sk_buff *skb;
1600 dma_addr_t dma;
1601
1602 td_tag = off->td_l2tag1;
1603 td_cmd = off->td_cmd;
1604 td_offset = off->td_offset;
1605 skb = first->skb;
1606
1607 data_len = skb->data_len;
1608 size = skb_headlen(skb);
1609
1610 tx_desc = ICE_TX_DESC(tx_ring, i);
1611
1612 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1613 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1614 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1615 ICE_TX_FLAGS_VLAN_S;
1616 }
1617
1618 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1619
1620 tx_buf = first;
1621
1622 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1623 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1624
1625 if (dma_mapping_error(tx_ring->dev, dma))
1626 goto dma_error;
1627
1628
1629 dma_unmap_len_set(tx_buf, len, size);
1630 dma_unmap_addr_set(tx_buf, dma, dma);
1631
1632
1633 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1634 tx_desc->buf_addr = cpu_to_le64(dma);
1635
1636
1637
1638
1639 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1640 tx_desc->cmd_type_offset_bsz =
1641 build_ctob(td_cmd, td_offset, max_data, td_tag);
1642
1643 tx_desc++;
1644 i++;
1645
1646 if (i == tx_ring->count) {
1647 tx_desc = ICE_TX_DESC(tx_ring, 0);
1648 i = 0;
1649 }
1650
1651 dma += max_data;
1652 size -= max_data;
1653
1654 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1655 tx_desc->buf_addr = cpu_to_le64(dma);
1656 }
1657
1658 if (likely(!data_len))
1659 break;
1660
1661 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1662 size, td_tag);
1663
1664 tx_desc++;
1665 i++;
1666
1667 if (i == tx_ring->count) {
1668 tx_desc = ICE_TX_DESC(tx_ring, 0);
1669 i = 0;
1670 }
1671
1672 size = skb_frag_size(frag);
1673 data_len -= size;
1674
1675 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1676 DMA_TO_DEVICE);
1677
1678 tx_buf = &tx_ring->tx_buf[i];
1679 }
1680
1681
1682 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1683
1684
1685 skb_tx_timestamp(first->skb);
1686
1687 i++;
1688 if (i == tx_ring->count)
1689 i = 0;
1690
1691
1692 td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1693 tx_desc->cmd_type_offset_bsz =
1694 build_ctob(td_cmd, td_offset, size, td_tag);
1695
1696
1697
1698
1699
1700
1701
1702 wmb();
1703
1704
1705 first->next_to_watch = tx_desc;
1706
1707 tx_ring->next_to_use = i;
1708
1709 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1710
1711
1712 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1713 writel(i, tx_ring->tail);
1714 }
1715
1716 return;
1717
1718 dma_error:
1719
1720 for (;;) {
1721 tx_buf = &tx_ring->tx_buf[i];
1722 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1723 if (tx_buf == first)
1724 break;
1725 if (i == 0)
1726 i = tx_ring->count;
1727 i--;
1728 }
1729
1730 tx_ring->next_to_use = i;
1731 }
1732
1733
1734
1735
1736
1737
1738
1739
1740 static
1741 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1742 {
1743 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1744 struct sk_buff *skb = first->skb;
1745 union {
1746 struct iphdr *v4;
1747 struct ipv6hdr *v6;
1748 unsigned char *hdr;
1749 } ip;
1750 union {
1751 struct tcphdr *tcp;
1752 unsigned char *hdr;
1753 } l4;
1754 __be16 frag_off, protocol;
1755 unsigned char *exthdr;
1756 u32 offset, cmd = 0;
1757 u8 l4_proto = 0;
1758
1759 if (skb->ip_summed != CHECKSUM_PARTIAL)
1760 return 0;
1761
1762 ip.hdr = skb_network_header(skb);
1763 l4.hdr = skb_transport_header(skb);
1764
1765
1766 l2_len = ip.hdr - skb->data;
1767 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1768
1769 if (skb->encapsulation)
1770 return -1;
1771
1772
1773 protocol = vlan_get_protocol(skb);
1774 if (protocol == htons(ETH_P_IP)) {
1775 l4_proto = ip.v4->protocol;
1776
1777
1778
1779 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1780 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1781 else
1782 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1783
1784 } else if (protocol == htons(ETH_P_IPV6)) {
1785 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1786 exthdr = ip.hdr + sizeof(*ip.v6);
1787 l4_proto = ip.v6->nexthdr;
1788 if (l4.hdr != exthdr)
1789 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1790 &frag_off);
1791 } else {
1792 return -1;
1793 }
1794
1795
1796 l3_len = l4.hdr - ip.hdr;
1797 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1798
1799
1800 switch (l4_proto) {
1801 case IPPROTO_TCP:
1802
1803 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1804 l4_len = l4.tcp->doff;
1805 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1806 break;
1807 case IPPROTO_UDP:
1808
1809 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1810 l4_len = (sizeof(struct udphdr) >> 2);
1811 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1812 break;
1813 case IPPROTO_SCTP:
1814
1815 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1816 l4_len = sizeof(struct sctphdr) >> 2;
1817 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1818 break;
1819
1820 default:
1821 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1822 return -1;
1823 skb_checksum_help(skb);
1824 return 0;
1825 }
1826
1827 off->td_cmd |= cmd;
1828 off->td_offset |= offset;
1829 return 1;
1830 }
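/* How the td_offset fields above are encoded (a small worked example,
 * not tied to any particular frame): MACLEN is written in 2-byte words,
 * IPLEN and L4LEN in 4-byte words. For an untagged IPv4/TCP frame with a
 * 14-byte Ethernet header, a 20-byte IP header and a 20-byte TCP header,
 * the helper sets MACLEN = 14 / 2 = 7, IPLEN = 20 / 4 = 5 and
 * L4LEN = doff = 5, and selects the IPv4 + TCP checksum commands.
 */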
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843 static int
1844 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1845 {
1846 struct sk_buff *skb = first->skb;
1847 __be16 protocol = skb->protocol;
1848
1849 if (protocol == htons(ETH_P_8021Q) &&
1850 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1851
1852
1853
1854
1855
1856
1857
1858 skb->protocol = vlan_get_protocol(skb);
1859 return 0;
1860 }
1861
1862
1863 if (skb_vlan_tag_present(skb)) {
1864 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1865 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1866 } else if (protocol == htons(ETH_P_8021Q)) {
1867 struct vlan_hdr *vhdr, _vhdr;
1868
1869
1870 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1871 sizeof(_vhdr),
1872 &_vhdr);
1873 if (!vhdr)
1874 return -EINVAL;
1875
1876 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1877 ICE_TX_FLAGS_VLAN_S;
1878 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1879 }
1880
1881 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1882 }
1883
1884
1885
1886
1887
1888
1889
1890
1891 static
1892 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1893 {
1894 struct sk_buff *skb = first->skb;
1895 union {
1896 struct iphdr *v4;
1897 struct ipv6hdr *v6;
1898 unsigned char *hdr;
1899 } ip;
1900 union {
1901 struct tcphdr *tcp;
1902 unsigned char *hdr;
1903 } l4;
1904 u64 cd_mss, cd_tso_len;
1905 u32 paylen, l4_start;
1906 int err;
1907
1908 if (skb->ip_summed != CHECKSUM_PARTIAL)
1909 return 0;
1910
1911 if (!skb_is_gso(skb))
1912 return 0;
1913
1914 err = skb_cow_head(skb, 0);
1915 if (err < 0)
1916 return err;
1917
1918
1919 ip.hdr = skb_network_header(skb);
1920 l4.hdr = skb_transport_header(skb);
1921
1922
1923 if (ip.v4->version == 4) {
1924 ip.v4->tot_len = 0;
1925 ip.v4->check = 0;
1926 } else {
1927 ip.v6->payload_len = 0;
1928 }
1929
1930
1931 l4_start = l4.hdr - skb->data;
1932
1933
1934 paylen = skb->len - l4_start;
1935 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1936
1937
1938 off->header_len = (l4.tcp->doff * 4) + l4_start;
1939
1940
1941 first->gso_segs = skb_shinfo(skb)->gso_segs;
1942 first->bytecount += (first->gso_segs - 1) * off->header_len;
1943
1944 cd_tso_len = skb->len - off->header_len;
1945 cd_mss = skb_shinfo(skb)->gso_size;
1946
1947
1948 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1949 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1950 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1951 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1952 first->tx_flags |= ICE_TX_FLAGS_TSO;
1953 return 1;
1954 }
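/* Worked TSO example for the math above (illustrative numbers only):
 * take a 7294-byte skb with 54 bytes of headers (14 Ethernet + 20 IPv4 +
 * 20 TCP) and gso_size = 1448, so gso_segs = 5. Then header_len =
 * (doff * 4) + l4_start = 20 + 34 = 54, cd_tso_len = 7294 - 54 = 7240
 * bytes of payload, and bytecount grows by (5 - 1) * 54 so the stats
 * count the headers replicated into every segment:
 * 7294 + 216 = 5 * (1448 + 54) bytes on the wire.
 */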
1955
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead, we
 * decompose the operation into shifts and one relatively cheap multiply.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Combining the two gives the single "(size * 85) >> 20" below.
 *
 * Finally, ICE_DESCS_FOR_SKB_DATA_PTR is added to round the estimate up.
 * The multiply-shift slightly underestimates a true divide by 12K, but
 * the error only amounts to a whole descriptor for buffers far larger
 * than anything the stack will hand us, so the simpler arithmetic wins.
 */
1984 static unsigned int ice_txd_use_count(unsigned int size)
1985 {
1986 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1987 }
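/* Quick numeric check of the approximation above (assuming
 * ICE_DESCS_FOR_SKB_DATA_PTR is 1 and the 12K-per-descriptor limit
 * described in the comment): for size = 65536, (65536 * 85) >> 20 = 5,
 * plus 1 gives 6 descriptors, which matches ceil(64K / 12K). For a
 * 1514-byte linear buffer the result is 0 + 1 = 1 descriptor.
 */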
1988
1989
1990
1991
1992
1993
1994
1995 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1996 {
1997 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1998 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1999 unsigned int count = 0, size = skb_headlen(skb);
2000
2001 for (;;) {
2002 count += ice_txd_use_count(size);
2003
2004 if (!nr_frags--)
2005 break;
2006
2007 size = skb_frag_size(frag++);
2008 }
2009
2010 return count;
2011 }
2012
/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
2026 static bool __ice_chk_linearize(struct sk_buff *skb)
2027 {
2028 const skb_frag_t *frag, *stale;
2029 int nr_frags, sum;
2030
2031
2032 nr_frags = skb_shinfo(skb)->nr_frags;
2033 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2034 return false;
2035
2036
2037
2038
2039 nr_frags -= ICE_MAX_BUF_TXD - 2;
2040 frag = &skb_shinfo(skb)->frags[0];
2041
2042
2043
2044
2045
2046
2047
2048 sum = 1 - skb_shinfo(skb)->gso_size;
2049
2050
2051 sum += skb_frag_size(frag++);
2052 sum += skb_frag_size(frag++);
2053 sum += skb_frag_size(frag++);
2054 sum += skb_frag_size(frag++);
2055 sum += skb_frag_size(frag++);
2056
2057
2058
2059
2060 stale = &skb_shinfo(skb)->frags[0];
2061 for (;;) {
2062 sum += skb_frag_size(frag++);
2063
2064
2065 if (sum < 0)
2066 return true;
2067
2068 if (!nr_frags--)
2069 break;
2070
2071 sum -= skb_frag_size(stale++);
2072 }
2073
2074 return false;
2075 }
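/* Sketch of the sliding-window check above: the sum starts at
 * 1 - gso_size and the first five fragment sizes are added, so it tracks
 * how much of one TSO segment the current window of fragments can cover.
 * The window then slides: each new fragment is added and the oldest one
 * is subtracted. If the sum ever goes negative, that window of fragments
 * (the most the hardware can chain for one packet, per ICE_MAX_BUF_TXD)
 * does not hold a full gso_size of payload, so some segment would need
 * too many buffers and the skb has to be linearized first.
 */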
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2087 {
2088
2089 if (likely(count < ICE_MAX_BUF_TXD))
2090 return false;
2091
2092 if (skb_is_gso(skb))
2093 return __ice_chk_linearize(skb);
2094
2095
2096 return count != ICE_MAX_BUF_TXD;
2097 }
2098
/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2106 static netdev_tx_t
2107 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2108 {
2109 struct ice_tx_offload_params offload = { 0 };
2110 struct ice_vsi *vsi = tx_ring->vsi;
2111 struct ice_tx_buf *first;
2112 unsigned int count;
2113 int tso, csum;
2114
2115 count = ice_xmit_desc_count(skb);
2116 if (ice_chk_linearize(skb, count)) {
2117 if (__skb_linearize(skb))
2118 goto out_drop;
2119 count = ice_txd_use_count(skb->len);
2120 tx_ring->tx_stats.tx_linearize++;
2121 }
2122
2123
2124
2125
2126
2127
2128
2129 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2130 ICE_DESCS_FOR_CTX_DESC)) {
2131 tx_ring->tx_stats.tx_busy++;
2132 return NETDEV_TX_BUSY;
2133 }
2134
2135 offload.tx_ring = tx_ring;
2136
2137
2138 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2139 first->skb = skb;
2140 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2141 first->gso_segs = 1;
2142 first->tx_flags = 0;
2143
2144
2145 if (ice_tx_prepare_vlan_flags(tx_ring, first))
2146 goto out_drop;
2147
2148
2149 tso = ice_tso(first, &offload);
2150 if (tso < 0)
2151 goto out_drop;
2152
2153
2154 csum = ice_tx_csum(first, &offload);
2155 if (csum < 0)
2156 goto out_drop;
2157
2158
2159 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2160 vsi->type == ICE_VSI_PF &&
2161 vsi->port_info->is_sw_lldp))
2162 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2163 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2164 ICE_TXD_CTX_QW1_CMD_S);
2165
2166 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2167 struct ice_tx_ctx_desc *cdesc;
2168 int i = tx_ring->next_to_use;
2169
2170
2171 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2172 i++;
2173 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2174
2175
2176 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2177 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2178 cdesc->rsvd = cpu_to_le16(0);
2179 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2180 }
2181
2182 ice_tx_map(tx_ring, first, &offload);
2183 return NETDEV_TX_OK;
2184
2185 out_drop:
2186 dev_kfree_skb_any(skb);
2187 return NETDEV_TX_OK;
2188 }
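/* Descriptor budgeting note for the transmit path above: besides the
 * data descriptors estimated by ice_xmit_desc_count(), the ring check
 * reserves ICE_DESCS_FOR_CTX_DESC for the optional context descriptor
 * (TSO or the LLDP switch-to-uplink case) and ICE_DESCS_PER_CACHE_LINE
 * of slack so the tail never advances onto a cache line the cleanup
 * path may still be reading; the exact values of those constants are
 * defined in the header, not in this file.
 */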
2189
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
2197 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2198 {
2199 struct ice_netdev_priv *np = netdev_priv(netdev);
2200 struct ice_vsi *vsi = np->vsi;
2201 struct ice_ring *tx_ring;
2202
2203 tx_ring = vsi->tx_rings[skb->queue_mapping];
2204
2205
2206
2207
2208 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2209 return NETDEV_TX_OK;
2210
2211 return ice_xmit_frame_ring(skb, tx_ring);
2212 }