This source file includes the following definitions:
- ionic_txq_post
- ionic_rxq_post
- q_to_ndq
- ionic_rx_recycle
- ionic_rx_copybreak
- ionic_rx_clean
- ionic_rx_service
- ionic_rx_walk_cq
- ionic_rx_flush
- ionic_rx_skb_alloc
- ionic_rx_fill
- ionic_rx_fill_cb
- ionic_rx_empty
- ionic_rx_napi
- ionic_tx_map_single
- ionic_tx_map_frag
- ionic_tx_clean
- ionic_tx_flush
- ionic_tx_tcp_inner_pseudo_csum
- ionic_tx_tcp_pseudo_csum
- ionic_tx_tso_post
- ionic_tx_tso_next
- ionic_tx_tso
- ionic_tx_calc_csum
- ionic_tx_calc_no_csum
- ionic_tx_skb_frags
- ionic_tx
- ionic_tx_descs_needed
- ionic_maybe_stop_tx
- ionic_start_xmit

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

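/* Repost the original rx buffer at the queue head so its skb and DMA
 * mapping can be reused for a later packet.
 */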
static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			     struct sk_buff *skb)
{
	struct ionic_rxq_desc *old = desc_info->desc;
	struct ionic_rxq_desc *new = q->head->desc;

	new->addr = old->addr;
	new->len = old->len;

	ionic_rxq_post(q, true, ionic_rx_clean, skb);
}

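/* Copy short packets (up to lif->rx_copybreak bytes) into a small freshly
 * allocated skb and recycle the original buffer; larger packets keep the
 * original skb and only have their DMA mapping released.
 */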
static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			       struct ionic_cq_info *cq_info, struct sk_buff **skb)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_rxq_desc *desc = desc_info->desc;
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->lif->ionic->dev;
	struct sk_buff *new_skb;
	u16 clen, dlen;

	clen = le16_to_cpu(comp->len);
	dlen = le16_to_cpu(desc->len);
	if (clen > q->lif->rx_copybreak) {
		dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
				 dlen, DMA_FROM_DEVICE);
		return false;
	}

	new_skb = netdev_alloc_skb_ip_align(netdev, clen);
	if (!new_skb) {
		dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr),
				 dlen, DMA_FROM_DEVICE);
		return false;
	}

	dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr),
				clen, DMA_FROM_DEVICE);

	memcpy(new_skb->data, (*skb)->data, clen);

	ionic_rx_recycle(q, desc_info, *skb);
	*skb = new_skb;

	return true;
}

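/* Per-completion rx processing: check status, apply copybreak, then pass
 * the skb up with the hash, checksum and VLAN info reported in the
 * completion descriptor.
 */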
static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		ionic_rx_recycle(q, desc_info, skb);
		return;
	}

	if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
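		/* no packet processing while the queue is being reset */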
		ionic_rx_recycle(q, desc_info, skb);
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	ionic_rx_copybreak(q, desc_info, cq_info, &skb);

	skb_put(skb, le16_to_cpu(comp->len));
	skb->protocol = eth_type_trans(skb, netdev);

	skb_record_rx_queue(skb, q->index);

	if (netdev->features & NETIF_F_RXHASH) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (netdev->features & NETIF_F_RXCSUM) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD))
		stats->csum_error++;

	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(comp->vlan_tci));
	}

	napi_gro_receive(&qcq->napi, skb);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

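	/* check for an empty queue */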
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

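	/* clean the queue entry matched by this completion */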
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
{
	u32 work_done = 0;

	while (ionic_rx_service(rxcq, rxcq->tail)) {
		if (rxcq->tail->last)
			rxcq->done_color = !rxcq->done_color;
		rxcq->tail = rxcq->tail->next;
		DEBUG_STATS_CQE_CNT(rxcq);

		if (++work_done >= limit)
			break;
	}

	return work_done;
}

void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_rx_walk_cq(cq, cq->num_descs);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
					  dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_kfree_skb(skb);
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return skb;
}

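/* Ring the rx doorbell only once every four posted descriptors (stride
 * mask 0x3) to batch doorbell writes while refilling the ring.
 */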
#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1)

void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rxq_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	bool ring_doorbell;
	unsigned int len;
	unsigned int i;

	len = netdev->mtu + ETH_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		skb = ionic_rx_skb_alloc(q, len, &dma_addr);
		if (!skb)
			return;

		desc = q->head->desc;
		desc->addr = cpu_to_le64(dma_addr);
		desc->len = cpu_to_le16(len);
		desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE;

		ring_doorbell = ((q->head->index + 1) &
				 IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb);
	}
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		dma_unmap_single(dev, le64_to_cpu(desc->addr),
				 le16_to_cpu(desc->len), DMA_FROM_DEVICE);
		dev_kfree_skb(cur->cb_arg);
		cur->cb_arg = NULL;
	}
}

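/* NAPI poll: flush tx completions first, then service up to 'budget' rx
 * completions, refill the rx ring, and unmask the interrupt when done.
 */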
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	ionic_tx_flush(txcq);

	work_done = ionic_rx_walk_cq(rxcq, budget);

	if (work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

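	/* buffers for non-TSO descriptors and for TSO start-of-transmission
	 * descriptors are unmapped as single mappings; the remaining TSO
	 * buffers are unmapped as page mappings
	 */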
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_txq_comp *comp = cq->tail->cq_desc;
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	unsigned int work_done = 0;

	while (work_done < cq->num_descs &&
	       color_match(comp->color, cq->done_color)) {
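		/* a single completion entry may cover several descriptors;
		 * clean queue entries up to and including comp_index
		 */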
		do {
			desc_info = q->tail;
			q->tail = desc_info->next;
			ionic_tx_clean(q, desc_info, cq->tail,
				       desc_info->cb_arg);
			desc_info->cb = NULL;
			desc_info->cb_arg = NULL;
		} while (desc_info->index != le16_to_cpu(comp->comp_index));

		if (cq->tail->last)
			cq->done_color = !cq->done_color;

		cq->tail = cq->tail->next;
		comp = cq->tail->cq_desc;
		DEBUG_STATS_CQE_CNT(cq);

		work_done++;
	}

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, 0);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}

static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

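	/* Seed the (inner-most) TCP checksum field with a pseudo-header
	 * checksum computed over a zero length, as the TSO offload path
	 * expects.
	 */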
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

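	/* chop the linear skb data up into TSO segments */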
	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

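	/* chop the page fragments up into descriptors and SG elements */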
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;

	return 0;

err_out_abort:
	while (rewind->desc != q->head->desc) {
		ionic_tx_clean(q, rewind, NULL, NULL);
		rewind = rewind->next;
	}
	q->head = abort;

	return -ENOMEM;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));

	stats->no_csum++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

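	/* set up the initial descriptor */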
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

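	/* add the remaining skb fragments as SG elements */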
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

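	/* a TSO skb needs roughly one descriptor per gso_size segment */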
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

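	/* a non-TSO skb fits in one descriptor if its frags fit the SG list */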
	if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
		return 1;

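	/* too many frags for the SG list, so linearize the skb */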
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return 1;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

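		/* the clean path may have freed space since the check above;
		 * re-check after the barrier and re-wake the queue if so
		 */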
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(!lif_to_txqcq(lif, queue_index)))
		queue_index = 0;
	q = lif_to_txq(lif, queue_index);

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

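	/* proactively stop the queue if fewer than 4 descriptors remain
	 * for the next packet
	 */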
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}