This source file includes the following definitions:
- i40e_fdir
- i40e_program_fdir_filter
- i40e_add_del_fdir_udpv4
- i40e_add_del_fdir_tcpv4
- i40e_add_del_fdir_sctpv4
- i40e_add_del_fdir_ipv4
- i40e_add_del_fdir
- i40e_fd_handle_status
- i40e_unmap_and_free_tx_resource
- i40e_clean_tx_ring
- i40e_free_tx_resources
- i40e_get_tx_pending
- i40e_detect_recover_hung
- i40e_clean_tx_irq
- i40e_enable_wb_on_itr
- i40e_force_wb
- i40e_container_is_rx
- i40e_itr_divisor
- i40e_update_itr
- i40e_reuse_rx_page
- i40e_rx_is_programming_status
- i40e_clean_programming_status
- i40e_setup_tx_descriptors
- i40e_clean_rx_ring
- i40e_free_rx_resources
- i40e_setup_rx_descriptors
- i40e_release_rx_desc
- i40e_rx_offset
- i40e_alloc_mapped_page
- i40e_alloc_rx_buffers
- i40e_rx_checksum
- i40e_ptype_to_htype
- i40e_rx_hash
- i40e_process_skb_fields
- i40e_cleanup_headers
- i40e_page_is_reusable
- i40e_can_reuse_rx_page
- i40e_add_rx_frag
- i40e_get_rx_buffer
- i40e_construct_skb
- i40e_build_skb
- i40e_put_rx_buffer
- i40e_is_non_eop
- i40e_xmit_xdp_tx_ring
- i40e_run_xdp
- i40e_rx_buffer_flip
- i40e_xdp_ring_update_tail
- i40e_update_rx_stats
- i40e_finalize_xdp_rx
- i40e_clean_rx_irq
- i40e_buildreg_itr
- i40e_update_enable_itr
- i40e_napi_poll
- i40e_atr
- i40e_tx_prepare_vlan_flags
- i40e_tso
- i40e_tsyn
- i40e_tx_enable_csum
- i40e_create_tx_ctx
- __i40e_maybe_stop_tx
- __i40e_chk_linearize
- i40e_tx_map
- i40e_xmit_xdp_ring
- i40e_xmit_frame_ring
- i40e_lan_xmit_frame
- i40e_xdp_xmit
1
2
3
4 #include <linux/prefetch.h>
5 #include <linux/bpf_trace.h>
6 #include <net/xdp.h>
7 #include "i40e.h"
8 #include "i40e_trace.h"
9 #include "i40e_prototype.h"
10 #include "i40e_txrx_common.h"
11 #include "i40e_xsk.h"
12
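/* Descriptor command bits applied to the last descriptor of every frame:
 * EOP marks the end of the packet and RS asks the hardware to report
 * completion (write-back) status for it.
 */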
13 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
14
15 /**
16  * i40e_fdir - Generate a Flow Director descriptor based on fdata
17  * @tx_ring: send ring
18  * @fdata: Flow director filter data
19  * @add: Indicates fdir add (true), cleanup (false)
20  **/
21 static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
23 {
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
27 u16 i;
28
29
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
32
33 i++;
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
35
36 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
38
39 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
40 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
41
42 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
43 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
44
47
48 /* Use LAN VSI Id if not programmed by user */
49 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
50 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
51 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
52
53 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
54
55 dtype_cmd |= add ?
56 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
57 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
58 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
59 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
60
61 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
62 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
63
64 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
65 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
66
67 if (fdata->cnt_index) {
68 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
69 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
70 ((u32)fdata->cnt_index <<
71 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
72 }
73
74 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
75 fdir_desc->rsvd = cpu_to_le32(0);
76 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
77 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
78 }
79
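/* Number of 1 ms waits i40e_program_fdir_filter() performs while waiting
 * for at least two free descriptors on the flow director ring before
 * giving up and returning -EAGAIN.
 */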
80 #define I40E_FD_CLEAN_DELAY 10
81
82 /* i40e_program_fdir_filter - Program a Flow Director filter
83  * @fdir_data: Packet data that will be filter parameters
84  * @raw_packet: the pre-allocated packet buffer for FDir
85  * @pf: The PF pointer
86  * @add: True for add/update, False for remove
87  */
88 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
89 u8 *raw_packet, struct i40e_pf *pf,
90 bool add)
91 {
92 struct i40e_tx_buffer *tx_buf, *first;
93 struct i40e_tx_desc *tx_desc;
94 struct i40e_ring *tx_ring;
95 struct i40e_vsi *vsi;
96 struct device *dev;
97 dma_addr_t dma;
98 u32 td_cmd = 0;
99 u16 i;
100
101
102 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
103 if (!vsi)
104 return -ENOENT;
105
106 tx_ring = vsi->tx_rings[0];
107 dev = tx_ring->dev;
108
109
110 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
111 if (!i)
112 return -EAGAIN;
113 msleep_interruptible(1);
114 }
115
116 dma = dma_map_single(dev, raw_packet,
117 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
118 if (dma_mapping_error(dev, dma))
119 goto dma_fail;
120
121
122 i = tx_ring->next_to_use;
123 first = &tx_ring->tx_bi[i];
124 i40e_fdir(tx_ring, fdir_data, add);
125
126
127 i = tx_ring->next_to_use;
128 tx_desc = I40E_TX_DESC(tx_ring, i);
129 tx_buf = &tx_ring->tx_bi[i];
130
131 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
132
133 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
134
135
136 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
137 dma_unmap_addr_set(tx_buf, dma, dma);
138
139 tx_desc->buffer_addr = cpu_to_le64(dma);
140 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
141
142 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
143 tx_buf->raw_buf = (void *)raw_packet;
144
145 tx_desc->cmd_type_offset_bsz =
146 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
147
148 /* Force memory writes to complete before letting h/w
149  * know there are new descriptors to fetch.
150  */
151 wmb();
152
153
154 first->next_to_watch = tx_desc;
155
156 writel(tx_ring->next_to_use, tx_ring->tail);
157 return 0;
158
159 dma_fail:
160 return -1;
161 }
162
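/* Dummy packets used to program Flow Director filters: the Ethernet header
 * is IP_HEADER_OFFSET (14) bytes, and the UDP/IPv4 dummy frame is
 * 14 + 20 + 8 = 42 bytes of headers; any flex payload word is patched in
 * immediately after these headers.
 */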
163 #define IP_HEADER_OFFSET 14
164 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
165
166 /**
167  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
168  * @vsi: pointer to the targeted VSI
169  * @fd_data: the flow director data required for the FDir descriptor
170  * @add: true adds a filter, false removes it
171  * Returns 0 if the filters were successfully added or removed
172  **/
173 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
174 struct i40e_fdir_filter *fd_data,
175 bool add)
176 {
177 struct i40e_pf *pf = vsi->back;
178 struct udphdr *udp;
179 struct iphdr *ip;
180 u8 *raw_packet;
181 int ret;
182 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
183 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
184 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
185
186 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
187 if (!raw_packet)
188 return -ENOMEM;
189 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
190
191 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
192 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
193 + sizeof(struct iphdr));
194
195 ip->daddr = fd_data->dst_ip;
196 udp->dest = fd_data->dst_port;
197 ip->saddr = fd_data->src_ip;
198 udp->source = fd_data->src_port;
199
200 if (fd_data->flex_filter) {
201 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
202 __be16 pattern = fd_data->flex_word;
203 u16 off = fd_data->flex_offset;
204
205 *((__force __be16 *)(payload + off)) = pattern;
206 }
207
208 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
209 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
210 if (ret) {
211 dev_info(&pf->pdev->dev,
212 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
213 fd_data->pctype, fd_data->fd_id, ret);
214
215 kfree(raw_packet);
216 return -EOPNOTSUPP;
217 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
218 if (add)
219 dev_info(&pf->pdev->dev,
220 "Filter OK for PCTYPE %d loc = %d\n",
221 fd_data->pctype, fd_data->fd_id);
222 else
223 dev_info(&pf->pdev->dev,
224 "Filter deleted for PCTYPE %d loc = %d\n",
225 fd_data->pctype, fd_data->fd_id);
226 }
227
228 if (add)
229 pf->fd_udp4_filter_cnt++;
230 else
231 pf->fd_udp4_filter_cnt--;
232
233 return 0;
234 }
235
236 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
237
238 /**
239  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
240  * @vsi: pointer to the targeted VSI
241  * @fd_data: the flow director data required for the FDir descriptor
242  * @add: true adds a filter, false removes it
243  * Returns 0 if the filters were successfully added or removed
244  **/
245 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
246 struct i40e_fdir_filter *fd_data,
247 bool add)
248 {
249 struct i40e_pf *pf = vsi->back;
250 struct tcphdr *tcp;
251 struct iphdr *ip;
252 u8 *raw_packet;
253 int ret;
254
255 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
256 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
258 0x0, 0x72, 0, 0, 0, 0};
259
260 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
261 if (!raw_packet)
262 return -ENOMEM;
263 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
264
265 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
266 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
267 + sizeof(struct iphdr));
268
269 ip->daddr = fd_data->dst_ip;
270 tcp->dest = fd_data->dst_port;
271 ip->saddr = fd_data->src_ip;
272 tcp->source = fd_data->src_port;
273
274 if (fd_data->flex_filter) {
275 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
276 __be16 pattern = fd_data->flex_word;
277 u16 off = fd_data->flex_offset;
278
279 *((__force __be16 *)(payload + off)) = pattern;
280 }
281
282 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
283 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
284 if (ret) {
285 dev_info(&pf->pdev->dev,
286 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
287 fd_data->pctype, fd_data->fd_id, ret);
288
289 kfree(raw_packet);
290 return -EOPNOTSUPP;
291 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
292 if (add)
293 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
294 fd_data->pctype, fd_data->fd_id);
295 else
296 dev_info(&pf->pdev->dev,
297 "Filter deleted for PCTYPE %d loc = %d\n",
298 fd_data->pctype, fd_data->fd_id);
299 }
300
301 if (add) {
302 pf->fd_tcp4_filter_cnt++;
303 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
304 I40E_DEBUG_FD & pf->hw.debug_mask)
305 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
306 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
307 } else {
308 pf->fd_tcp4_filter_cnt--;
309 }
310
311 return 0;
312 }
313
314 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
315
316 /**
317  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 filters
318  * @vsi: pointer to the targeted VSI
319  * @fd_data: the flow director data required for the FDir descriptor
320  * @add: true adds a filter, false removes it
321  *
322  * Returns 0 if the filters were successfully added or removed
323  **/
324 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
325 struct i40e_fdir_filter *fd_data,
326 bool add)
327 {
328 struct i40e_pf *pf = vsi->back;
329 struct sctphdr *sctp;
330 struct iphdr *ip;
331 u8 *raw_packet;
332 int ret;
333
334 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
335 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
336 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
337
338 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
339 if (!raw_packet)
340 return -ENOMEM;
341 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
342
343 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
344 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
345 + sizeof(struct iphdr));
346
347 ip->daddr = fd_data->dst_ip;
348 sctp->dest = fd_data->dst_port;
349 ip->saddr = fd_data->src_ip;
350 sctp->source = fd_data->src_port;
351
352 if (fd_data->flex_filter) {
353 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
354 __be16 pattern = fd_data->flex_word;
355 u16 off = fd_data->flex_offset;
356
357 *((__force __be16 *)(payload + off)) = pattern;
358 }
359
360 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
361 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
362 if (ret) {
363 dev_info(&pf->pdev->dev,
364 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
365 fd_data->pctype, fd_data->fd_id, ret);
366
367 kfree(raw_packet);
368 return -EOPNOTSUPP;
369 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
370 if (add)
371 dev_info(&pf->pdev->dev,
372 "Filter OK for PCTYPE %d loc = %d\n",
373 fd_data->pctype, fd_data->fd_id);
374 else
375 dev_info(&pf->pdev->dev,
376 "Filter deleted for PCTYPE %d loc = %d\n",
377 fd_data->pctype, fd_data->fd_id);
378 }
379
380 if (add)
381 pf->fd_sctp4_filter_cnt++;
382 else
383 pf->fd_sctp4_filter_cnt--;
384
385 return 0;
386 }
387
388 #define I40E_IP_DUMMY_PACKET_LEN 34
389
390 /**
391  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 filters
392  * @vsi: pointer to the targeted VSI
393  * @fd_data: the flow director data required for the FDir descriptor
394  * @add: true adds a filter, false removes it
395  *
396  * Returns 0 if the filters were successfully added or removed
397  **/
398 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
399 struct i40e_fdir_filter *fd_data,
400 bool add)
401 {
402 struct i40e_pf *pf = vsi->back;
403 struct iphdr *ip;
404 u8 *raw_packet;
405 int ret;
406 int i;
407 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
408 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
409 0, 0, 0, 0};
410
411 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
412 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
413 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
414 if (!raw_packet)
415 return -ENOMEM;
416 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
417 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
418
419 ip->saddr = fd_data->src_ip;
420 ip->daddr = fd_data->dst_ip;
421 ip->protocol = 0;
422
423 if (fd_data->flex_filter) {
424 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
425 __be16 pattern = fd_data->flex_word;
426 u16 off = fd_data->flex_offset;
427
428 *((__force __be16 *)(payload + off)) = pattern;
429 }
430
431 fd_data->pctype = i;
432 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
433 if (ret) {
434 dev_info(&pf->pdev->dev,
435 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
436 fd_data->pctype, fd_data->fd_id, ret);
437
438
439
440 kfree(raw_packet);
441 return -EOPNOTSUPP;
442 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
443 if (add)
444 dev_info(&pf->pdev->dev,
445 "Filter OK for PCTYPE %d loc = %d\n",
446 fd_data->pctype, fd_data->fd_id);
447 else
448 dev_info(&pf->pdev->dev,
449 "Filter deleted for PCTYPE %d loc = %d\n",
450 fd_data->pctype, fd_data->fd_id);
451 }
452 }
453
454 if (add)
455 pf->fd_ip4_filter_cnt++;
456 else
457 pf->fd_ip4_filter_cnt--;
458
459 return 0;
460 }
461
462 /**
463  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
464  * @vsi: pointer to the targeted VSI
465  * @input: filter to add or delete
466  * @add: true adds a filter, false removes it
467  *
468  **/
469 int i40e_add_del_fdir(struct i40e_vsi *vsi,
470 struct i40e_fdir_filter *input, bool add)
471 {
472 struct i40e_pf *pf = vsi->back;
473 int ret;
474
475 switch (input->flow_type & ~FLOW_EXT) {
476 case TCP_V4_FLOW:
477 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
478 break;
479 case UDP_V4_FLOW:
480 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
481 break;
482 case SCTP_V4_FLOW:
483 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
484 break;
485 case IP_USER_FLOW:
486 switch (input->ip4_proto) {
487 case IPPROTO_TCP:
488 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
489 break;
490 case IPPROTO_UDP:
491 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
492 break;
493 case IPPROTO_SCTP:
494 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
495 break;
496 case IPPROTO_IP:
497 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
498 break;
499 default:
500
501 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
502 input->ip4_proto);
503 return -EINVAL;
504 }
505 break;
506 default:
507 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
508 input->flow_type);
509 return -EINVAL;
510 }
511
512 /* The buffer allocated here will normally be freed by
513  * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
514  * completion. In the event of an error adding the buffer to the FDIR
515  * ring, it will immediately be freed. It may also be freed by
516  * i40e_clean_tx_ring() when closing the VSI.
517  */
518 return ret;
519 }
520
521 /**
522  * i40e_fd_handle_status - check the programming status for FD
523  * @rx_ring: the Rx ring for this descriptor
524  * @rx_desc: the Rx descriptor for programming status, not a packet descriptor
525  * @prog_id: the id originally used for programming
526  *
527  * This is used to verify if the FD programming or invalidation
528  * requested by SW to the HW is successful or not and take actions accordingly.
529  **/
530 void i40e_fd_handle_status(struct i40e_ring *rx_ring,
531 union i40e_rx_desc *rx_desc, u8 prog_id)
532 {
533 struct i40e_pf *pf = rx_ring->vsi->back;
534 struct pci_dev *pdev = pf->pdev;
535 u32 fcnt_prog, fcnt_avail;
536 u32 error;
537 u64 qw;
538
539 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
540 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
541 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
542
543 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
544 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
545 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
546 (I40E_DEBUG_FD & pf->hw.debug_mask))
547 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
548 pf->fd_inv);
549
550
551
552
553
554
555
556 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
557 return;
558
559 pf->fd_add_err++;
560
561 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
562
563 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
564 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
565
566
567
568
569
570
571 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
572 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
573 }
574
575
576 fcnt_prog = i40e_get_global_fd_count(pf);
577 fcnt_avail = pf->fdir_pf_filter_count;
578
579
580
581
582 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
583 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
584 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
585 pf->state))
586 if (I40E_DEBUG_FD & pf->hw.debug_mask)
587 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
588 }
589 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
590 if (I40E_DEBUG_FD & pf->hw.debug_mask)
591 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
592 rx_desc->wb.qword0.hi_dword.fd_id);
593 }
594 }
595
596 /**
597  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
598  * @ring: the ring that owns the buffer
599  * @tx_buffer: the buffer to free
600  **/
601 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
602 struct i40e_tx_buffer *tx_buffer)
603 {
604 if (tx_buffer->skb) {
605 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
606 kfree(tx_buffer->raw_buf);
607 else if (ring_is_xdp(ring))
608 xdp_return_frame(tx_buffer->xdpf);
609 else
610 dev_kfree_skb_any(tx_buffer->skb);
611 if (dma_unmap_len(tx_buffer, len))
612 dma_unmap_single(ring->dev,
613 dma_unmap_addr(tx_buffer, dma),
614 dma_unmap_len(tx_buffer, len),
615 DMA_TO_DEVICE);
616 } else if (dma_unmap_len(tx_buffer, len)) {
617 dma_unmap_page(ring->dev,
618 dma_unmap_addr(tx_buffer, dma),
619 dma_unmap_len(tx_buffer, len),
620 DMA_TO_DEVICE);
621 }
622
623 tx_buffer->next_to_watch = NULL;
624 tx_buffer->skb = NULL;
625 dma_unmap_len_set(tx_buffer, len, 0);
626
627 }
628
629 /**
630  * i40e_clean_tx_ring - Free any empty Tx buffers
631  * @tx_ring: ring to be cleaned
632  **/
633 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
634 {
635 unsigned long bi_size;
636 u16 i;
637
638 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
639 i40e_xsk_clean_tx_ring(tx_ring);
640 } else {
641
642 if (!tx_ring->tx_bi)
643 return;
644
645
646 for (i = 0; i < tx_ring->count; i++)
647 i40e_unmap_and_free_tx_resource(tx_ring,
648 &tx_ring->tx_bi[i]);
649 }
650
651 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
652 memset(tx_ring->tx_bi, 0, bi_size);
653
654
655 memset(tx_ring->desc, 0, tx_ring->size);
656
657 tx_ring->next_to_use = 0;
658 tx_ring->next_to_clean = 0;
659
660 if (!tx_ring->netdev)
661 return;
662
663
664 netdev_tx_reset_queue(txring_txq(tx_ring));
665 }
666
667 /**
668  * i40e_free_tx_resources - Free Tx resources per queue
669  * @tx_ring: Tx descriptor ring for a specific queue
670  *
671  * Free all transmit software resources
672  **/
673 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
674 {
675 i40e_clean_tx_ring(tx_ring);
676 kfree(tx_ring->tx_bi);
677 tx_ring->tx_bi = NULL;
678
679 if (tx_ring->desc) {
680 dma_free_coherent(tx_ring->dev, tx_ring->size,
681 tx_ring->desc, tx_ring->dma);
682 tx_ring->desc = NULL;
683 }
684 }
685
686 /**
687  * i40e_get_tx_pending - how many tx descriptors not processed
688  * @ring: the ring of descriptors
689  * @in_sw: use SW variables
690  *
691  * Since there is no access to the ring head register
692  * in XL710, we need to use our local copies
693  **/
694 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
695 {
696 u32 head, tail;
697
698 if (!in_sw) {
699 head = i40e_get_head(ring);
700 tail = readl(ring->tail);
701 } else {
702 head = ring->next_to_clean;
703 tail = ring->next_to_use;
704 }
705
706 if (head != tail)
707 return (head < tail) ?
708 tail - head : (tail + ring->count - head);
709
710 return 0;
711 }
712
713 /**
714  * i40e_detect_recover_hung - Function to detect and recover hung_queues
715  * @vsi: pointer to vsi struct with tx queues
716  *
717  * VSI has netdev and netdev has TX queues. This function checks each of
718  * those TX queues; if a queue appears hung, recovery is triggered by
719  * issuing a SW interrupt.
720  **/
720 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
721 {
722 struct i40e_ring *tx_ring = NULL;
723 struct net_device *netdev;
724 unsigned int i;
725 int packets;
726
727 if (!vsi)
728 return;
729
730 if (test_bit(__I40E_VSI_DOWN, vsi->state))
731 return;
732
733 netdev = vsi->netdev;
734 if (!netdev)
735 return;
736
737 if (!netif_carrier_ok(netdev))
738 return;
739
740 for (i = 0; i < vsi->num_queue_pairs; i++) {
741 tx_ring = vsi->tx_rings[i];
742 if (tx_ring && tx_ring->desc) {
743
744
745
746
747
748
749
750 packets = tx_ring->stats.packets & INT_MAX;
751 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
752 i40e_force_wb(vsi, tx_ring->q_vector);
753 continue;
754 }
755
756
757
758
759 smp_rmb();
760 tx_ring->tx_stats.prev_pkt_ctr =
761 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
762 }
763 }
764 }
765
766 /**
767  * i40e_clean_tx_irq - Reclaim resources after transmit completes
768  * @vsi: the VSI we care about
769  * @tx_ring: Tx ring to clean
770  * @napi_budget: Used to determine if we are in netpoll
771  *
772  * Returns true if there's any budget left (e.g. the clean is finished)
773  **/
774 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
775 struct i40e_ring *tx_ring, int napi_budget)
776 {
777 int i = tx_ring->next_to_clean;
778 struct i40e_tx_buffer *tx_buf;
779 struct i40e_tx_desc *tx_head;
780 struct i40e_tx_desc *tx_desc;
781 unsigned int total_bytes = 0, total_packets = 0;
782 unsigned int budget = vsi->work_limit;
783
784 tx_buf = &tx_ring->tx_bi[i];
785 tx_desc = I40E_TX_DESC(tx_ring, i);
786 i -= tx_ring->count;
787
788 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
789
790 do {
791 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
792
793
794 if (!eop_desc)
795 break;
796
797
798 smp_rmb();
799
800 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
801
802 if (tx_head == tx_desc)
803 break;
804
805
806 tx_buf->next_to_watch = NULL;
807
808
809 total_bytes += tx_buf->bytecount;
810 total_packets += tx_buf->gso_segs;
811
812
813 if (ring_is_xdp(tx_ring))
814 xdp_return_frame(tx_buf->xdpf);
815 else
816 napi_consume_skb(tx_buf->skb, napi_budget);
817
818
819 dma_unmap_single(tx_ring->dev,
820 dma_unmap_addr(tx_buf, dma),
821 dma_unmap_len(tx_buf, len),
822 DMA_TO_DEVICE);
823
824
825 tx_buf->skb = NULL;
826 dma_unmap_len_set(tx_buf, len, 0);
827
828
829 while (tx_desc != eop_desc) {
830 i40e_trace(clean_tx_irq_unmap,
831 tx_ring, tx_desc, tx_buf);
832
833 tx_buf++;
834 tx_desc++;
835 i++;
836 if (unlikely(!i)) {
837 i -= tx_ring->count;
838 tx_buf = tx_ring->tx_bi;
839 tx_desc = I40E_TX_DESC(tx_ring, 0);
840 }
841
842
843 if (dma_unmap_len(tx_buf, len)) {
844 dma_unmap_page(tx_ring->dev,
845 dma_unmap_addr(tx_buf, dma),
846 dma_unmap_len(tx_buf, len),
847 DMA_TO_DEVICE);
848 dma_unmap_len_set(tx_buf, len, 0);
849 }
850 }
851
852
853 tx_buf++;
854 tx_desc++;
855 i++;
856 if (unlikely(!i)) {
857 i -= tx_ring->count;
858 tx_buf = tx_ring->tx_bi;
859 tx_desc = I40E_TX_DESC(tx_ring, 0);
860 }
861
862 prefetch(tx_desc);
863
864
865 budget--;
866 } while (likely(budget));
867
868 i += tx_ring->count;
869 tx_ring->next_to_clean = i;
870 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
871 i40e_arm_wb(tx_ring, vsi, budget);
872
873 if (ring_is_xdp(tx_ring))
874 return !!budget;
875
876 /* notify netdev of completed buffers */
877 netdev_tx_completed_queue(txring_txq(tx_ring),
878 total_packets, total_bytes);
879
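/* Restart the transmit queue below only once at least TX_WAKE_THRESHOLD
 * (twice DESC_NEEDED) descriptors are unused again, so the queue is not
 * toggled between stopped and running on every completion.
 */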
880 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
881 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
882 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
883 /* Make sure that anybody stopping the queue after this
884  * sees the new next_to_clean.
885  */
886 smp_mb();
887 if (__netif_subqueue_stopped(tx_ring->netdev,
888 tx_ring->queue_index) &&
889 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
890 netif_wake_subqueue(tx_ring->netdev,
891 tx_ring->queue_index);
892 ++tx_ring->tx_stats.restart_queue;
893 }
894 }
895
896 return !!budget;
897 }
898
899 /**
900  * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
901  * @vsi: the VSI we care about
902  * @q_vector: the vector on which to enable writeback
903  *
904  **/
905 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
906 struct i40e_q_vector *q_vector)
907 {
908 u16 flags = q_vector->tx.ring[0].flags;
909 u32 val;
910
911 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
912 return;
913
914 if (q_vector->arm_wb_state)
915 return;
916
917 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
918 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
919 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;
920
921 wr32(&vsi->back->hw,
922 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
923 val);
924 } else {
925 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
926 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK;
927
928 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
929 }
930 q_vector->arm_wb_state = true;
931 }
932
933 /**
934  * i40e_force_wb - Issue SW Interrupt so HW does a wb
935  * @vsi: the VSI we care about
936  * @q_vector: the vector on which to force writeback
937  *
938  **/
939 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
940 {
941 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
942 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
943 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
944 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
945 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
946
947
948 wr32(&vsi->back->hw,
949 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
950 } else {
951 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
952 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
953 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
954 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
955
956
957 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
958 }
959 }
960
961 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
962 struct i40e_ring_container *rc)
963 {
964 return &q_vector->rx == rc;
965 }
966
967 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
968 {
969 unsigned int divisor;
970
971 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
972 case I40E_LINK_SPEED_40GB:
973 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
974 break;
975 case I40E_LINK_SPEED_25GB:
976 case I40E_LINK_SPEED_20GB:
977 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
978 break;
979 default:
980 case I40E_LINK_SPEED_10GB:
981 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
982 break;
983 case I40E_LINK_SPEED_1GB:
984 case I40E_LINK_SPEED_100MB:
985 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
986 break;
987 }
988
989 return divisor;
990 }
991
992 /**
993  * i40e_update_itr - update the dynamic ITR value based on statistics
994  * @q_vector: structure containing interrupt and ring information
995  * @rc: structure containing ring performance data
996  *
997  * Stores a new ITR value based on packets and byte
998  * counts during the last interrupt.  The advantage of per interrupt
999  * computation is faster updates and more accurate ITR for the current
1000  * traffic pattern.  Constants in this function were computed
1001  * based on theoretical maximum wire speed and thresholds were set based
1002  * on testing data as well as attempting to minimize response time
1003  * while increasing bulk throughput.
1004  **/
1005 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1006 struct i40e_ring_container *rc)
1007 {
1008 unsigned int avg_wire_size, packets, bytes, itr;
1009 unsigned long next_update = jiffies;
1010
1011
1012
1013
1014 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1015 return;
1016
1017
1018
1019
1020 itr = i40e_container_is_rx(q_vector, rc) ?
1021 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1022 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1023
1024
1025
1026
1027
1028
1029 if (time_after(next_update, rc->next_update))
1030 goto clear_counts;
1031
1032
1033
1034
1035
1036
1037
1038 if (q_vector->itr_countdown) {
1039 itr = rc->target_itr;
1040 goto clear_counts;
1041 }
1042
1043 packets = rc->total_packets;
1044 bytes = rc->total_bytes;
1045
1046 if (i40e_container_is_rx(q_vector, rc)) {
1047
1048
1049
1050
1051
1052 if (packets && packets < 4 && bytes < 9000 &&
1053 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1054 itr = I40E_ITR_ADAPTIVE_LATENCY;
1055 goto adjust_by_size;
1056 }
1057 } else if (packets < 4) {
1058
1059
1060
1061
1062
1063 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1064 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1065 I40E_ITR_ADAPTIVE_MAX_USECS)
1066 goto clear_counts;
1067 } else if (packets > 32) {
1068
1069
1070
1071 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1072 }
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 if (packets < 56) {
1083 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1084 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1085 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1086 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1087 }
1088 goto clear_counts;
1089 }
1090
1091 if (packets <= 256) {
1092 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1093 itr &= I40E_ITR_MASK;
1094
1095
1096
1097
1098
1099 if (packets <= 112)
1100 goto clear_counts;
1101
1102
1103
1104
1105
1106
1107 itr /= 2;
1108 itr &= I40E_ITR_MASK;
1109 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1110 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1111
1112 goto clear_counts;
1113 }
1114
1115
1116
1117
1118
1119
1120
1121 itr = I40E_ITR_ADAPTIVE_BULK;
1122
1123 adjust_by_size:
1124
1125
1126
1127
1128
1129 avg_wire_size = bytes / packets;
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146 if (avg_wire_size <= 60) {
1147
1148 avg_wire_size = 4096;
1149 } else if (avg_wire_size <= 380) {
1150
1151 avg_wire_size *= 40;
1152 avg_wire_size += 1696;
1153 } else if (avg_wire_size <= 1084) {
1154
1155 avg_wire_size *= 15;
1156 avg_wire_size += 11452;
1157 } else if (avg_wire_size <= 1980) {
1158
1159 avg_wire_size *= 5;
1160 avg_wire_size += 22420;
1161 } else {
1162
1163 avg_wire_size = 32256;
1164 }
1165
1166
1167
1168
1169 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1170 avg_wire_size /= 2;
1171
1172
1173
1174
1175
1176
1177
1178
1179 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1180 I40E_ITR_ADAPTIVE_MIN_INC;
1181
1182 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1183 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1184 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1185 }
1186
1187 clear_counts:
1188
1189 rc->target_itr = itr;
1190
1191
1192 rc->next_update = next_update + 1;
1193
1194 rc->total_bytes = 0;
1195 rc->total_packets = 0;
1196 }
1197
1198 /**
1199  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1200  * @rx_ring: rx descriptor ring to store buffers on
1201  * @old_buff: donor buffer to have page reused
1202  *
1203  * Synchronizes page for reuse by the adapter
1204  **/
1205 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1206 struct i40e_rx_buffer *old_buff)
1207 {
1208 struct i40e_rx_buffer *new_buff;
1209 u16 nta = rx_ring->next_to_alloc;
1210
1211 new_buff = &rx_ring->rx_bi[nta];
1212
1213
1214 nta++;
1215 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1216
1217
1218 new_buff->dma = old_buff->dma;
1219 new_buff->page = old_buff->page;
1220 new_buff->page_offset = old_buff->page_offset;
1221 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1222
1223 rx_ring->rx_stats.page_reuse_count++;
1224
1225
1226 old_buff->page = NULL;
1227 }
1228
1229 /**
1230  * i40e_rx_is_programming_status - check for programming status descriptor
1231  * @qw: qword representing status_error_len in CPU ordering
1232  *
1233  * The value in the descriptor length field indicates whether this is a
1234  * programming status descriptor for flow director or FCoE by the value of
1235  * I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise it is a packet descriptor.
1236  *
1237  **/
1238 static inline bool i40e_rx_is_programming_status(u64 qw)
1239 {
1240 /* The Rx filter programming status and SPH bit occupy the same
1241  * spot in the descriptor. Since we don't support packet split we
1242  * can just reuse the bit as an indication that this is a
1243  * programming status descriptor.
1244  */
1245 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1246 }
1247
1248 /**
1249  * i40e_clean_programming_status - try clean the programming status descriptor
1250  * @rx_ring: the rx ring that has this descriptor
1251  * @rx_desc: the rx descriptor written back by HW
1252  * @qw: qword representing status_error_len in CPU ordering
1253  *
1254  * Flow director should handle FD_FILTER_STATUS to check its filter programming
1255  * status being successful or not and take actions accordingly. FCoE should
1256  * handle its context/filter programming/invalidation status and take actions.
1257  *
1258  * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
1259  **/
1260 struct i40e_rx_buffer *i40e_clean_programming_status(
1261 struct i40e_ring *rx_ring,
1262 union i40e_rx_desc *rx_desc,
1263 u64 qw)
1264 {
1265 struct i40e_rx_buffer *rx_buffer;
1266 u32 ntc;
1267 u8 id;
1268
1269 if (!i40e_rx_is_programming_status(qw))
1270 return NULL;
1271
1272 ntc = rx_ring->next_to_clean;
1273
1274
1275 rx_buffer = &rx_ring->rx_bi[ntc++];
1276 ntc = (ntc < rx_ring->count) ? ntc : 0;
1277 rx_ring->next_to_clean = ntc;
1278
1279 prefetch(I40E_RX_DESC(rx_ring, ntc));
1280
1281 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1282 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1283
1284 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1285 i40e_fd_handle_status(rx_ring, rx_desc, id);
1286
1287 return rx_buffer;
1288 }
1289
1290 /**
1291  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1292  * @tx_ring: the tx ring to set up
1293  *
1294  * Return 0 on success, negative on error
1295  **/
1296 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1297 {
1298 struct device *dev = tx_ring->dev;
1299 int bi_size;
1300
1301 if (!dev)
1302 return -ENOMEM;
1303
1304
1305 WARN_ON(tx_ring->tx_bi);
1306 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1307 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1308 if (!tx_ring->tx_bi)
1309 goto err;
1310
1311 u64_stats_init(&tx_ring->syncp);
1312
1313
1314 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1315
1316
1317
1318 tx_ring->size += sizeof(u32);
1319 tx_ring->size = ALIGN(tx_ring->size, 4096);
1320 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1321 &tx_ring->dma, GFP_KERNEL);
1322 if (!tx_ring->desc) {
1323 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1324 tx_ring->size);
1325 goto err;
1326 }
1327
1328 tx_ring->next_to_use = 0;
1329 tx_ring->next_to_clean = 0;
1330 tx_ring->tx_stats.prev_pkt_ctr = -1;
1331 return 0;
1332
1333 err:
1334 kfree(tx_ring->tx_bi);
1335 tx_ring->tx_bi = NULL;
1336 return -ENOMEM;
1337 }
1338
1339 /**
1340  * i40e_clean_rx_ring - Free Rx buffers
1341  * @rx_ring: ring to be cleaned
1342  **/
1343 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1344 {
1345 unsigned long bi_size;
1346 u16 i;
1347
1348
1349 if (!rx_ring->rx_bi)
1350 return;
1351
1352 if (rx_ring->skb) {
1353 dev_kfree_skb(rx_ring->skb);
1354 rx_ring->skb = NULL;
1355 }
1356
1357 if (rx_ring->xsk_umem) {
1358 i40e_xsk_clean_rx_ring(rx_ring);
1359 goto skip_free;
1360 }
1361
1362
1363 for (i = 0; i < rx_ring->count; i++) {
1364 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1365
1366 if (!rx_bi->page)
1367 continue;
1368
1369
1370
1371
1372 dma_sync_single_range_for_cpu(rx_ring->dev,
1373 rx_bi->dma,
1374 rx_bi->page_offset,
1375 rx_ring->rx_buf_len,
1376 DMA_FROM_DEVICE);
1377
1378
1379 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1380 i40e_rx_pg_size(rx_ring),
1381 DMA_FROM_DEVICE,
1382 I40E_RX_DMA_ATTR);
1383
1384 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1385
1386 rx_bi->page = NULL;
1387 rx_bi->page_offset = 0;
1388 }
1389
1390 skip_free:
1391 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1392 memset(rx_ring->rx_bi, 0, bi_size);
1393
1394
1395 memset(rx_ring->desc, 0, rx_ring->size);
1396
1397 rx_ring->next_to_alloc = 0;
1398 rx_ring->next_to_clean = 0;
1399 rx_ring->next_to_use = 0;
1400 }
1401
1402 /**
1403  * i40e_free_rx_resources - Free Rx resources
1404  * @rx_ring: ring to clean the resources from
1405  *
1406  * Free all receive software resources
1407  **/
1408 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1409 {
1410 i40e_clean_rx_ring(rx_ring);
1411 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1412 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1413 rx_ring->xdp_prog = NULL;
1414 kfree(rx_ring->rx_bi);
1415 rx_ring->rx_bi = NULL;
1416
1417 if (rx_ring->desc) {
1418 dma_free_coherent(rx_ring->dev, rx_ring->size,
1419 rx_ring->desc, rx_ring->dma);
1420 rx_ring->desc = NULL;
1421 }
1422 }
1423
1424 /**
1425  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1426  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1427  *
1428  * Returns 0 on success, negative on failure
1429  **/
1430 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1431 {
1432 struct device *dev = rx_ring->dev;
1433 int err = -ENOMEM;
1434 int bi_size;
1435
1436
1437 WARN_ON(rx_ring->rx_bi);
1438 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1439 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1440 if (!rx_ring->rx_bi)
1441 goto err;
1442
1443 u64_stats_init(&rx_ring->syncp);
1444
1445
1446 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1447 rx_ring->size = ALIGN(rx_ring->size, 4096);
1448 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1449 &rx_ring->dma, GFP_KERNEL);
1450
1451 if (!rx_ring->desc) {
1452 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1453 rx_ring->size);
1454 goto err;
1455 }
1456
1457 rx_ring->next_to_alloc = 0;
1458 rx_ring->next_to_clean = 0;
1459 rx_ring->next_to_use = 0;
1460
1461
1462 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1463 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1464 rx_ring->queue_index);
1465 if (err < 0)
1466 goto err;
1467 }
1468
1469 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1470
1471 return 0;
1472 err:
1473 kfree(rx_ring->rx_bi);
1474 rx_ring->rx_bi = NULL;
1475 return err;
1476 }
1477
1478 /**
1479  * i40e_release_rx_desc - Store the new tail and head values
1480  * @rx_ring: ring to bump
1481  * @val: new head index
1482  **/
1483 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1484 {
1485 rx_ring->next_to_use = val;
1486
1487
1488 rx_ring->next_to_alloc = val;
1489
1490 /* Force memory writes to complete before letting h/w
1491  * know there are new descriptors to fetch.  (Only
1492  * applicable for weak-ordered memory model archs,
1493  * such as IA-64).
1494  */
1495 wmb();
1496 writel(val, rx_ring->tail);
1497 }
1498
1499 /**
1500  * i40e_rx_offset - Return expected offset into page to access data
1501  * @rx_ring: Ring we are requesting offset of
1502  *
1503  * Returns the offset value for ring into the data buffer.
1504  */
1505 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1506 {
1507 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1508 }
1509
1510 /**
1511  * i40e_alloc_mapped_page - recycle or make a new page
1512  * @rx_ring: ring to use
1513  * @bi: rx_buffer struct to modify
1514  *
1515  * Returns true if the page was successfully allocated or
1516  * reused.
1517  **/
1518 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1519 struct i40e_rx_buffer *bi)
1520 {
1521 struct page *page = bi->page;
1522 dma_addr_t dma;
1523
1524
1525 if (likely(page)) {
1526 rx_ring->rx_stats.page_reuse_count++;
1527 return true;
1528 }
1529
1530
1531 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1532 if (unlikely(!page)) {
1533 rx_ring->rx_stats.alloc_page_failed++;
1534 return false;
1535 }
1536
1537
1538 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1539 i40e_rx_pg_size(rx_ring),
1540 DMA_FROM_DEVICE,
1541 I40E_RX_DMA_ATTR);
1542
1543
1544
1545
1546 if (dma_mapping_error(rx_ring->dev, dma)) {
1547 __free_pages(page, i40e_rx_pg_order(rx_ring));
1548 rx_ring->rx_stats.alloc_page_failed++;
1549 return false;
1550 }
1551
1552 bi->dma = dma;
1553 bi->page = page;
1554 bi->page_offset = i40e_rx_offset(rx_ring);
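/* Take USHRT_MAX - 1 page references up front and track our own share in
 * pagecnt_bias; the hot path can then hand buffers back and forth by
 * adjusting the bias instead of touching the atomic page refcount for
 * every received frame.
 */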
1555 page_ref_add(page, USHRT_MAX - 1);
1556 bi->pagecnt_bias = USHRT_MAX;
1557
1558 return true;
1559 }
1560
1561 /**
1562  * i40e_alloc_rx_buffers - Replace used receive buffers
1563  * @rx_ring: ring to place buffers on
1564  * @cleaned_count: number of buffers to replace
1565  *
1566  * Returns false if all allocations were successful, true if any fail
1567  **/
1568 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1569 {
1570 u16 ntu = rx_ring->next_to_use;
1571 union i40e_rx_desc *rx_desc;
1572 struct i40e_rx_buffer *bi;
1573
1574
1575 if (!rx_ring->netdev || !cleaned_count)
1576 return false;
1577
1578 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1579 bi = &rx_ring->rx_bi[ntu];
1580
1581 do {
1582 if (!i40e_alloc_mapped_page(rx_ring, bi))
1583 goto no_buffers;
1584
1585
1586 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1587 bi->page_offset,
1588 rx_ring->rx_buf_len,
1589 DMA_FROM_DEVICE);
1590
1591
1592
1593
1594 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1595
1596 rx_desc++;
1597 bi++;
1598 ntu++;
1599 if (unlikely(ntu == rx_ring->count)) {
1600 rx_desc = I40E_RX_DESC(rx_ring, 0);
1601 bi = rx_ring->rx_bi;
1602 ntu = 0;
1603 }
1604
1605
1606 rx_desc->wb.qword1.status_error_len = 0;
1607
1608 cleaned_count--;
1609 } while (cleaned_count);
1610
1611 if (rx_ring->next_to_use != ntu)
1612 i40e_release_rx_desc(rx_ring, ntu);
1613
1614 return false;
1615
1616 no_buffers:
1617 if (rx_ring->next_to_use != ntu)
1618 i40e_release_rx_desc(rx_ring, ntu);
1619
1620
1621
1622
1623 return true;
1624 }
1625
1626 /**
1627  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1628  * @vsi: the VSI we care about
1629  * @skb: skb currently being received and modified
1630  * @rx_desc: the receive descriptor
1631  **/
1632 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1633 struct sk_buff *skb,
1634 union i40e_rx_desc *rx_desc)
1635 {
1636 struct i40e_rx_ptype_decoded decoded;
1637 u32 rx_error, rx_status;
1638 bool ipv4, ipv6;
1639 u8 ptype;
1640 u64 qword;
1641
1642 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1643 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1644 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1645 I40E_RXD_QW1_ERROR_SHIFT;
1646 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1647 I40E_RXD_QW1_STATUS_SHIFT;
1648 decoded = decode_rx_desc_ptype(ptype);
1649
1650 skb->ip_summed = CHECKSUM_NONE;
1651
1652 skb_checksum_none_assert(skb);
1653
1654
1655 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1656 return;
1657
1658
1659 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1660 return;
1661
1662
1663 if (!(decoded.known && decoded.outer_ip))
1664 return;
1665
1666 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1667 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1668 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1669 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1670
1671 if (ipv4 &&
1672 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1673 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1674 goto checksum_fail;
1675
1676
1677 if (ipv6 &&
1678 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1679
1680 return;
1681
1682
1683 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1684 goto checksum_fail;
1685
1686
1687
1688
1689
1690 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1691 return;
1692
1693
1694
1695
1696
1697 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1698 skb->csum_level = 1;
1699
1700
1701 switch (decoded.inner_prot) {
1702 case I40E_RX_PTYPE_INNER_PROT_TCP:
1703 case I40E_RX_PTYPE_INNER_PROT_UDP:
1704 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1705 skb->ip_summed = CHECKSUM_UNNECESSARY;
1706 /* fall through */
1707 default:
1708 break;
1709 }
1710
1711 return;
1712
1713 checksum_fail:
1714 vsi->back->hw_csum_rx_error++;
1715 }
1716
1717 /**
1718  * i40e_ptype_to_htype - get a hash type
1719  * @ptype: the ptype value from the descriptor
1720  *
1721  * Returns a hash type to be used by skb_set_hash
1722  **/
1723 static inline int i40e_ptype_to_htype(u8 ptype)
1724 {
1725 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1726
1727 if (!decoded.known)
1728 return PKT_HASH_TYPE_NONE;
1729
1730 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1731 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1732 return PKT_HASH_TYPE_L4;
1733 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1734 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1735 return PKT_HASH_TYPE_L3;
1736 else
1737 return PKT_HASH_TYPE_L2;
1738 }
1739
1740 /**
1741  * i40e_rx_hash - set the hash value in the skb
1742  * @ring: descriptor ring
1743  * @rx_desc: specific descriptor
1744  * @skb: skb currently being received and modified
1745  * @rx_ptype: Rx packet type
1746  **/
1747 static inline void i40e_rx_hash(struct i40e_ring *ring,
1748 union i40e_rx_desc *rx_desc,
1749 struct sk_buff *skb,
1750 u8 rx_ptype)
1751 {
1752 u32 hash;
1753 const __le64 rss_mask =
1754 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1755 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1756
1757 if (!(ring->netdev->features & NETIF_F_RXHASH))
1758 return;
1759
1760 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1761 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1762 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1763 }
1764 }
1765
1766
1767 /**
1768  * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1769  * @rx_ring: rx descriptor ring packet is being transacted on
1770  * @rx_desc: pointer to the EOP Rx descriptor
1771  * @skb: pointer to current skb being populated
1772  *
1773  * This function checks the ring, descriptor, and packet information in
1774  * order to populate the hash, checksum, VLAN, protocol, and
1775  * other fields within the skb.
1776  **/
1777 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1778 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1779 {
1780 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1781 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1782 I40E_RXD_QW1_STATUS_SHIFT;
1783 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1784 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1785 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1786 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1787 I40E_RXD_QW1_PTYPE_SHIFT;
1788
1789 if (unlikely(tsynvalid))
1790 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1791
1792 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1793
1794 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1795
1796 skb_record_rx_queue(skb, rx_ring->queue_index);
1797
1798 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1799 u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1800
1801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1802 le16_to_cpu(vlan_tag));
1803 }
1804
1805
1806 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1807 }
1808
1809 /**
1810  * i40e_cleanup_headers - Correct empty headers
1811  * @rx_ring: rx descriptor ring packet is being transacted on
1812  * @skb: pointer to current skb being fixed
1813  * @rx_desc: pointer to the EOP Rx descriptor
1814  *
1815  * Also address the case where we are pulling data in on pages only
1816  * and as such no data is present in the skb header.
1817  *
1818  * In addition if skb is not at least 60 bytes we need to pad it so that
1819  * it is large enough to qualify as a valid Ethernet frame.
1820  *
1821  * Returns true if an error was encountered and skb was freed.
1822  **/
1823 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1824 union i40e_rx_desc *rx_desc)
1825
1826 {
1827
1828 if (IS_ERR(skb))
1829 return true;
1830
1831
1832
1833
1834
1835
1836 if (unlikely(i40e_test_staterr(rx_desc,
1837 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1838 dev_kfree_skb_any(skb);
1839 return true;
1840 }
1841
1842
1843 if (eth_skb_pad(skb))
1844 return true;
1845
1846 return false;
1847 }
1848
1849 /**
1850  * i40e_page_is_reusable - check if any reuse is possible
1851  * @page: page struct to check
1852  *
1853  * A page is not reusable if it was allocated under low memory
1854  * conditions, or it's not in the same NUMA node as this CPU.
1855  */
1856 static inline bool i40e_page_is_reusable(struct page *page)
1857 {
1858 return (page_to_nid(page) == numa_mem_id()) &&
1859 !page_is_pfmemalloc(page);
1860 }
1861
1862
1863
1864
1865
1866 /**
1867  * i40e_can_reuse_rx_page - Determine if this page can be reused by
1868  * the adapter for another receive
1869  * @rx_buffer: buffer containing the page
1870  *
1871  * A page can be handed back to the hardware (see i40e_reuse_rx_page())
1872  * only if it is safe to do so:
1873  *
1874  * - it must still be local to this NUMA node and must not come from the
1875  *   pfmemalloc emergency reserves (i40e_page_is_reusable());
1876  *
1877  * - for order-0 pages the stack must have released its references, which
1878  *   is detected by comparing page_count() with the driver's own
1879  *   pagecnt_bias share;
1880  *
1881  * - for larger pages the offset only moves forward, so reuse stops once
1882  *   page_offset has advanced past I40E_LAST_OFFSET and no full buffer
1883  *   fits in the remaining space.
1884  *
1885  * If the page is reusable and the bias is nearly exhausted, the page
1886  * refcount is bumped by USHRT_MAX - 1 and pagecnt_bias is recharged so
1887  * the hot path keeps recycling without atomic refcount updates per frame.
1888  **/
1889 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1890 {
1891 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1892 struct page *page = rx_buffer->page;
1893
1894
1895 if (unlikely(!i40e_page_is_reusable(page)))
1896 return false;
1897
1898 #if (PAGE_SIZE < 8192)
1899 /* if we are only owner of page we can reuse it */
1900 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1901 return false;
1902 #else
1903 #define I40E_LAST_OFFSET \
1904 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
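/* For pages larger than 4K the offset only moves forward; once it passes
 * I40E_LAST_OFFSET there is no room left for another full receive buffer,
 * so the page can no longer be reused.
 */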
1905 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1906 return false;
1907 #endif
1908
1909
1910
1911
1912
1913 if (unlikely(pagecnt_bias == 1)) {
1914 page_ref_add(page, USHRT_MAX - 1);
1915 rx_buffer->pagecnt_bias = USHRT_MAX;
1916 }
1917
1918 return true;
1919 }
1920
1921 /**
1922  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1923  * @rx_ring: rx descriptor ring to transact packets on
1924  * @rx_buffer: buffer containing page to add
1925  * @skb: sk_buff to place the data into
1926  * @size: packet length from rx_desc
1927  *
1928  * This function will add the data contained in rx_buffer->page to the skb.
1929  * It will just attach the page as a frag to the skb.
1930  *
1931  * The function will then update the page offset.
1932  **/
1933 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1934 struct i40e_rx_buffer *rx_buffer,
1935 struct sk_buff *skb,
1936 unsigned int size)
1937 {
1938 #if (PAGE_SIZE < 8192)
1939 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1940 #else
1941 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1942 #endif
1943
1944 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1945 rx_buffer->page_offset, size, truesize);
1946
1947
1948 #if (PAGE_SIZE < 8192)
1949 rx_buffer->page_offset ^= truesize;
1950 #else
1951 rx_buffer->page_offset += truesize;
1952 #endif
1953 }
1954
1955 /**
1956  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1957  * @rx_ring: rx descriptor ring to transact packets on
1958  * @size: size of buffer to add to skb
1959  *
1960  * This function will pull an Rx buffer from the ring and synchronize it
1961  * for use by the CPU.
1962  **/
1963 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1964 const unsigned int size)
1965 {
1966 struct i40e_rx_buffer *rx_buffer;
1967
1968 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1969 prefetchw(rx_buffer->page);
1970
1971
1972 dma_sync_single_range_for_cpu(rx_ring->dev,
1973 rx_buffer->dma,
1974 rx_buffer->page_offset,
1975 size,
1976 DMA_FROM_DEVICE);
1977
1978
1979 rx_buffer->pagecnt_bias--;
1980
1981 return rx_buffer;
1982 }
1983
1984 /**
1985  * i40e_construct_skb - Allocate skb and populate it
1986  * @rx_ring: rx descriptor ring to transact packets on
1987  * @rx_buffer: rx buffer to pull data from
1988  * @xdp: xdp_buff pointing to the data
1989  *
1990  * This function allocates an skb.  It then populates it with the page
1991  * data from the current receive descriptor, taking care to set up the
1992  * skb correctly.
1993  **/
1994 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1995 struct i40e_rx_buffer *rx_buffer,
1996 struct xdp_buff *xdp)
1997 {
1998 unsigned int size = xdp->data_end - xdp->data;
1999 #if (PAGE_SIZE < 8192)
2000 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2001 #else
2002 unsigned int truesize = SKB_DATA_ALIGN(size);
2003 #endif
2004 unsigned int headlen;
2005 struct sk_buff *skb;
2006
2007
2008 prefetch(xdp->data);
2009 #if L1_CACHE_BYTES < 128
2010 prefetch(xdp->data + L1_CACHE_BYTES);
2011 #endif
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2030 I40E_RX_HDR_SIZE,
2031 GFP_ATOMIC | __GFP_NOWARN);
2032 if (unlikely(!skb))
2033 return NULL;
2034
2035
2036 headlen = size;
2037 if (headlen > I40E_RX_HDR_SIZE)
2038 headlen = eth_get_headlen(skb->dev, xdp->data,
2039 I40E_RX_HDR_SIZE);
2040
2041
2042 memcpy(__skb_put(skb, headlen), xdp->data,
2043 ALIGN(headlen, sizeof(long)));
2044
2045
2046 size -= headlen;
2047 if (size) {
2048 skb_add_rx_frag(skb, 0, rx_buffer->page,
2049 rx_buffer->page_offset + headlen,
2050 size, truesize);
2051
2052
2053 #if (PAGE_SIZE < 8192)
2054 rx_buffer->page_offset ^= truesize;
2055 #else
2056 rx_buffer->page_offset += truesize;
2057 #endif
2058 } else {
2059
2060 rx_buffer->pagecnt_bias++;
2061 }
2062
2063 return skb;
2064 }
2065
2066 /**
2067  * i40e_build_skb - Build skb around an existing buffer
2068  * @rx_ring: Rx descriptor ring to transact packets on
2069  * @rx_buffer: Rx buffer to pull data from
2070  * @xdp: xdp_buff pointing to the data
2071  *
2072  * This function builds an skb around an existing Rx buffer, taking care
2073  * to set up the skb correctly and avoid any memcpy overhead.
2074  **/
2075 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2076 struct i40e_rx_buffer *rx_buffer,
2077 struct xdp_buff *xdp)
2078 {
2079 unsigned int metasize = xdp->data - xdp->data_meta;
2080 #if (PAGE_SIZE < 8192)
2081 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2082 #else
2083 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2084 SKB_DATA_ALIGN(xdp->data_end -
2085 xdp->data_hard_start);
2086 #endif
2087 struct sk_buff *skb;
2088
2089
2090
2091
2092
2093
2094 prefetch(xdp->data_meta);
2095 #if L1_CACHE_BYTES < 128
2096 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2097 #endif
2098
2099 skb = build_skb(xdp->data_hard_start, truesize);
2100 if (unlikely(!skb))
2101 return NULL;
2102
2103
2104 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2105 __skb_put(skb, xdp->data_end - xdp->data);
2106 if (metasize)
2107 skb_metadata_set(skb, metasize);
2108
2109
2110 #if (PAGE_SIZE < 8192)
2111 rx_buffer->page_offset ^= truesize;
2112 #else
2113 rx_buffer->page_offset += truesize;
2114 #endif
2115
2116 return skb;
2117 }
2118
2119 /**
2120  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2121  * @rx_ring: rx descriptor ring to transact packets on
2122  * @rx_buffer: rx buffer to pull data from
2123  *
2124  * This function will clean up the contents of the rx_buffer.  It will
2125  * either recycle the buffer or unmap it and free the associated resources.
2126  **/
2127 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2128 struct i40e_rx_buffer *rx_buffer)
2129 {
2130 if (i40e_can_reuse_rx_page(rx_buffer)) {
2131
2132 i40e_reuse_rx_page(rx_ring, rx_buffer);
2133 } else {
2134
2135 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2136 i40e_rx_pg_size(rx_ring),
2137 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2138 __page_frag_cache_drain(rx_buffer->page,
2139 rx_buffer->pagecnt_bias);
2140
2141 rx_buffer->page = NULL;
2142 }
2143 }
2144
2145 /**
2146  * i40e_is_non_eop - process handling of non-EOP buffers
2147  * @rx_ring: Rx ring being processed
2148  * @rx_desc: Rx descriptor for current buffer
2149  * @skb: Current socket buffer containing buffer in progress
2150  *
2151  * This function updates next to clean.  If the buffer is an EOP buffer
2152  * this function exits returning false, otherwise it will place the
2153  * sk_buff in the next buffer to be chained and return true indicating
2154  * that this is in fact a non-EOP buffer.
2155  **/
2156 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2157 union i40e_rx_desc *rx_desc,
2158 struct sk_buff *skb)
2159 {
2160 u32 ntc = rx_ring->next_to_clean + 1;
2161
2162
2163 ntc = (ntc < rx_ring->count) ? ntc : 0;
2164 rx_ring->next_to_clean = ntc;
2165
2166 prefetch(I40E_RX_DESC(rx_ring, ntc));
2167
2168
2169 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2170 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2171 return false;
2172
2173 rx_ring->rx_stats.non_eop_descs++;
2174
2175 return true;
2176 }
2177
2178 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2179 struct i40e_ring *xdp_ring);
2180
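/* Convert an xdp_buff into an xdp_frame and queue it on the given XDP Tx
 * ring. Returns I40E_XDP_CONSUMED if the conversion fails; otherwise the
 * result of i40e_xmit_xdp_ring() is returned to the caller.
 */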
2181 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2182 {
2183 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2184
2185 if (unlikely(!xdpf))
2186 return I40E_XDP_CONSUMED;
2187
2188 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2189 }
2190
2191 /**
2192  * i40e_run_xdp - run an XDP program
2193  * @rx_ring: Rx ring being processed
2194  * @xdp: XDP buffer containing the frame
2195  **/
2196 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2197 struct xdp_buff *xdp)
2198 {
2199 int err, result = I40E_XDP_PASS;
2200 struct i40e_ring *xdp_ring;
2201 struct bpf_prog *xdp_prog;
2202 u32 act;
2203
2204 rcu_read_lock();
2205 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2206
2207 if (!xdp_prog)
2208 goto xdp_out;
2209
2210 prefetchw(xdp->data_hard_start);
2211
2212 act = bpf_prog_run_xdp(xdp_prog, xdp);
2213 switch (act) {
2214 case XDP_PASS:
2215 break;
2216 case XDP_TX:
2217 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2218 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2219 break;
2220 case XDP_REDIRECT:
2221 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2222 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2223 break;
2224 default:
2225 bpf_warn_invalid_xdp_action(act);
2226 /* fall through */
2227 case XDP_ABORTED:
2228 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2229 /* fall through -- handle aborts by dropping packet */
2230 case XDP_DROP:
2231 result = I40E_XDP_CONSUMED;
2232 break;
2233 }
2234 xdp_out:
2235 rcu_read_unlock();
2236 return ERR_PTR(-result);
2237 }
2238
2239 /**
2240  * i40e_rx_buffer_flip - adjust rx_buffer to point at an unused region
2241  * @rx_ring: Rx ring
2242  * @rx_buffer: Rx buffer to adjust
2243  * @size: Size of adjustment
2244  **/
2245 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2246 struct i40e_rx_buffer *rx_buffer,
2247 unsigned int size)
2248 {
2249 #if (PAGE_SIZE < 8192)
2250 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2251
2252 rx_buffer->page_offset ^= truesize;
2253 #else
2254 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2255
2256 rx_buffer->page_offset += truesize;
2257 #endif
2258 }
2259
2260 /**
2261  * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2262  * @xdp_ring: XDP Tx ring
2263  *
2264  * This function updates the XDP Tx ring tail register.
2265  **/
2266 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2267 {
2268 /* Force memory writes to complete before letting h/w
2269  * know there are new descriptors to fetch.
2270  */
2271 wmb();
2272 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2273 }
2274
2275 /**
2276  * i40e_update_rx_stats - Update Rx ring statistics
2277  * @rx_ring: rx descriptor ring
2278  * @total_rx_bytes: number of bytes received
2279  * @total_rx_packets: number of packets received
2280  *
2281  * This function updates the Rx ring statistics.
2282  **/
2283 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2284 unsigned int total_rx_bytes,
2285 unsigned int total_rx_packets)
2286 {
2287 u64_stats_update_begin(&rx_ring->syncp);
2288 rx_ring->stats.packets += total_rx_packets;
2289 rx_ring->stats.bytes += total_rx_bytes;
2290 u64_stats_update_end(&rx_ring->syncp);
2291 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2292 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2293 }
2294
2295 /**
2296  * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2297  * @rx_ring: Rx ring
2298  * @xdp_res: Result of the receive batch
2299  *
2300  * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2301  * should be called when a batch of packets has been processed in the
2302  * napi loop.
2303  **/
2304 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2305 {
2306 if (xdp_res & I40E_XDP_REDIR)
2307 xdp_do_flush_map();
2308
2309 if (xdp_res & I40E_XDP_TX) {
2310 struct i40e_ring *xdp_ring =
2311 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2312
2313 i40e_xdp_ring_update_tail(xdp_ring);
2314 }
2315 }
2316
2317 /**
2318  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2319  * @rx_ring: rx descriptor ring to transact packets on
2320  * @budget: Total limit on number of packets to process
2321  *
2322  * This function provides a "bounce buffer" approach to Rx interrupt
2323  * processing.  The advantage to this is that on systems that have
2324  * expensive overhead for IOMMU access this provides a means of avoiding
2325  * it by maintaining the mapping of the page to the system.
2326  *
2327  * Returns amount of work completed
2328  **/
2329 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2330 {
2331 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2332 struct sk_buff *skb = rx_ring->skb;
2333 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2334 unsigned int xdp_xmit = 0;
2335 bool failure = false;
2336 struct xdp_buff xdp;
2337
2338 xdp.rxq = &rx_ring->xdp_rxq;
2339
2340 while (likely(total_rx_packets < (unsigned int)budget)) {
2341 struct i40e_rx_buffer *rx_buffer;
2342 union i40e_rx_desc *rx_desc;
2343 unsigned int size;
2344 u64 qword;
2345
2346
2347 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2348 failure = failure ||
2349 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2350 cleaned_count = 0;
2351 }
2352
2353 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2354
2355 /* status_error_len will always be zero for unused descriptors
2356  * because it's cleared in cleanup, and overlaps with hdr_addr
2357  * which is always zero because packet split isn't used, if the
2358  * hardware wrote DD then the length will be non-zero
2359  */
2360 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2361
2362 /* This memory barrier is needed to keep us from reading
2363  * any other fields out of the rx_desc until we have
2364  * verified the descriptor has been written back.
2365  */
2366 dma_rmb();
2367
2368 rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
2369 qword);
2370 if (unlikely(rx_buffer)) {
2371 i40e_reuse_rx_page(rx_ring, rx_buffer);
2372 cleaned_count++;
2373 continue;
2374 }
2375
2376 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2377 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2378 if (!size)
2379 break;
2380
2381 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2382 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2383
2384
2385 if (!skb) {
2386 xdp.data = page_address(rx_buffer->page) +
2387 rx_buffer->page_offset;
2388 xdp.data_meta = xdp.data;
2389 xdp.data_hard_start = xdp.data -
2390 i40e_rx_offset(rx_ring);
2391 xdp.data_end = xdp.data + size;
2392
2393 skb = i40e_run_xdp(rx_ring, &xdp);
2394 }
2395
2396 if (IS_ERR(skb)) {
2397 unsigned int xdp_res = -PTR_ERR(skb);
2398
2399 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2400 xdp_xmit |= xdp_res;
2401 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2402 } else {
2403 rx_buffer->pagecnt_bias++;
2404 }
2405 total_rx_bytes += size;
2406 total_rx_packets++;
2407 } else if (skb) {
2408 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2409 } else if (ring_uses_build_skb(rx_ring)) {
2410 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2411 } else {
2412 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2413 }
2414
2415
2416 if (!skb) {
2417 rx_ring->rx_stats.alloc_buff_failed++;
2418 rx_buffer->pagecnt_bias++;
2419 break;
2420 }
2421
2422 i40e_put_rx_buffer(rx_ring, rx_buffer);
2423 cleaned_count++;
2424
2425 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2426 continue;
2427
2428 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2429 skb = NULL;
2430 continue;
2431 }
2432
2433
2434 total_rx_bytes += skb->len;
2435
2436
2437 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2438
2439 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2440 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2441 skb = NULL;
2442
2443
2444 total_rx_packets++;
2445 }
2446
2447 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2448 rx_ring->skb = skb;
2449
2450 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2451
2452
2453 return failure ? budget : (int)total_rx_packets;
2454 }
2455
2456 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2457 {
2458 u32 val;
2459
2460 /* We don't bother with setting the CLEARPBA bit as the data sheet
2461  * points out doing so is "meaningless since it was already
2462  * auto-cleared". The auto-clearing happens when the interrupt is
2463  * asserted.
2464  *
2465  * Hardware errata 28 also indicates that writing to a DYN_CTL
2466  * register with INTENA_MSK (bit 31) set to 0 will clear an event
2467  * in the PBA anyway, so we need to rely on that and simply ignore
2468  * the state of the INTENA_MSK bit.
2469  *
2470  * The ITR value is reported in microseconds, and the register
2471  * value is recorded in 2 microsecond units. For this reason we
2472  * only need to shift by the interval shift - 1 instead of the
2473  * full value.
2474  */
2475 itr &= I40E_ITR_MASK;
2476
2477 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2478 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2479 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2480
2481 return val;
2482 }
2483
2484 /* a small macro to shorten up some long lines */
2485 #define INTREG I40E_PFINT_DYN_CTLN
2486
2487 /* The act of updating the ITR will cause it to immediately trigger. In
2488  * order to prevent this from throwing off adaptive update statistics we
2489  * defer the update so that it can only happen so often. So after either
2490  * Tx or Rx are updated we make the adaptive scheme wait until either the
2491  * ITR completely expires via the next_update expiration or we have been
2492  * through at least 3 interrupts.
2493  */
2494 #define ITR_COUNTDOWN_START 3
2495
2496 /**
2497  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2498  * @vsi: the VSI we care about
2499  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2500  *
2501  **/
2502 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2503 struct i40e_q_vector *q_vector)
2504 {
2505 struct i40e_hw *hw = &vsi->back->hw;
2506 u32 intval;
2507
2508
2509 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2510 i40e_irq_dynamic_enable_icr0(vsi->back);
2511 return;
2512 }
2513
2514
2515 i40e_update_itr(q_vector, &q_vector->tx);
2516 i40e_update_itr(q_vector, &q_vector->rx);
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2527
2528 intval = i40e_buildreg_itr(I40E_RX_ITR,
2529 q_vector->rx.target_itr);
2530 q_vector->rx.current_itr = q_vector->rx.target_itr;
2531 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2532 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2533 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2534 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2535
2536
2537
2538 intval = i40e_buildreg_itr(I40E_TX_ITR,
2539 q_vector->tx.target_itr);
2540 q_vector->tx.current_itr = q_vector->tx.target_itr;
2541 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2542 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2543
2544 intval = i40e_buildreg_itr(I40E_RX_ITR,
2545 q_vector->rx.target_itr);
2546 q_vector->rx.current_itr = q_vector->rx.target_itr;
2547 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2548 } else {
2549
2550 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2551 if (q_vector->itr_countdown)
2552 q_vector->itr_countdown--;
2553 }
2554
2555 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2556 wr32(hw, INTREG(q_vector->reg_idx), intval);
2557 }
2558
2559 /**
2560  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2561  * @napi: napi struct with our devices info in it
2562  * @budget: amount of work driver is allowed to do this pass, in packets
2563  *
2564  * This function will clean all queues associated with a q_vector.
2565  *
2566  * Returns the amount of work done
2567  **/
2568 int i40e_napi_poll(struct napi_struct *napi, int budget)
2569 {
2570 struct i40e_q_vector *q_vector =
2571 container_of(napi, struct i40e_q_vector, napi);
2572 struct i40e_vsi *vsi = q_vector->vsi;
2573 struct i40e_ring *ring;
2574 bool clean_complete = true;
2575 bool arm_wb = false;
2576 int budget_per_ring;
2577 int work_done = 0;
2578
2579 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2580 napi_complete(napi);
2581 return 0;
2582 }
2583
2584
2585
2586
2587 i40e_for_each_ring(ring, q_vector->tx) {
2588 bool wd = ring->xsk_umem ?
2589 i40e_clean_xdp_tx_irq(vsi, ring, budget) :
2590 i40e_clean_tx_irq(vsi, ring, budget);
2591
2592 if (!wd) {
2593 clean_complete = false;
2594 continue;
2595 }
2596 arm_wb |= ring->arm_wb;
2597 ring->arm_wb = false;
2598 }
2599
2600
2601 if (budget <= 0)
2602 goto tx_only;
2603
2604
2605
2606
2607 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2608
2609 i40e_for_each_ring(ring, q_vector->rx) {
2610 int cleaned = ring->xsk_umem ?
2611 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2612 i40e_clean_rx_irq(ring, budget_per_ring);
2613
2614 work_done += cleaned;
2615
2616 if (cleaned >= budget_per_ring)
2617 clean_complete = false;
2618 }
2619
2620
2621 if (!clean_complete) {
2622 int cpu_id = smp_processor_id();
2623
2624
2625
2626
2627
2628
2629
2630
2631 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2632
2633 napi_complete_done(napi, work_done);
2634
2635
2636 i40e_force_wb(vsi, q_vector);
2637
2638
2639 return budget - 1;
2640 }
2641 tx_only:
2642 if (arm_wb) {
2643 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2644 i40e_enable_wb_on_itr(vsi, q_vector);
2645 }
2646 return budget;
2647 }
2648
2649 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2650 q_vector->arm_wb_state = false;
2651
2652
2653
2654
2655 if (likely(napi_complete_done(napi, work_done)))
2656 i40e_update_enable_itr(vsi, q_vector);
2657
2658 return min(work_done, budget - 1);
2659 }
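/* For context: the poll routine above is registered with the NAPI core when
 * the q_vector is allocated in i40e_main.c.  A minimal sketch of that hookup
 * (call site simplified and reproduced from memory, assuming the four-argument
 * netif_napi_add() used in this era of the kernel); it is kept under #if 0 so
 * it stays documentation only:
 */
#if 0	/* illustrative sketch, not part of this file */
static void i40e_register_poll_sketch(struct i40e_vsi *vsi,
				      struct i40e_q_vector *q_vector)
{
	netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll,
		       NAPI_POLL_WEIGHT);
}
#endif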
2660
2661
2662
2663
2664
2665
2666
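/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add the programming descriptor to
 * @skb: send buffer
 * @tx_flags: collected send information
 *
 * Samples outgoing TCP flows at the ring's atr_sample_rate and writes a
 * filter programming descriptor so that receive traffic for the same flow
 * is steered to this queue; FIN/RST segments remove the filter again.
 **/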
2667 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2668 u32 tx_flags)
2669 {
2670 struct i40e_filter_program_desc *fdir_desc;
2671 struct i40e_pf *pf = tx_ring->vsi->back;
2672 union {
2673 unsigned char *network;
2674 struct iphdr *ipv4;
2675 struct ipv6hdr *ipv6;
2676 } hdr;
2677 struct tcphdr *th;
2678 unsigned int hlen;
2679 u32 flex_ptype, dtype_cmd;
2680 int l4_proto;
2681 u16 i;
2682
2683
2684 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2685 return;
2686
2687 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2688 return;
2689
2690
2691 if (!tx_ring->atr_sample_rate)
2692 return;
2693
2694
2695 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2696 return;
2697
2698
2699 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2700 skb_inner_network_header(skb) : skb_network_header(skb);
2701
2702
2703
2704
2705 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2706
2707 hlen = (hdr.network[0] & 0x0F) << 2;
2708 l4_proto = hdr.ipv4->protocol;
2709 } else {
2710
2711 unsigned int inner_hlen = hdr.network - skb->data;
2712 unsigned int h_offset = inner_hlen;
2713
2714
2715 l4_proto =
2716 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2717
2718 hlen = h_offset - inner_hlen;
2719 }
2720
2721 if (l4_proto != IPPROTO_TCP)
2722 return;
2723
2724 th = (struct tcphdr *)(hdr.network + hlen);
2725
2726
2727 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2728 return;
2729 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2730
2731
2732
2733 if (th->fin || th->rst)
2734 return;
2735 }
2736
2737 tx_ring->atr_count++;
2738
2739
2740 if (!th->fin &&
2741 !th->syn &&
2742 !th->rst &&
2743 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2744 return;
2745
2746 tx_ring->atr_count = 0;
2747
2748
2749 i = tx_ring->next_to_use;
2750 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2751
2752 i++;
2753 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2754
2755 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2756 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2757 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2758 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2759 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2760 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2761 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2762
2763 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2764
2765 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2766
2767 dtype_cmd |= (th->fin || th->rst) ?
2768 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2769 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2770 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2771 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2772
2773 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2774 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2775
2776 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2777 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2778
2779 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2780 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2781 dtype_cmd |=
2782 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2783 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2784 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2785 else
2786 dtype_cmd |=
2787 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2788 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2789 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2790
2791 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2792 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2793
2794 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2795 fdir_desc->rsvd = cpu_to_le32(0);
2796 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2797 fdir_desc->fd_id = cpu_to_le32(0);
2798 }
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
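/**
 * i40e_tx_prepare_vlan_flags - prepare VLAN tagging flags for Tx
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Sets up the transmit flags that describe hardware or software VLAN
 * tagging (and DCB priority) for this frame.
 *
 * Returns 0 on success or a negative error code if the frame should be
 * dropped.
 **/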
2812 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2813 struct i40e_ring *tx_ring,
2814 u32 *flags)
2815 {
2816 __be16 protocol = skb->protocol;
2817 u32 tx_flags = 0;
2818
2819 if (protocol == htons(ETH_P_8021Q) &&
2820 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2821
2822
2823
2824
2825
2826
2827
2828 skb->protocol = vlan_get_protocol(skb);
2829 goto out;
2830 }
2831
2832
2833 if (skb_vlan_tag_present(skb)) {
2834 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2835 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2836
2837 } else if (protocol == htons(ETH_P_8021Q)) {
2838 struct vlan_hdr *vhdr, _vhdr;
2839
2840 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2841 if (!vhdr)
2842 return -EINVAL;
2843
2844 protocol = vhdr->h_vlan_encapsulated_proto;
2845 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2846 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2847 }
2848
2849 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2850 goto out;
2851
2852
2853 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2854 (skb->priority != TC_PRIO_CONTROL)) {
2855 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2856 tx_flags |= (skb->priority & 0x7) <<
2857 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2858 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2859 struct vlan_ethhdr *vhdr;
2860 int rc;
2861
2862 rc = skb_cow_head(skb, 0);
2863 if (rc < 0)
2864 return rc;
2865 vhdr = (struct vlan_ethhdr *)skb->data;
2866 vhdr->h_vlan_TCI = htons(tx_flags >>
2867 I40E_TX_FLAGS_VLAN_SHIFT);
2868 } else {
2869 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2870 }
2871 }
2872
2873 out:
2874 *flags = tx_flags;
2875 return 0;
2876 }
2877
2878
2879
2880
2881
2882
2883
2884
2885
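/**
 * i40e_tso - set up the TSO context descriptor fields
 * @first: pointer to the first Tx buffer for this frame
 * @hdr_len: returns the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1 of the context descriptor
 *
 * Returns 0 if no TSO is needed, 1 if the TSO context was set up, or a
 * negative error code.
 **/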
2886 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2887 u64 *cd_type_cmd_tso_mss)
2888 {
2889 struct sk_buff *skb = first->skb;
2890 u64 cd_cmd, cd_tso_len, cd_mss;
2891 union {
2892 struct iphdr *v4;
2893 struct ipv6hdr *v6;
2894 unsigned char *hdr;
2895 } ip;
2896 union {
2897 struct tcphdr *tcp;
2898 struct udphdr *udp;
2899 unsigned char *hdr;
2900 } l4;
2901 u32 paylen, l4_offset;
2902 u16 gso_segs, gso_size;
2903 int err;
2904
2905 if (skb->ip_summed != CHECKSUM_PARTIAL)
2906 return 0;
2907
2908 if (!skb_is_gso(skb))
2909 return 0;
2910
2911 err = skb_cow_head(skb, 0);
2912 if (err < 0)
2913 return err;
2914
2915 ip.hdr = skb_network_header(skb);
2916 l4.hdr = skb_transport_header(skb);
2917
2918
2919 if (ip.v4->version == 4) {
2920 ip.v4->tot_len = 0;
2921 ip.v4->check = 0;
2922 } else {
2923 ip.v6->payload_len = 0;
2924 }
2925
2926 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2927 SKB_GSO_GRE_CSUM |
2928 SKB_GSO_IPXIP4 |
2929 SKB_GSO_IPXIP6 |
2930 SKB_GSO_UDP_TUNNEL |
2931 SKB_GSO_UDP_TUNNEL_CSUM)) {
2932 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2933 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2934 l4.udp->len = 0;
2935
2936
2937 l4_offset = l4.hdr - skb->data;
2938
2939
2940 paylen = skb->len - l4_offset;
2941 csum_replace_by_diff(&l4.udp->check,
2942 (__force __wsum)htonl(paylen));
2943 }
2944
2945
2946 ip.hdr = skb_inner_network_header(skb);
2947 l4.hdr = skb_inner_transport_header(skb);
2948
2949
2950 if (ip.v4->version == 4) {
2951 ip.v4->tot_len = 0;
2952 ip.v4->check = 0;
2953 } else {
2954 ip.v6->payload_len = 0;
2955 }
2956 }
2957
2958
2959 l4_offset = l4.hdr - skb->data;
2960
2961
2962 paylen = skb->len - l4_offset;
2963 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2964
2965
2966 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2967
2968
2969 gso_size = skb_shinfo(skb)->gso_size;
2970 gso_segs = skb_shinfo(skb)->gso_segs;
2971
2972
2973 first->gso_segs = gso_segs;
2974 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2975
2976
2977 cd_cmd = I40E_TX_CTX_DESC_TSO;
2978 cd_tso_len = skb->len - *hdr_len;
2979 cd_mss = gso_size;
2980 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2981 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2982 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2983 return 1;
2984 }
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
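/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring: ptr to the ring to send on
 * @skb: ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1 of the context descriptor
 *
 * Returns 1 if a Tx hardware timestamp was armed for this frame, else 0.
 **/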
2995 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2996 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2997 {
2998 struct i40e_pf *pf;
2999
3000 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3001 return 0;
3002
3003
3004 if (tx_flags & I40E_TX_FLAGS_TSO)
3005 return 0;
3006
3007
3008
3009
3010 pf = i40e_netdev_to_pf(tx_ring->netdev);
3011 if (!(pf->flags & I40E_FLAG_PTP))
3012 return 0;
3013
3014 if (pf->ptp_tx &&
3015 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3016 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3017 pf->ptp_tx_start = jiffies;
3018 pf->ptp_tx_skb = skb_get(skb);
3019 } else {
3020 pf->tx_hwtstamp_skipped++;
3021 return 0;
3022 }
3023
3024 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3025 I40E_TXD_CTX_QW1_CMD_SHIFT;
3026
3027 return 1;
3028 }
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
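/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context descriptor tunneling bits
 *
 * Returns a negative value on error, 0 if the frame does not use the
 * offload, or 1 when the descriptor fields have been filled in.
 **/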
3039 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3040 u32 *td_cmd, u32 *td_offset,
3041 struct i40e_ring *tx_ring,
3042 u32 *cd_tunneling)
3043 {
3044 union {
3045 struct iphdr *v4;
3046 struct ipv6hdr *v6;
3047 unsigned char *hdr;
3048 } ip;
3049 union {
3050 struct tcphdr *tcp;
3051 struct udphdr *udp;
3052 unsigned char *hdr;
3053 } l4;
3054 unsigned char *exthdr;
3055 u32 offset, cmd = 0;
3056 __be16 frag_off;
3057 u8 l4_proto = 0;
3058
3059 if (skb->ip_summed != CHECKSUM_PARTIAL)
3060 return 0;
3061
3062 ip.hdr = skb_network_header(skb);
3063 l4.hdr = skb_transport_header(skb);
3064
3065
3066 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3067
3068 if (skb->encapsulation) {
3069 u32 tunnel = 0;
3070
3071 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3072 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3073 I40E_TX_CTX_EXT_IP_IPV4 :
3074 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3075
3076 l4_proto = ip.v4->protocol;
3077 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3078 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3079
3080 exthdr = ip.hdr + sizeof(*ip.v6);
3081 l4_proto = ip.v6->nexthdr;
3082 if (l4.hdr != exthdr)
3083 ipv6_skip_exthdr(skb, exthdr - skb->data,
3084 &l4_proto, &frag_off);
3085 }
3086
3087
3088 switch (l4_proto) {
3089 case IPPROTO_UDP:
3090 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3091 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3092 break;
3093 case IPPROTO_GRE:
3094 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3095 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3096 break;
3097 case IPPROTO_IPIP:
3098 case IPPROTO_IPV6:
3099 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3100 l4.hdr = skb_inner_network_header(skb);
3101 break;
3102 default:
3103 if (*tx_flags & I40E_TX_FLAGS_TSO)
3104 return -1;
3105
3106 skb_checksum_help(skb);
3107 return 0;
3108 }
3109
3110
3111 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3112 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3113
3114
3115 ip.hdr = skb_inner_network_header(skb);
3116
3117
3118 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3119 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3120
3121
3122 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3123 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3124 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3125 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3126
3127
3128 *cd_tunneling |= tunnel;
3129
3130
3131 l4.hdr = skb_inner_transport_header(skb);
3132 l4_proto = 0;
3133
3134
3135 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3136 if (ip.v4->version == 4)
3137 *tx_flags |= I40E_TX_FLAGS_IPV4;
3138 if (ip.v6->version == 6)
3139 *tx_flags |= I40E_TX_FLAGS_IPV6;
3140 }
3141
3142
3143 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3144 l4_proto = ip.v4->protocol;
3145
3146
3147
3148 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3149 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3150 I40E_TX_DESC_CMD_IIPT_IPV4;
3151 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3152 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3153
3154 exthdr = ip.hdr + sizeof(*ip.v6);
3155 l4_proto = ip.v6->nexthdr;
3156 if (l4.hdr != exthdr)
3157 ipv6_skip_exthdr(skb, exthdr - skb->data,
3158 &l4_proto, &frag_off);
3159 }
3160
3161
3162 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3163
3164
3165 switch (l4_proto) {
3166 case IPPROTO_TCP:
3167
3168 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3169 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3170 break;
3171 case IPPROTO_SCTP:
3172
3173 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3174 offset |= (sizeof(struct sctphdr) >> 2) <<
3175 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3176 break;
3177 case IPPROTO_UDP:
3178
3179 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3180 offset |= (sizeof(struct udphdr) >> 2) <<
3181 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3182 break;
3183 default:
3184 if (*tx_flags & I40E_TX_FLAGS_TSO)
3185 return -1;
3186 skb_checksum_help(skb);
3187 return 0;
3188 }
3189
3190 *td_cmd |= cmd;
3191 *td_offset |= offset;
3192
3193 return 1;
3194 }
3195
3196
3197
3198
3199
3200
3201
3202
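/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 *
 * Writes a context descriptor only when at least one context field is in
 * use; an all-default context needs no descriptor.
 **/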
3203 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3204 const u64 cd_type_cmd_tso_mss,
3205 const u32 cd_tunneling, const u32 cd_l2tag2)
3206 {
3207 struct i40e_tx_context_desc *context_desc;
3208 int i = tx_ring->next_to_use;
3209
3210 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3211 !cd_tunneling && !cd_l2tag2)
3212 return;
3213
3214
3215 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3216
3217 i++;
3218 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3219
3220
3221 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3222 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3223 context_desc->rsvd = cpu_to_le16(0);
3224 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3225 }
3226
3227
3228
3229
3230
3231
3232
3233
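/**
 * __i40e_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Stops the queue, re-checks the free descriptor count under a memory
 * barrier, and restarts the queue if enough space freed up in the meantime.
 *
 * Returns -EBUSY if the queue must stay stopped, else 0.
 **/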
3234 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3235 {
3236 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3237
3238 smp_mb();
3239
3240
3241 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3242 return -EBUSY;
3243
3244
3245 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3246 ++tx_ring->tx_stats.restart_queue;
3247 return 0;
3248 }
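/* The fast-path helper used throughout this file, i40e_maybe_stop_tx(), only
 * falls back to the function above when the ring is nearly full.  A sketch of
 * that inline wrapper as it lives in i40e_txrx.h, reproduced from memory and
 * kept under #if 0, so treat it as illustrative rather than authoritative:
 */
#if 0	/* illustrative sketch, not part of this file */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
#endif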
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
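/**
 * __i40e_chk_linearize - check whether the frame exceeds the descriptor limit
 * @skb: send buffer
 *
 * The hardware cannot DMA more than I40E_MAX_BUFFER_TXD buffers per packet
 * on the wire, so for TSO frames with many fragments we walk the fragment
 * list and check whether any single segment would need too many buffers.
 *
 * Returns true if the skb must be linearized before transmit.
 **/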
3263 bool __i40e_chk_linearize(struct sk_buff *skb)
3264 {
3265 const skb_frag_t *frag, *stale;
3266 int nr_frags, sum;
3267
3268
3269 nr_frags = skb_shinfo(skb)->nr_frags;
3270 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3271 return false;
3272
3273
3274
3275
3276 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3277 frag = &skb_shinfo(skb)->frags[0];
3278
3279
3280
3281
3282
3283
3284
3285 sum = 1 - skb_shinfo(skb)->gso_size;
3286
3287
3288 sum += skb_frag_size(frag++);
3289 sum += skb_frag_size(frag++);
3290 sum += skb_frag_size(frag++);
3291 sum += skb_frag_size(frag++);
3292 sum += skb_frag_size(frag++);
3293
3294
3295
3296
3297 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3298 int stale_size = skb_frag_size(stale);
3299
3300 sum += skb_frag_size(frag++);
3301
3302
3303
3304
3305
3306
3307
3308 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3309 int align_pad = -(skb_frag_off(stale)) &
3310 (I40E_MAX_READ_REQ_SIZE - 1);
3311
3312 sum -= align_pad;
3313 stale_size -= align_pad;
3314
3315 do {
3316 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3317 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3318 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3319 }
3320
3321
3322 if (sum < 0)
3323 return true;
3324
3325 if (!nr_frags--)
3326 break;
3327
3328 sum -= stale_size;
3329 }
3330
3331 return false;
3332 }
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
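/**
 * i40e_tx_map - Build the Tx descriptors for a frame
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Maps the head and fragments for DMA, fills the data descriptors and
 * finally bumps the ring tail. Returns 0 on success or -1 if a DMA mapping
 * failed and the frame was unwound.
 **/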
3346 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3347 struct i40e_tx_buffer *first, u32 tx_flags,
3348 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3349 {
3350 unsigned int data_len = skb->data_len;
3351 unsigned int size = skb_headlen(skb);
3352 skb_frag_t *frag;
3353 struct i40e_tx_buffer *tx_bi;
3354 struct i40e_tx_desc *tx_desc;
3355 u16 i = tx_ring->next_to_use;
3356 u32 td_tag = 0;
3357 dma_addr_t dma;
3358 u16 desc_count = 1;
3359
3360 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3361 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3362 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3363 I40E_TX_FLAGS_VLAN_SHIFT;
3364 }
3365
3366 first->tx_flags = tx_flags;
3367
3368 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3369
3370 tx_desc = I40E_TX_DESC(tx_ring, i);
3371 tx_bi = first;
3372
3373 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3374 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3375
3376 if (dma_mapping_error(tx_ring->dev, dma))
3377 goto dma_error;
3378
3379
3380 dma_unmap_len_set(tx_bi, len, size);
3381 dma_unmap_addr_set(tx_bi, dma, dma);
3382
3383
3384 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3385 tx_desc->buffer_addr = cpu_to_le64(dma);
3386
3387 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3388 tx_desc->cmd_type_offset_bsz =
3389 build_ctob(td_cmd, td_offset,
3390 max_data, td_tag);
3391
3392 tx_desc++;
3393 i++;
3394 desc_count++;
3395
3396 if (i == tx_ring->count) {
3397 tx_desc = I40E_TX_DESC(tx_ring, 0);
3398 i = 0;
3399 }
3400
3401 dma += max_data;
3402 size -= max_data;
3403
3404 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3405 tx_desc->buffer_addr = cpu_to_le64(dma);
3406 }
3407
3408 if (likely(!data_len))
3409 break;
3410
3411 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3412 size, td_tag);
3413
3414 tx_desc++;
3415 i++;
3416 desc_count++;
3417
3418 if (i == tx_ring->count) {
3419 tx_desc = I40E_TX_DESC(tx_ring, 0);
3420 i = 0;
3421 }
3422
3423 size = skb_frag_size(frag);
3424 data_len -= size;
3425
3426 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3427 DMA_TO_DEVICE);
3428
3429 tx_bi = &tx_ring->tx_bi[i];
3430 }
3431
3432 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3433
3434 i++;
3435 if (i == tx_ring->count)
3436 i = 0;
3437
3438 tx_ring->next_to_use = i;
3439
3440 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3441
3442
3443 td_cmd |= I40E_TX_DESC_CMD_EOP;
3444
3445
3446
3447
3448 desc_count |= ++tx_ring->packet_stride;
3449
3450 if (desc_count >= WB_STRIDE) {
3451
3452 td_cmd |= I40E_TX_DESC_CMD_RS;
3453 tx_ring->packet_stride = 0;
3454 }
3455
3456 tx_desc->cmd_type_offset_bsz =
3457 build_ctob(td_cmd, td_offset, size, td_tag);
3458
3459 skb_tx_timestamp(skb);
3460
3461
3462
3463
3464
3465
3466
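	/* Force memory writes to complete before letting the hardware know
	 * there are new descriptors to fetch, and before next_to_watch is
	 * set below to publish the packet to the cleanup path.
	 */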
3467 wmb();
3468
3469
3470 first->next_to_watch = tx_desc;
3471
3472
3473 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
3474 writel(i, tx_ring->tail);
3475
3476
3477 return 0;
3478
3479 dma_error:
3480 dev_info(tx_ring->dev, "TX DMA map failed\n");
3481
3482
3483 for (;;) {
3484 tx_bi = &tx_ring->tx_bi[i];
3485 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3486 if (tx_bi == first)
3487 break;
3488 if (i == 0)
3489 i = tx_ring->count;
3490 i--;
3491 }
3492
3493 tx_ring->next_to_use = i;
3494
3495 return -1;
3496 }
3497
3498
3499
3500
3501
3502
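/**
 * i40e_xmit_xdp_ring - transmit an XDP frame on an XDP Tx ring
 * @xdpf: frame to transmit
 * @xdp_ring: XDP Tx ring to place the frame on
 *
 * Returns I40E_XDP_TX on success, or I40E_XDP_CONSUMED if the ring is full
 * or the frame could not be DMA-mapped; the caller is responsible for
 * freeing frames that were not placed on the ring.
 **/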
3503 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3504 struct i40e_ring *xdp_ring)
3505 {
3506 u16 i = xdp_ring->next_to_use;
3507 struct i40e_tx_buffer *tx_bi;
3508 struct i40e_tx_desc *tx_desc;
3509 void *data = xdpf->data;
3510 u32 size = xdpf->len;
3511 dma_addr_t dma;
3512
3513 if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3514 xdp_ring->tx_stats.tx_busy++;
3515 return I40E_XDP_CONSUMED;
3516 }
3517 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3518 if (dma_mapping_error(xdp_ring->dev, dma))
3519 return I40E_XDP_CONSUMED;
3520
3521 tx_bi = &xdp_ring->tx_bi[i];
3522 tx_bi->bytecount = size;
3523 tx_bi->gso_segs = 1;
3524 tx_bi->xdpf = xdpf;
3525
3526
3527 dma_unmap_len_set(tx_bi, len, size);
3528 dma_unmap_addr_set(tx_bi, dma, dma);
3529
3530 tx_desc = I40E_TX_DESC(xdp_ring, i);
3531 tx_desc->buffer_addr = cpu_to_le64(dma);
3532 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3533 | I40E_TXD_CMD,
3534 0, size, 0);
3535
3536
3537
3538
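	/* Make certain the descriptor has been fully written before it is
	 * published to the cleanup path via next_to_watch below.
	 */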
3539 smp_wmb();
3540
3541 i++;
3542 if (i == xdp_ring->count)
3543 i = 0;
3544
3545 tx_bi->next_to_watch = tx_desc;
3546 xdp_ring->next_to_use = i;
3547
3548 return I40E_XDP_TX;
3549 }
3550
3551
3552
3553
3554
3555
3556
3557
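/**
 * i40e_xmit_frame_ring - Sends a buffer on a Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if the frame was queued or dropped, or
 * NETDEV_TX_BUSY if the ring is out of descriptors.
 **/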
3558 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3559 struct i40e_ring *tx_ring)
3560 {
3561 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3562 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3563 struct i40e_tx_buffer *first;
3564 u32 td_offset = 0;
3565 u32 tx_flags = 0;
3566 __be16 protocol;
3567 u32 td_cmd = 0;
3568 u8 hdr_len = 0;
3569 int tso, count;
3570 int tsyn;
3571
3572
3573 prefetch(skb->data);
3574
3575 i40e_trace(xmit_frame_ring, skb, tx_ring);
3576
3577 count = i40e_xmit_descriptor_count(skb);
3578 if (i40e_chk_linearize(skb, count)) {
3579 if (__skb_linearize(skb)) {
3580 dev_kfree_skb_any(skb);
3581 return NETDEV_TX_OK;
3582 }
3583 count = i40e_txd_use_count(skb->len);
3584 tx_ring->tx_stats.tx_linearize++;
3585 }
3586
3587
3588
3589
3590
3591
3592
3593 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3594 tx_ring->tx_stats.tx_busy++;
3595 return NETDEV_TX_BUSY;
3596 }
3597
3598
3599 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3600 first->skb = skb;
3601 first->bytecount = skb->len;
3602 first->gso_segs = 1;
3603
3604
3605 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3606 goto out_drop;
3607
3608
3609 protocol = vlan_get_protocol(skb);
3610
3611
3612 if (protocol == htons(ETH_P_IP))
3613 tx_flags |= I40E_TX_FLAGS_IPV4;
3614 else if (protocol == htons(ETH_P_IPV6))
3615 tx_flags |= I40E_TX_FLAGS_IPV6;
3616
3617 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3618
3619 if (tso < 0)
3620 goto out_drop;
3621 else if (tso)
3622 tx_flags |= I40E_TX_FLAGS_TSO;
3623
3624
3625 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3626 tx_ring, &cd_tunneling);
3627 if (tso < 0)
3628 goto out_drop;
3629
3630 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3631
3632 if (tsyn)
3633 tx_flags |= I40E_TX_FLAGS_TSYN;
3634
3635
3636 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3637
3638 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3639 cd_tunneling, cd_l2tag2);
3640
3641
3642
3643
3644
3645 i40e_atr(tx_ring, skb, tx_flags);
3646
3647 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3648 td_cmd, td_offset))
3649 goto cleanup_tx_tstamp;
3650
3651 return NETDEV_TX_OK;
3652
3653 out_drop:
3654 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3655 dev_kfree_skb_any(first->skb);
3656 first->skb = NULL;
3657 cleanup_tx_tstamp:
3658 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3659 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3660
3661 dev_kfree_skb_any(pf->ptp_tx_skb);
3662 pf->ptp_tx_skb = NULL;
3663 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3664 }
3665
3666 return NETDEV_TX_OK;
3667 }
3668
3669
3670
3671
3672
3673
3674
3675
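/**
 * i40e_lan_xmit_frame - selects the Tx ring and sends the buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Pads runt frames up to I40E_MIN_TX_LEN and hands the skb to the ring that
 * matches the skb's queue mapping. Returns NETDEV_TX_OK or an error code.
 **/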
3676 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3677 {
3678 struct i40e_netdev_priv *np = netdev_priv(netdev);
3679 struct i40e_vsi *vsi = np->vsi;
3680 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3681
3682
3683
3684
3685 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3686 return NETDEV_TX_OK;
3687
3688 return i40e_xmit_frame_ring(skb, tx_ring);
3689 }
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
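/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP frame pointers
 * @flags: XDP xmit flags
 *
 * Returns the number of frames successfully placed on the XDP ring, or a
 * negative errno if the VSI is down, XDP is not enabled, or the flags are
 * invalid. Frames that could not be transmitted are freed with
 * xdp_return_frame_rx_napi().
 **/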
3702 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3703 u32 flags)
3704 {
3705 struct i40e_netdev_priv *np = netdev_priv(dev);
3706 unsigned int queue_index = smp_processor_id();
3707 struct i40e_vsi *vsi = np->vsi;
3708 struct i40e_pf *pf = vsi->back;
3709 struct i40e_ring *xdp_ring;
3710 int drops = 0;
3711 int i;
3712
3713 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3714 return -ENETDOWN;
3715
3716 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3717 test_bit(__I40E_CONFIG_BUSY, pf->state))
3718 return -ENXIO;
3719
3720 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3721 return -EINVAL;
3722
3723 xdp_ring = vsi->xdp_rings[queue_index];
3724
3725 for (i = 0; i < n; i++) {
3726 struct xdp_frame *xdpf = frames[i];
3727 int err;
3728
3729 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3730 if (err != I40E_XDP_TX) {
3731 xdp_return_frame_rx_napi(xdpf);
3732 drops++;
3733 }
3734 }
3735
3736 if (unlikely(flags & XDP_XMIT_FLUSH))
3737 i40e_xdp_ring_update_tail(xdp_ring);
3738
3739 return n - drops;
3740 }
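/* Both transmit entry points above are wired into the net_device through the
 * driver's netdev_ops table in i40e_main.c.  A trimmed sketch of the relevant
 * fields (hypothetical table name, the real table contains many more
 * callbacks), kept under #if 0 so it stays documentation only:
 */
#if 0	/* illustrative sketch, not part of this file */
static const struct net_device_ops i40e_netdev_ops_sketch = {
	.ndo_start_xmit	= i40e_lan_xmit_frame,
	.ndo_xdp_xmit	= i40e_xdp_xmit,
};
#endif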