This source file includes the following definitions:
- i40e_intrl_usec_to_reg
- i40e_compute_pad
- i40e_skb_pad
- i40e_test_staterr
- i40e_txd_use_count
- ring_uses_build_skb
- set_ring_build_skb_enabled
- clear_ring_build_skb_enabled
- ring_is_xdp
- set_ring_xdp
- i40e_rx_pg_order
- i40e_get_head
- i40e_xmit_descriptor_count
- i40e_maybe_stop_tx
- i40e_chk_linearize
- txring_txq
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* ITR values are specified in usecs.  The hardware register has a 2 usec
 * resolution, so all of the values below are even, and the top bit of the
 * 16-bit setting is reused as a software "dynamic/adaptive ITR" flag.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_100K		    10	/* all values below must be even */
#define I40E_ITR_50K		    20
#define I40E_ITR_20K		    50
#define I40E_ITR_18K		    60
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)

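/* Illustrative only: with the definitions above, the default Rx setting
 * I40E_ITR_RX_DEF is a 50 usec interval (20K ints/sec) with the adaptive
 * flag set, so ITR_IS_DYNAMIC(I40E_ITR_RX_DEF) is true and
 * ITR_TO_REG(I40E_ITR_RX_DEF) yields the plain 50 usec value, which is
 * scaled to the 2 usec register resolution implied by I40E_MIN_ITR.
 */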

/* The interrupt rate limit register has a 4 usec resolution, and bit 6
 * (INTRL_ENA) must be set whenever a non-zero rate limit is programmed.
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert an interrupt rate limit to register format
 * @intrl: interrupt rate limit in usecs
 *
 * Converts a decimal interrupt rate limit to the register format by scaling
 * it down to the 4 usec register resolution and setting the enable bit.
 * A limit of 0 (or anything below 4 usecs) leaves rate limiting disabled.
 **/
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
#define I40E_INTRL_8K              125	/* 8000 ints/sec */
#define I40E_INTRL_62K             16	/* 62500 ints/sec */
#define I40E_INTRL_83K             12	/* 83333 ints/sec */
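
/* Illustrative only: a requested limit of 40 usecs encodes as
 * i40e_intrl_usec_to_reg(40) == (40 >> 2) | INTRL_ENA == 0x0a | 0x40 == 0x4a,
 * and INTRL_REG_TO_USEC(0x4a) recovers the original 40 usecs.
 */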

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches the hardware ITR index encoding used by the DYN_CTLN
 * and QINT registers.  I40E_ITR_NONE is not a real index; it is the
 * special value meaning "do not update" any of ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1
#define I40E_PE_ITR    I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* I40E_RX_HDR_SIZE bounds the skb allocation used when copying packet
 * headers out of a receive buffer, and I40E_PACKET_HDR_PAD accounts for
 * the Ethernet header, FCS and a possible double VLAN tag when sizing
 * buffers against the netdev MTU.
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We use
 * a 2K buffer for receives and need 1536/1534 bytes to store the frame
 * data, so whatever is left of the half page is handed out as headroom
 * after deducting the skb_shared_info overhead and the IP-alignment pad.
 * On architectures with very large cache lines this can go negative, in
 * which case the driver falls back to the legacy receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
	((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
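
/* Illustrative only, with assumed values for a common configuration
 * (4K pages, NET_SKB_PAD == 64, NET_IP_ALIGN == 2, and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320):
 * the 2K buffer is not "too small", so rx_buf_len = 1536 - 2 = 1534,
 * i40e_compute_pad() rounds that up to the 2048-byte half page, and
 * I40E_SKB_PAD works out to 2048 - 320 - 1534 = 194 bytes of headroom.
 */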

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 **/
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
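
/* Illustrative only: the Rx clean-up path typically uses this helper to
 * test individual descriptor bits, for example the descriptor-done bit
 * before touching a descriptor:
 *
 *	if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;
 */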

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count - estimate the number of Tx descriptors needed
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K.  But division is slow!  Instead we
 * decompose the operation into shifts and one relatively cheap multiply:
 *     divide by 4K  ->  shift right by 12 bits
 *     divide by 3   ->  multiply by 85, then shift right by 8 bits
 * which combines into the single (size * 85) >> 20, plus one to round up.
 **/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
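
/* Illustrative only: a 9000-byte fragment fits in a single descriptor
 * ((9000 * 85) >> 20 == 0, plus 1), while a 32768-byte fragment needs
 * three ((32768 * 85) >> 20 == 2, plus 1), matching the 12K-per-descriptor
 * worst-case assumption above.
 */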

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

/* Tx bookkeeping for one descriptor: the skb/XDP frame being sent plus the
 * DMA unmap information needed once the descriptor completes.
 */
struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame	*xdpf;
		struct sk_buff		*skb;
		void			*raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

/* Rx bookkeeping for one descriptor; the second union member is used by
 * the AF_XDP zero-copy path.
 */
struct i40e_rx_buffer {
	dma_addr_t dma;
	union {
		struct {
			struct page *page;
			__u32 page_offset;
			__u16 pagecnt_bias;
		};
		struct {
			void *addr;
			u64 handle;
		};
	};
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS
};

/* some useful defines for the virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When the Rx clean-up routine must
					 * return before it sees the EOP for
					 * the current packet, the partial skb
					 * is saved here and receive resumes
					 * the next time this ring is polled
					 */

	struct i40e_channel *ch;
	struct xdp_rxq_info xdp_rxq;
	struct xdp_umem *xsk_umem;
	struct zero_copy_allocator zca;	/* ZC allocator anchor */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000
#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

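/* Illustrative only: with 4K pages, a ring using I40E_RXBUFFER_3072 cannot
 * pack two buffers into one page, so i40e_rx_pg_order() returns 1 and
 * i40e_rx_pg_size() is 8192; with I40E_RXBUFFER_2048 (or smaller) the ring
 * stays on order-0 pages.
 */
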
bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns the value of the Tx ring head based on the value stored in the
 * head write-back location, which lives in the memory just past the last
 * descriptor of the ring.
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Walks the linear data and every fragment of the skb and sums the
 * worst-case descriptor usage (see i40e_txd_use_count()) for each piece.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed; otherwise falls back to the slow-path
 * check in __i40e_maybe_stop_tx().
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
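
/* Illustrative sketch (not the exact driver code): the transmit path ties
 * the helpers above together roughly like this; the extra descriptors
 * reserved beyond "count" (for context descriptors and the like) are an
 * assumption of this example:
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = i40e_txd_use_count(skb->len);
 *		tx_ring->tx_stats.tx_linearize++;
 *	}
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 *		tx_ring->tx_stats.tx_busy++;
 *		return NETDEV_TX_BUSY;
 *	}
 */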

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */