This source file includes the following definitions:
- iavf_compute_pad
- iavf_skb_pad
- iavf_test_staterr
- iavf_txd_use_count
- ring_uses_build_skb
- set_ring_build_skb_enabled
- clear_ring_build_skb_enabled
- iavf_rx_pg_order
- iavf_xmit_descriptor_count
- iavf_maybe_stop_tx
- iavf_chk_linearize
- txring_txq
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value
 * for the ITR is 8160usec, which is called out as 0xFF0 with a 2usec
 * resolution.  8160 is 0x1FE0 when written out in hex.  Instead of storing
 * the register value (which is divided by 2), we store the actual usec
 * values here and avoid doing 2x the math all over the place.
 */
#define IAVF_ITR_DYNAMIC        0x8000  /* ITR flag, not a real ITR value */
#define IAVF_ITR_MASK           0x1FFE  /* mask for ITR register value */
#define IAVF_MIN_ITR                 2  /* reg uses 2 usec resolution */
#define IAVF_ITR_100K               10  /* all values below must be even */
#define IAVF_ITR_50K                20
#define IAVF_ITR_20K                50
#define IAVF_ITR_18K                60
#define IAVF_ITR_8K                122
#define IAVF_MAX_ITR              8160  /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))

#define IAVF_ITR_RX_DEF         (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
#define IAVF_ITR_TX_DEF         (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
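
/* Illustrative only (not part of the driver): the default settings above
 * encode both the 50 usec interval and the dynamic flag in one u16, so
 * ITR_IS_DYNAMIC(IAVF_ITR_RX_DEF) evaluates to true, while
 * ITR_TO_REG(IAVF_ITR_RX_DEF) strips the flag and yields plain 50.
 */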

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the rate limit value is non-zero.  The register has 4 usec resolution.
 */
#define INTRL_ENA                  BIT(6)
#define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define IAVF_INTRL_8K              125     /* 8000 ints/sec */
#define IAVF_INTRL_62K             16      /* 62500 ints/sec */
#define IAVF_INTRL_83K             12      /* 83333 ints/sec */
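
/* Worked example (illustrative only): INTRL_USEC_TO_REG(IAVF_INTRL_8K)
 * = (125 >> 2) | INTRL_ENA = 0x1F | 0x40 = 0x5F.  Converting back,
 * INTRL_REG_TO_USEC(0x5F) = 0x1F << 2 = 124 usec: the 4 usec register
 * resolution means the round trip drops the two low bits.
 */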

#define IAVF_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX.  IAVF_ITR_NONE cannot be used as an index 'n' into
 * any register; it is a special value meaning "don't update" ITR0, ITR1
 * or ITR2.
 */
enum iavf_dyn_idx_t {
        IAVF_IDX_ITR0 = 0,
        IAVF_IDX_ITR1 = 1,
        IAVF_IDX_ITR2 = 2,
        IAVF_ITR_NONE = 3       /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define IAVF_RX_ITR IAVF_IDX_ITR0
#define IAVF_TX_ITR IAVF_IDX_ITR1
#define IAVF_PE_ITR IAVF_IDX_ITR2

/* Supported RSS offloads */
#define IAVF_DEFAULT_RSS_HENA ( \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))

#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
        BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define IAVF_RXBUFFER_256   256
#define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define IAVF_RXBUFFER_2048  2048
#define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc

#define IAVF_RX_DMA_ATTR \
        (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to result
 *       in 128 less bytes of headroom.
 */
#if (PAGE_SIZE < 8192)
#define IAVF_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))

static inline int iavf_compute_pad(int rx_buf_len)
{
        int page_size, pad_size;

        page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
        pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

        return pad_size;
}

static inline int iavf_skb_pad(void)
{
        int rx_buf_len;

        /* If a 2K buffer cannot handle a standard Ethernet frame then
         * optimize padding for a 3K buffer instead of a 1.5K buffer.
         *
         * For a 3K buffer we need to add enough padding to allow for
         * tailroom due to NET_IP_ALIGN possibly shifting us out of
         * cache-line alignment.
         */
        if (IAVF_2K_TOO_SMALL_WITH_PADDING)
                rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
        else
                rx_buf_len = IAVF_RXBUFFER_1536;

        /* if needed make room for NET_IP_ALIGN */
        rx_buf_len -= NET_IP_ALIGN;

        return iavf_compute_pad(rx_buf_len);
}

#define IAVF_SKB_PAD iavf_skb_pad()
#else
#define IAVF_2K_TOO_SMALL_WITH_PADDING false
#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
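
/* Worked example (illustrative only, assuming 4K pages and a
 * skb_shared_info that rounds to 320 bytes after SKB_DATA_ALIGN, as on a
 * typical x86_64 build): IAVF_2K_TOO_SMALL_WITH_PADDING is false there
 * (64 + 1536 <= 2048 - 320), so iavf_skb_pad() picks rx_buf_len =
 * 1536 - 2 = 1534, and iavf_compute_pad(1534) aligns that up to a
 * 2048-byte half page and returns (2048 - 320) - 1534 = 194 bytes of
 * headroom.  The exact number shifts with the kernel's skb_shared_info
 * size.
 */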

/**
 * iavf_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
                                     const u64 stat_err_bits)
{
        return !!(rx_desc->wb.qword1.status_error_len &
                  cpu_to_le64(stat_err_bits));
}
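
/* Typical call (illustrative only): the Rx clean loop checks the
 * descriptor-done bit before touching a descriptor, e.g.
 *
 *      if (!iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *              break;
 *
 * where IAVF_RX_DESC_STATUS_DD_SHIFT comes from iavf_type.h.
 */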

/* helpers to advance a descriptor index with ring wraparound */
#define IAVF_RX_INCREMENT(r, i) \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        (i) = 0;                \
                (r)->next_to_clean = (i);       \
        } while (0)

#define IAVF_RX_NEXT_DESC(r, i, n)              \
        do {                                    \
                (i)++;                          \
                if ((i) == (r)->count)          \
                        (i) = 0;                \
                (n) = IAVF_RX_DESC((r), (i));   \
        } while (0)

#define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)     \
        do {                                    \
                IAVF_RX_NEXT_DESC((r), (i), (n)); \
                prefetch((n));                  \
        } while (0)

#define IAVF_MAX_BUFFER_TXD     8
#define IAVF_MIN_TX_LEN         17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define IAVF_MAX_READ_REQ_SIZE          4096
#define IAVF_MAX_DATA_PER_TXD           (16 * 1024 - 1)
#define IAVF_MAX_DATA_PER_TXD_ALIGNED \
        (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
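
/* Plugging in the numbers (illustrative only): 16383 & ~4095 == 12288,
 * so IAVF_MAX_DATA_PER_TXD_ALIGNED is 12K, the per-descriptor budget
 * that iavf_txd_use_count() below divides by.
 */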

/**
 * iavf_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K.  But division is slow!  Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up.  Because 256 isn't an exact multiple
 * of 3, we'll underestimate near each multiple of 12K.  This is actually
 * more accurate as we have 4K - 1 of wiggle room that we can fit into
 * the last segment.  For our purposes this is accurate out to 1M which
 * is orders of magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and shifts are done in an order that's
 * equivalent, this can be rearranged to:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int iavf_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + 1;
}
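
/* Quick sanity check of the math above (illustrative only): a 3000-byte
 * fragment gives ((3000 * 85) >> 20) + 1 = 0 + 1 = 1 descriptor, while a
 * 32768-byte fragment gives ((32768 * 85) >> 20) + 1 = 2 + 1 = 3,
 * matching the exact ceiling of 32768 / 12288.
 */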

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING   4

#define IAVF_TX_FLAGS_HW_VLAN           BIT(1)
#define IAVF_TX_FLAGS_SW_VLAN           BIT(2)
#define IAVF_TX_FLAGS_TSO               BIT(3)
#define IAVF_TX_FLAGS_IPV4              BIT(4)
#define IAVF_TX_FLAGS_IPV6              BIT(5)
#define IAVF_TX_FLAGS_FCCRC             BIT(6)
#define IAVF_TX_FLAGS_FSO               BIT(7)
#define IAVF_TX_FLAGS_FD_SB             BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL      BIT(10)
#define IAVF_TX_FLAGS_VLAN_MASK         0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT   29
#define IAVF_TX_FLAGS_VLAN_SHIFT        16

struct iavf_tx_buffer {
        struct iavf_tx_desc *next_to_watch;
        union {
                struct sk_buff *skb;
                void *raw_buf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;

        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct iavf_rx_buffer {
        dma_addr_t dma;
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 page_offset;
#else
        __u16 page_offset;
#endif
        __u16 pagecnt_bias;
};

struct iavf_queue_stats {
        u64 packets;
        u64 bytes;
};

struct iavf_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
        u64 tx_linearize;
        u64 tx_force_wb;
        int prev_pkt_ctr;
        u64 tx_lost_interrupt;
};

struct iavf_rx_queue_stats {
        u64 non_eop_descs;
        u64 alloc_page_failed;
        u64 alloc_buff_failed;
        u64 page_reuse_count;
        u64 realloc_count;
};

enum iavf_ring_state_t {
        __IAVF_TX_FDIR_INIT_DONE,
        __IAVF_TX_XPS_INIT_DONE,
        __IAVF_RING_STATE_NBITS
};


/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define IAVF_RX_DTYPE_NO_SPLIT       0
#define IAVF_RX_DTYPE_HEADER_SPLIT   1
#define IAVF_RX_DTYPE_SPLIT_ALWAYS   2
#define IAVF_RX_SPLIT_L2      0x1
#define IAVF_RX_SPLIT_IP      0x2
#define IAVF_RX_SPLIT_TCP_UDP 0x4
#define IAVF_RX_SPLIT_SCTP    0x8

/* struct iavf_ring - ring information */
struct iavf_ring {
        struct iavf_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
        union {
                struct iavf_tx_buffer *tx_bi;
                struct iavf_rx_buffer *rx_bi;
        };
        DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
        u16 queue_index;                /* Queue number of ring */
        u8 dcb_tc;                      /* Traffic class of ring */
        u8 __iomem *tail;

        /* high bit set means dynamic, use accessor routines to read/write.
         * hardware only supports 2us resolution for the ITR registers.
         * these values always store the USER setting, and must be converted
         * before programming to a register.
         */
        u16 itr_setting;

        u16 count;                      /* Number of descriptors */
        u16 reg_idx;                    /* HW register index of the ring */
        u16 rx_buf_len;

        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;

        u8 atr_sample_rate;
        u8 atr_count;

        bool ring_active;               /* is ring online or not */
        bool arm_wb;                    /* do something to arm write back */
        u8 packet_stride;

        u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR                BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED        BIT(1)

        /* stats structs */
        struct iavf_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct iavf_tx_queue_stats tx_stats;
                struct iavf_rx_queue_stats rx_stats;
        };

        unsigned int size;              /* length of descriptor ring in bytes */
        dma_addr_t dma;                 /* physical address of ring */

        struct iavf_vsi *vsi;           /* Backreference to associated VSI */
        struct iavf_q_vector *q_vector; /* Backreference to associated q_vector */

        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
        struct sk_buff *skb;            /* When iavf_clean_rx_ring_irq() must
                                         * return before it sees the EOP for
                                         * the current packet, we save that skb
                                         * here and resume receiving this
                                         * packet the next time
                                         * iavf_clean_rx_ring_irq() is called
                                         * for this ring.
                                         */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct iavf_ring *ring)
{
        return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
{
        ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
{
        ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
}

#define IAVF_ITR_ADAPTIVE_MIN_INC       0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS     0x0002
#define IAVF_ITR_ADAPTIVE_MAX_USECS     0x007e
#define IAVF_ITR_ADAPTIVE_LATENCY       0x8000
#define IAVF_ITR_ADAPTIVE_BULK          0x0000
#define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))

struct iavf_ring_container {
        struct iavf_ring *ring;         /* pointer to linked list of ring(s) */
        unsigned long next_update;      /* jiffies value of next update */
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;                      /* total number of rings in vector */
        u16 target_itr;                 /* target ITR setting for ring(s) */
        u16 current_itr;                /* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define iavf_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
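
/* Typical use (illustrative only): iavf_napi_poll() walks every Tx ring
 * attached to a vector like so:
 *
 *      struct iavf_ring *ring;
 *
 *      iavf_for_each_ring(ring, q_vector->tx)
 *              clean_complete &= iavf_clean_tx_irq(vsi, ring, budget);
 *
 * The iterator simply follows the singly linked 'next' pointers that
 * chain the rings belonging to one iavf_ring_container.
 */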

static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
{
#if (PAGE_SIZE < 8192)
        if (ring->rx_buf_len > (PAGE_SIZE / 2))
                return 1;
#endif
        return 0;
}

#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
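
/* Consequence of the above (illustrative only): on 4K-page systems a
 * 3072-byte rx_buf_len exceeds half a page, so the driver allocates
 * order-1 (8K) pages and iavf_rx_pg_size() reports 8192; on systems
 * with pages of 8K or more, the #if drops out and order 0 is always used.
 */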

bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
void iavf_free_tx_resources(struct iavf_ring *tx_ring);
void iavf_free_rx_resources(struct iavf_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct iavf_vsi *vsi);
int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);

/**
 * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb: one bucket
 * for the linear data plus one per fragment, each sized via
 * iavf_txd_use_count().
 */
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);

        for (;;) {
                count += iavf_txd_use_count(size);

                if (!nr_frags--)
                        break;

                size = skb_frag_size(frag++);
        }

        return count;
}

/**
 * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
{
        if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
                return 0;
        return __iavf_maybe_stop_tx(tx_ring, size);
}

/**
 * iavf_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
{
        /* Both TSO and single send will work if count is less than 8 */
        if (likely(count < IAVF_MAX_BUFFER_TXD))
                return false;

        if (skb_is_gso(skb))
                return __iavf_chk_linearize(skb);

        /* we can support up to 8 data buffers for a single send */
        return count != IAVF_MAX_BUFFER_TXD;
}
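
/* How these helpers fit together on the transmit path (illustrative
 * sketch only, mirroring what iavf_xmit_frame_ring() in iavf_txrx.c
 * does; error handling trimmed):
 *
 *      count = iavf_xmit_descriptor_count(skb);
 *      if (iavf_chk_linearize(skb, count)) {
 *              if (__skb_linearize(skb))
 *                      goto out_drop;
 *              count = iavf_txd_use_count(skb->len);
 *              tx_ring->tx_stats.tx_linearize++;
 *      }
 *      if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 *              tx_ring->tx_stats.tx_busy++;
 *              return NETDEV_TX_BUSY;
 *      }
 *
 * The "+ 4 + 1" reserves a 4-descriptor gap so the tail never lands on
 * the cache line where head is written, plus one context descriptor.
 */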

/**
 * txring_txq - Find the netdev Tx queue based on the ring's queue index
 * @ring: Tx ring to find the netdev queue for
 */
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _IAVF_TXRX_H_ */