This source file includes the following definitions (a brief sketch of how they fit together follows the list):
- mlx5e_rx_hw_stamp
- mlx5e_read_cqe_slot
- mlx5e_read_title_slot
- mlx5e_read_mini_arr_slot
- mlx5e_cqes_update_owner
- mlx5e_decompress_cqe
- mlx5e_decompress_cqe_no_hash
- mlx5e_decompress_cqes_cont
- mlx5e_decompress_cqes_start
- mlx5e_page_is_reserved
- mlx5e_rx_cache_put
- mlx5e_rx_cache_get
- mlx5e_page_alloc_pool
- mlx5e_page_alloc
- mlx5e_page_dma_unmap
- mlx5e_page_release_dynamic
- mlx5e_page_release
- mlx5e_get_rx_frag
- mlx5e_put_rx_frag
- get_frag
- mlx5e_alloc_rx_wqe
- mlx5e_free_rx_wqe
- mlx5e_dealloc_rx_wqe
- mlx5e_alloc_rx_wqes
- mlx5e_add_skb_frag
- mlx5e_copy_skb_header
- mlx5e_free_rx_mpwqe
- mlx5e_post_rx_mpwqe
- mlx5e_fill_icosq_frag_edge
- mlx5e_alloc_rx_mpwqe
- mlx5e_dealloc_rx_mpwqe
- mlx5e_post_rx_wqes
- mlx5e_poll_ico_cq
- mlx5e_post_rx_mpwqes
- mlx5e_lro_update_tcp_hdr
- mlx5e_lro_update_hdr
- mlx5e_skb_set_hash
- is_last_ethertype_ip
- mlx5e_enable_ecn
- get_ip_proto
- tail_padding_csum_slow
- tail_padding_csum
- mlx5e_skb_csum_fixup
- mlx5e_handle_csum
- mlx5e_build_rx_skb
- mlx5e_complete_rx_cqe
- mlx5e_build_linear_skb
- mlx5e_skb_from_cqe_linear
- mlx5e_skb_from_cqe_nonlinear
- trigger_report
- mlx5e_handle_rx_cqe
- mlx5e_handle_rx_cqe_rep
- mlx5e_skb_from_cqe_mpwrq_nonlinear
- mlx5e_skb_from_cqe_mpwrq_linear
- mlx5e_handle_rx_cqe_mpwrq
- mlx5e_poll_rx_cq
- mlx5i_complete_rx_cqe
- mlx5i_handle_rx_cqe
- mlx5e_ipsec_handle_rx_cqe
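At a high level, these pieces fit together roughly as follows (an illustrative summary based only on the functions listed above, not a verbatim call graph): mlx5e_poll_rx_cq() drives RX from NAPI, popping CQEs and expanding compressed CQE blocks via mlx5e_decompress_cqes_start()/_cont(); each completion is dispatched to rq->handle_rx_cqe (mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, mlx5i_handle_rx_cqe, ...), which builds an skb with one of the skb_from_cqe helpers, finalizes it in mlx5e_complete_rx_cqe()/mlx5e_build_rx_skb() (LRO header fixup, RSS hash, checksum, timestamp, VLAN), and passes it to napi_gro_receive(). Buffer replenishment runs separately through mlx5e_post_rx_wqes() and mlx5e_post_rx_mpwqes(), backed by the page pool and the per-RQ page cache (mlx5e_rx_cache_put/get).

    mlx5e_poll_rx_cq(cq, budget)
      -> mlx5e_decompress_cqes_start()/_cont()     (compressed CQEs only)
      -> rq->handle_rx_cqe(rq, cqe)
           e.g. mlx5e_handle_rx_cqe():
             -> rq->wqe.skb_from_cqe()             (linear / nonlinear)
             -> mlx5e_complete_rx_cqe() -> mlx5e_build_rx_skb()
             -> napi_gro_receive()
             -> mlx5e_free_rx_wqe(); mlx5_wq_cyc_pop()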
33 #include <linux/prefetch.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/indirect_call_wrapper.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool.h>
40 #include <net/inet_ecn.h>
41 #include "en.h"
42 #include "en_tc.h"
43 #include "eswitch.h"
44 #include "en_rep.h"
45 #include "ipoib/ipoib.h"
46 #include "en_accel/ipsec_rxtx.h"
47 #include "en_accel/tls_rxtx.h"
48 #include "lib/clock.h"
49 #include "en/xdp.h"
50 #include "en/xsk/rx.h"
51 #include "en/health.h"
52
53 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
54 {
55 return config->rx_filter == HWTSTAMP_FILTER_ALL;
56 }
57
58 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
59 u32 cqcc, void *data)
60 {
61 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
62
63 memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
64 }
65
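/* Editor's note (illustrative summary, not original driver documentation):
 * with CQE compression enabled, the hardware emits a "title" CQE followed by
 * slots holding arrays of 8-byte mini CQEs (struct mlx5_mini_cqe8). The title
 * carries the fields shared by the whole block, while each mini CQE carries
 * only the per-packet byte count and checksum. mlx5e_decompress_cqe() rebuilds
 * a complete CQE in cqd->title from the title plus the current mini CQE, and
 * mlx5e_cqes_update_owner() rewrites the owner bits of the slots consumed
 * while expanding the block so the CQ ring stays consistent.
 */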
66 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
67 struct mlx5_cqwq *wq,
68 u32 cqcc)
69 {
70 struct mlx5e_cq_decomp *cqd = &rq->cqd;
71 struct mlx5_cqe64 *title = &cqd->title;
72
73 mlx5e_read_cqe_slot(wq, cqcc, title);
74 cqd->left = be32_to_cpu(title->byte_cnt);
75 cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
76 rq->stats->cqe_compress_blks++;
77 }
78
79 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
80 struct mlx5e_cq_decomp *cqd,
81 u32 cqcc)
82 {
83 mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
84 cqd->mini_arr_idx = 0;
85 }
86
87 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
88 {
89 u32 cqcc = wq->cc;
90 u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
91 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
92 u32 wq_sz = mlx5_cqwq_get_size(wq);
93 u32 ci_top = min_t(u32, wq_sz, ci + n);
94
95 for (; ci < ci_top; ci++, n--) {
96 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
97
98 cqe->op_own = op_own;
99 }
100
101 if (unlikely(ci == wq_sz)) {
102 op_own = !op_own;
103 for (ci = 0; ci < n; ci++) {
104 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
105
106 cqe->op_own = op_own;
107 }
108 }
109 }
110
111 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
112 struct mlx5_cqwq *wq,
113 u32 cqcc)
114 {
115 struct mlx5e_cq_decomp *cqd = &rq->cqd;
116 struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
117 struct mlx5_cqe64 *title = &cqd->title;
118
119 title->byte_cnt = mini_cqe->byte_cnt;
120 title->check_sum = mini_cqe->checksum;
121 title->op_own &= 0xf0;
122 title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
123 title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
124
125 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
126 cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
127 else
128 cqd->wqe_counter =
129 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
130 }
131
132 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
133 struct mlx5_cqwq *wq,
134 u32 cqcc)
135 {
136 struct mlx5e_cq_decomp *cqd = &rq->cqd;
137
138 mlx5e_decompress_cqe(rq, wq, cqcc);
139 cqd->title.rss_hash_type = 0;
140 cqd->title.rss_hash_result = 0;
141 }
142
143 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
144 struct mlx5_cqwq *wq,
145 int update_owner_only,
146 int budget_rem)
147 {
148 struct mlx5e_cq_decomp *cqd = &rq->cqd;
149 u32 cqcc = wq->cc + update_owner_only;
150 u32 cqe_count;
151 u32 i;
152
153 cqe_count = min_t(u32, cqd->left, budget_rem);
154
155 for (i = update_owner_only; i < cqe_count;
156 i++, cqd->mini_arr_idx++, cqcc++) {
157 if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
158 mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
159
160 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
161 rq->handle_rx_cqe(rq, &cqd->title);
162 }
163 mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
164 wq->cc = cqcc;
165 cqd->left -= cqe_count;
166 rq->stats->cqe_compress_pkts += cqe_count;
167
168 return cqe_count;
169 }
170
171 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
172 struct mlx5_cqwq *wq,
173 int budget_rem)
174 {
175 struct mlx5e_cq_decomp *cqd = &rq->cqd;
176 u32 cc = wq->cc;
177
178 mlx5e_read_title_slot(rq, wq, cc);
179 mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
180 mlx5e_decompress_cqe(rq, wq, cc);
181 rq->handle_rx_cqe(rq, &cqd->title);
182 cqd->mini_arr_idx++;
183
184 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
185 }
186
187 static inline bool mlx5e_page_is_reserved(struct page *page)
188 {
189 return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
190 }
191
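/* Editor's note (illustrative summary, not original driver documentation):
 * rq->page_cache is a small ring of recently used RX pages, with head/tail
 * indices masked by MLX5E_CACHE_SIZE - 1 (a power of two). mlx5e_rx_cache_put()
 * declines pages that are pfmemalloc-reserved or belong to a remote NUMA node,
 * and mlx5e_rx_cache_get() only reuses a page whose refcount has dropped back
 * to 1, i.e. the network stack no longer holds it.
 */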
192 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
193 struct mlx5e_dma_info *dma_info)
194 {
195 struct mlx5e_page_cache *cache = &rq->page_cache;
196 u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
197 struct mlx5e_rq_stats *stats = rq->stats;
198
199 if (tail_next == cache->head) {
200 stats->cache_full++;
201 return false;
202 }
203
204 if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
205 stats->cache_waive++;
206 return false;
207 }
208
209 cache->page_cache[cache->tail] = *dma_info;
210 cache->tail = tail_next;
211 return true;
212 }
213
214 static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
215 struct mlx5e_dma_info *dma_info)
216 {
217 struct mlx5e_page_cache *cache = &rq->page_cache;
218 struct mlx5e_rq_stats *stats = rq->stats;
219
220 if (unlikely(cache->head == cache->tail)) {
221 stats->cache_empty++;
222 return false;
223 }
224
225 if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
226 stats->cache_busy++;
227 return false;
228 }
229
230 *dma_info = cache->page_cache[cache->head];
231 cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
232 stats->cache_reuse++;
233
234 dma_sync_single_for_device(rq->pdev, dma_info->addr,
235 PAGE_SIZE,
236 DMA_FROM_DEVICE);
237 return true;
238 }
239
240 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
241 struct mlx5e_dma_info *dma_info)
242 {
243 if (mlx5e_rx_cache_get(rq, dma_info))
244 return 0;
245
246 dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
247 if (unlikely(!dma_info->page))
248 return -ENOMEM;
249
250 dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
251 PAGE_SIZE, rq->buff.map_dir);
252 if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
253 page_pool_recycle_direct(rq->page_pool, dma_info->page);
254 dma_info->page = NULL;
255 return -ENOMEM;
256 }
257
258 return 0;
259 }
260
261 static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
262 struct mlx5e_dma_info *dma_info)
263 {
264 if (rq->umem)
265 return mlx5e_xsk_page_alloc_umem(rq, dma_info);
266 else
267 return mlx5e_page_alloc_pool(rq, dma_info);
268 }
269
270 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
271 {
272 dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
273 }
274
275 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
276 struct mlx5e_dma_info *dma_info,
277 bool recycle)
278 {
279 if (likely(recycle)) {
280 if (mlx5e_rx_cache_put(rq, dma_info))
281 return;
282
283 mlx5e_page_dma_unmap(rq, dma_info);
284 page_pool_recycle_direct(rq->page_pool, dma_info->page);
285 } else {
286 mlx5e_page_dma_unmap(rq, dma_info);
287 page_pool_release_page(rq->page_pool, dma_info->page);
288 put_page(dma_info->page);
289 }
290 }
291
292 static inline void mlx5e_page_release(struct mlx5e_rq *rq,
293 struct mlx5e_dma_info *dma_info,
294 bool recycle)
295 {
296 if (rq->umem)
297 /* The `recycle` parameter is ignored, and the page is always
298  * put into the Reuse Ring, because there is no way to return
299  * the page to the userspace when the interface goes down.
300  */
301 mlx5e_xsk_page_release(rq, dma_info);
302 else
303 mlx5e_page_release_dynamic(rq, dma_info, recycle);
304 }
305
306 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
307 struct mlx5e_wqe_frag_info *frag)
308 {
309 int err = 0;
310
311 if (!frag->offset)
312 /* On first frag (offset == 0), replenish page (dma_info actually).
313  * Other frags that point to the same dma_info (with a different
314  * offset) should just use the new one without replenishing again
315  * by themselves.
316  */
317 err = mlx5e_page_alloc(rq, frag->di);
318
319 return err;
320 }
321
322 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
323 struct mlx5e_wqe_frag_info *frag,
324 bool recycle)
325 {
326 if (frag->last_in_page)
327 mlx5e_page_release(rq, frag->di, recycle);
328 }
329
330 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
331 {
332 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
333 }
334
335 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
336 u16 ix)
337 {
338 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
339 int err;
340 int i;
341
342 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
343 err = mlx5e_get_rx_frag(rq, frag);
344 if (unlikely(err))
345 goto free_frags;
346
347 wqe->data[i].addr = cpu_to_be64(frag->di->addr +
348 frag->offset + rq->buff.headroom);
349 }
350
351 return 0;
352
353 free_frags:
354 while (--i >= 0)
355 mlx5e_put_rx_frag(rq, --frag, true);
356
357 return err;
358 }
359
360 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
361 struct mlx5e_wqe_frag_info *wi,
362 bool recycle)
363 {
364 int i;
365
366 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
367 mlx5e_put_rx_frag(rq, wi, recycle);
368 }
369
370 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
371 {
372 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
373
374 mlx5e_free_rx_wqe(rq, wi, false);
375 }
376
377 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
378 {
379 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
380 int err;
381 int i;
382
383 if (rq->umem) {
384 int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
385
386 if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
387 return -ENOMEM;
388 }
389
390 for (i = 0; i < wqe_bulk; i++) {
391 struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
392
393 err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
394 if (unlikely(err))
395 goto free_wqes;
396 }
397
398 return 0;
399
400 free_wqes:
401 while (--i >= 0)
402 mlx5e_dealloc_rx_wqe(rq, ix + i);
403
404 return err;
405 }
406
407 static inline void
408 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
409 struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
410 unsigned int truesize)
411 {
412 dma_sync_single_for_cpu(rq->pdev,
413 di->addr + frag_offset,
414 len, DMA_FROM_DEVICE);
415 page_ref_inc(di->page);
416 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
417 di->page, frag_offset, len, truesize);
418 }
419
420 static inline void
421 mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
422 struct mlx5e_dma_info *dma_info,
423 int offset_from, u32 headlen)
424 {
425 const void *from = page_address(dma_info->page) + offset_from;
426
427 unsigned int len = ALIGN(headlen, sizeof(long));
428
429 dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
430 DMA_FROM_DEVICE);
431 skb_copy_to_linear_data(skb, from, len);
432 }
433
434 static void
435 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
436 {
437 bool no_xdp_xmit;
438 struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
439 int i;
440
441 /* A common case for AF_XDP. */
442 if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
443 return;
444
445 no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
446 MLX5_MPWRQ_PAGES_PER_WQE);
447
448 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
449 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
450 mlx5e_page_release(rq, &dma_info[i], recycle);
451 }
452
453 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
454 {
455 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
456
457 do {
458 u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
459
460 mlx5_wq_ll_push(wq, next_wqe_index);
461 } while (--n);
462
463 /* ensure wqes are visible to device before updating doorbell record */
464 dma_wmb();
465
466 mlx5_wq_ll_update_db_record(wq);
467 }
468
469 static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
470 struct mlx5_wq_cyc *wq,
471 u16 pi, u16 nnops)
472 {
473 struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
474
475 edge_wi = wi + nnops;
476
477 /* fill sq frag edge with nops to avoid wqe wrapping two pages */
478 for (; wi < edge_wi; wi++) {
479 wi->opcode = MLX5_OPCODE_NOP;
480 wi->num_wqebbs = 1;
481 mlx5e_post_nop(wq, sq->sqn, &sq->pc);
482 }
483 }
484
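/* Editor's note (illustrative summary, not original driver documentation):
 * a multi-packet WQE needs MLX5_MPWRQ_PAGES_PER_WQE freshly mapped pages.
 * mlx5e_alloc_rx_mpwqe() writes their DMA addresses as MTT entries into a UMR
 * WQE posted on the channel's ICOSQ; the RQ entry only becomes usable after
 * mlx5e_poll_ico_cq() observes the UMR completion and mlx5e_post_rx_mpwqes()
 * pushes it to the hardware and rings the doorbell.
 */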
485 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
486 {
487 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
488 struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
489 struct mlx5e_icosq *sq = &rq->channel->icosq;
490 struct mlx5_wq_cyc *wq = &sq->wq;
491 struct mlx5e_umr_wqe *umr_wqe;
492 u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
493 u16 pi, contig_wqebbs_room;
494 int err;
495 int i;
496
497 if (rq->umem &&
498 unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
499 err = -ENOMEM;
500 goto err;
501 }
502
503 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
504 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
505 if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
506 mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
507 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
508 }
509
510 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
511 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
512
513 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
514 err = mlx5e_page_alloc(rq, dma_info);
515 if (unlikely(err))
516 goto err_unmap;
517 umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
518 }
519
520 bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
521 wi->consumed_strides = 0;
522
523 umr_wqe->ctrl.opmod_idx_opcode =
524 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
525 MLX5_OPCODE_UMR);
526 umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
527
528 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
529 sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
530 sq->db.ico_wqe[pi].umr.rq = rq;
531 sq->pc += MLX5E_UMR_WQEBBS;
532
533 sq->doorbell_cseg = &umr_wqe->ctrl;
534
535 return 0;
536
537 err_unmap:
538 while (--i >= 0) {
539 dma_info--;
540 mlx5e_page_release(rq, dma_info, true);
541 }
542
543 err:
544 rq->stats->buff_alloc_err++;
545
546 return err;
547 }
548
549 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
550 {
551 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
552
553 mlx5e_free_rx_mpwqe(rq, wi, false);
554 }
555
556 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
557 {
558 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
559 u8 wqe_bulk;
560 int err;
561
562 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
563 return false;
564
565 wqe_bulk = rq->wqe.info.wqe_bulk;
566
567 if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
568 return false;
569
570 do {
571 u16 head = mlx5_wq_cyc_get_head(wq);
572
573 err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
574 if (unlikely(err)) {
575 rq->stats->buff_alloc_err++;
576 break;
577 }
578
579 mlx5_wq_cyc_push_n(wq, wqe_bulk);
580 } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
581
582 /* ensure wqes are visible to device before updating doorbell record */
583 dma_wmb();
584
585 mlx5_wq_cyc_update_db_record(wq);
586
587 return !!err;
588 }
589
590 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
591 {
592 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
593 struct mlx5_cqe64 *cqe;
594 u16 sqcc;
595 int i;
596
597 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
598 return 0;
599
600 cqe = mlx5_cqwq_get_cqe(&cq->wq);
601 if (likely(!cqe))
602 return 0;
603
604 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
605  * otherwise a cq overrun may occur
606  */
607 sqcc = sq->cc;
608
609 i = 0;
610 do {
611 u16 wqe_counter;
612 bool last_wqe;
613
614 mlx5_cqwq_pop(&cq->wq);
615
616 wqe_counter = be16_to_cpu(cqe->wqe_counter);
617
618 if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
619 netdev_WARN_ONCE(cq->channel->netdev,
620 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
621 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
622 queue_work(cq->channel->priv->wq, &sq->recover_work);
623 break;
624 }
625 do {
626 struct mlx5e_sq_wqe_info *wi;
627 u16 ci;
628
629 last_wqe = (sqcc == wqe_counter);
630
631 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
632 wi = &sq->db.ico_wqe[ci];
633 sqcc += wi->num_wqebbs;
634
635 if (likely(wi->opcode == MLX5_OPCODE_UMR))
636 wi->umr.rq->mpwqe.umr_completed++;
637 else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
638 netdev_WARN_ONCE(cq->channel->netdev,
639 "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
640 wi->opcode);
641
642 } while (!last_wqe);
643
644 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
645
646 sq->cc = sqcc;
647
648 mlx5_cqwq_update_db_record(&cq->wq);
649
650 return i;
651 }
652
653 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
654 {
655 struct mlx5e_icosq *sq = &rq->channel->icosq;
656 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
657 u8 umr_completed = rq->mpwqe.umr_completed;
658 int alloc_err = 0;
659 u8 missing, i;
660 u16 head;
661
662 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
663 return false;
664
665 if (umr_completed) {
666 mlx5e_post_rx_mpwqe(rq, umr_completed);
667 rq->mpwqe.umr_in_progress -= umr_completed;
668 rq->mpwqe.umr_completed = 0;
669 }
670
671 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
672
673 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
674 rq->stats->congst_umr++;
675
676 #define UMR_WQE_BULK (2)
677 if (likely(missing < UMR_WQE_BULK))
678 return false;
679
680 head = rq->mpwqe.actual_wq_head;
681 i = missing;
682 do {
683 alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
684
685 if (unlikely(alloc_err))
686 break;
687 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
688 } while (--i);
689
690 rq->mpwqe.umr_last_bulk = missing - i;
691 if (sq->doorbell_cseg) {
692 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
693 sq->doorbell_cseg = NULL;
694 }
695
696 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
697 rq->mpwqe.actual_wq_head = head;
698
699 /* If XSK Fill Ring doesn't have enough frames, report the error, so
700  * that one of the actions can be performed:
701  * 1. If need_wakeup is used, signal that the application has to kick
702  * the driver when it refills the Fill Ring.
703  * 2. Otherwise, busy poll by rescheduling the NAPI poll.
704  */
705 if (unlikely(alloc_err == -ENOMEM && rq->umem))
706 return true;
707
708 return false;
709 }
710
711 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
712 {
713 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
714 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
715 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
716
717 tcp->check = 0;
718 tcp->psh = get_cqe_lro_tcppsh(cqe);
719
720 if (tcp_ack) {
721 tcp->ack = 1;
722 tcp->ack_seq = cqe->lro_ack_seq_num;
723 tcp->window = cqe->lro_tcp_win;
724 }
725 }
726
727 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
728 u32 cqe_bcnt)
729 {
730 struct ethhdr *eth = (struct ethhdr *)(skb->data);
731 struct tcphdr *tcp;
732 int network_depth = 0;
733 __wsum check;
734 __be16 proto;
735 u16 tot_len;
736 void *ip_p;
737
738 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
739
740 tot_len = cqe_bcnt - network_depth;
741 ip_p = skb->data + network_depth;
742
743 if (proto == htons(ETH_P_IP)) {
744 struct iphdr *ipv4 = ip_p;
745
746 tcp = ip_p + sizeof(struct iphdr);
747 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
748
749 ipv4->ttl = cqe->lro_min_ttl;
750 ipv4->tot_len = cpu_to_be16(tot_len);
751 ipv4->check = 0;
752 ipv4->check = ip_fast_csum((unsigned char *)ipv4,
753 ipv4->ihl);
754
755 mlx5e_lro_update_tcp_hdr(cqe, tcp);
756 check = csum_partial(tcp, tcp->doff * 4,
757 csum_unfold((__force __sum16)cqe->check_sum));
758
759 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
760 tot_len - sizeof(struct iphdr),
761 IPPROTO_TCP, check);
762 } else {
763 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
764 struct ipv6hdr *ipv6 = ip_p;
765
766 tcp = ip_p + sizeof(struct ipv6hdr);
767 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
768
769 ipv6->hop_limit = cqe->lro_min_ttl;
770 ipv6->payload_len = cpu_to_be16(payload_len);
771
772 mlx5e_lro_update_tcp_hdr(cqe, tcp);
773 check = csum_partial(tcp, tcp->doff * 4,
774 csum_unfold((__force __sum16)cqe->check_sum));
775
776 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
777 IPPROTO_TCP, check);
778 }
779 }
780
781 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
782 struct sk_buff *skb)
783 {
784 u8 cht = cqe->rss_hash_type;
785 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
786 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
787 PKT_HASH_TYPE_NONE;
788 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
789 }
790
791 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
792 __be16 *proto)
793 {
794 *proto = ((struct ethhdr *)skb->data)->h_proto;
795 *proto = __vlan_get_protocol(skb, *proto, network_depth);
796
797 if (*proto == htons(ETH_P_IP))
798 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
799
800 if (*proto == htons(ETH_P_IPV6))
801 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
802
803 return false;
804 }
805
806 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
807 {
808 int network_depth = 0;
809 __be16 proto;
810 void *ip;
811 int rc;
812
813 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
814 return;
815
816 ip = skb->data + network_depth;
817 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
818 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
819
820 rq->stats->ecn_mark += !!rc;
821 }
822
823 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
824 {
825 void *ip_p = skb->data + network_depth;
826
827 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
828 ((struct ipv6hdr *)ip_p)->nexthdr;
829 }
830
831 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
832
833 #define MAX_PADDING 8
834
835 static void
836 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
837 struct mlx5e_rq_stats *stats)
838 {
839 stats->csum_complete_tail_slow++;
840 skb->csum = csum_block_add(skb->csum,
841 skb_checksum(skb, offset, len, 0),
842 offset);
843 }
844
845 static void
846 tail_padding_csum(struct sk_buff *skb, int offset,
847 struct mlx5e_rq_stats *stats)
848 {
849 u8 tail_padding[MAX_PADDING];
850 int len = skb->len - offset;
851 void *tail;
852
853 if (unlikely(len > MAX_PADDING)) {
854 tail_padding_csum_slow(skb, offset, len, stats);
855 return;
856 }
857
858 tail = skb_header_pointer(skb, offset, len, tail_padding);
859 if (unlikely(!tail)) {
860 tail_padding_csum_slow(skb, offset, len, stats);
861 return;
862 }
863
864 stats->csum_complete_tail++;
865 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
866 }
867
868 static void
869 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
870 struct mlx5e_rq_stats *stats)
871 {
872 struct ipv6hdr *ip6;
873 struct iphdr *ip4;
874 int pkt_len;
875
876 /* Fixup vlan headers, if any */
877 if (network_depth > ETH_HLEN)
878 /* CQE csum is calculated from the IP header and does
879  * not cover VLAN headers (if present). This will add
880  * the checksum manually.
881  */
882 skb->csum = csum_partial(skb->data + ETH_HLEN,
883 network_depth - ETH_HLEN,
884 skb->csum);
885
886 /* Fixup tail padding, if any */
887 switch (proto) {
888 case htons(ETH_P_IP):
889 ip4 = (struct iphdr *)(skb->data + network_depth);
890 pkt_len = network_depth + ntohs(ip4->tot_len);
891 break;
892 case htons(ETH_P_IPV6):
893 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
894 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
895 break;
896 default:
897 return;
898 }
899
900 if (likely(pkt_len >= skb->len))
901 return;
902
903 tail_padding_csum(skb, pkt_len, stats);
904 }
905
906 static inline void mlx5e_handle_csum(struct net_device *netdev,
907 struct mlx5_cqe64 *cqe,
908 struct mlx5e_rq *rq,
909 struct sk_buff *skb,
910 bool lro)
911 {
912 struct mlx5e_rq_stats *stats = rq->stats;
913 int network_depth = 0;
914 __be16 proto;
915
916 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
917 goto csum_none;
918
919 if (lro) {
920 skb->ip_summed = CHECKSUM_UNNECESSARY;
921 stats->csum_unnecessary++;
922 return;
923 }
924
925 /* True when explicitly set via priv flag, or when XDP prog is loaded */
926 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
927 goto csum_unnecessary;
928
929 /* CQE csum doesn't cover padding octets in short ethernet
930  * frames. And the pad field is appended prior to calculating
931  * and appending the FCS field.
932  *
933  * Detecting these padded frames requires to verify and parse
934  * IP headers, so we simply force all those small frames to be
935  * CHECKSUM_UNNECESSARY even if they are not padded.
936  */
937 if (short_frame(skb->len))
938 goto csum_unnecessary;
939
940 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
941 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
942 goto csum_unnecessary;
943
944 stats->csum_complete++;
945 skb->ip_summed = CHECKSUM_COMPLETE;
946 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
947
948 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
949 return; /* CQE csum covers all received bytes */
950
951 /* csum might need some fixups ... */
952 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
953 return;
954 }
955
956 csum_unnecessary:
957 if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
958 (cqe->hds_ip_ext & CQE_L4_OK))) {
959 skb->ip_summed = CHECKSUM_UNNECESSARY;
960 if (cqe_is_tunneled(cqe)) {
961 skb->csum_level = 1;
962 skb->encapsulation = 1;
963 stats->csum_unnecessary_inner++;
964 return;
965 }
966 stats->csum_unnecessary++;
967 return;
968 }
969 csum_none:
970 skb->ip_summed = CHECKSUM_NONE;
971 stats->csum_none++;
972 }
973
974 #define MLX5E_CE_BIT_MASK 0x80
975
976 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
977 u32 cqe_bcnt,
978 struct mlx5e_rq *rq,
979 struct sk_buff *skb)
980 {
981 u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
982 struct mlx5e_rq_stats *stats = rq->stats;
983 struct net_device *netdev = rq->netdev;
984
985 skb->mac_len = ETH_HLEN;
986
987 #ifdef CONFIG_MLX5_EN_TLS
988 mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
989 #endif
990
991 if (lro_num_seg > 1) {
992 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
993 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
994 /* Subtract one since we already counted this as one
995  * "regular" packet in mlx5e_complete_rx_cqe()
996  */
997 stats->packets += lro_num_seg - 1;
998 stats->lro_packets++;
999 stats->lro_bytes += cqe_bcnt;
1000 }
1001
1002 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1003 skb_hwtstamps(skb)->hwtstamp =
1004 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
1005
1006 skb_record_rx_queue(skb, rq->ix);
1007
1008 if (likely(netdev->features & NETIF_F_RXHASH))
1009 mlx5e_skb_set_hash(cqe, skb);
1010
1011 if (cqe_has_vlan(cqe)) {
1012 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1013 be16_to_cpu(cqe->vlan_info));
1014 stats->removed_vlan_packets++;
1015 }
1016
1017 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1018
1019 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1020
1021 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1022 mlx5e_enable_ecn(rq, skb);
1023
1024 skb->protocol = eth_type_trans(skb, netdev);
1025 }
1026
1027 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1028 struct mlx5_cqe64 *cqe,
1029 u32 cqe_bcnt,
1030 struct sk_buff *skb)
1031 {
1032 struct mlx5e_rq_stats *stats = rq->stats;
1033
1034 stats->packets++;
1035 stats->bytes += cqe_bcnt;
1036 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1037 }
1038
1039 static inline
1040 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1041 u32 frag_size, u16 headroom,
1042 u32 cqe_bcnt)
1043 {
1044 struct sk_buff *skb = build_skb(va, frag_size);
1045
1046 if (unlikely(!skb)) {
1047 rq->stats->buff_alloc_err++;
1048 return NULL;
1049 }
1050
1051 skb_reserve(skb, headroom);
1052 skb_put(skb, cqe_bcnt);
1053
1054 return skb;
1055 }
1056
1057 struct sk_buff *
1058 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1059 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1060 {
1061 struct mlx5e_dma_info *di = wi->di;
1062 u16 rx_headroom = rq->buff.headroom;
1063 struct sk_buff *skb;
1064 void *va, *data;
1065 bool consumed;
1066 u32 frag_size;
1067
1068 va = page_address(di->page) + wi->offset;
1069 data = va + rx_headroom;
1070 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1071
1072 dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
1073 frag_size, DMA_FROM_DEVICE);
1074 prefetchw(va);
1075 prefetch(data);
1076
1077 rcu_read_lock();
1078 consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
1079 rcu_read_unlock();
1080 if (consumed)
1081 return NULL;
1082
1083 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
1084 if (unlikely(!skb))
1085 return NULL;
1086
1087 /* queue up for recycling/reuse */
1088 page_ref_inc(di->page);
1089
1090 return skb;
1091 }
1092
1093 struct sk_buff *
1094 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1095 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1096 {
1097 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1098 struct mlx5e_wqe_frag_info *head_wi = wi;
1099 u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1100 u16 frag_headlen = headlen;
1101 u16 byte_cnt = cqe_bcnt - headlen;
1102 struct sk_buff *skb;
1103
1104 /* XDP is not supported in this configuration, as incoming packets
1105  * might spread among multiple pages.
1106  */
1107 skb = napi_alloc_skb(rq->cq.napi,
1108 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1109 if (unlikely(!skb)) {
1110 rq->stats->buff_alloc_err++;
1111 return NULL;
1112 }
1113
1114 prefetchw(skb->data);
1115
1116 while (byte_cnt) {
1117 u16 frag_consumed_bytes =
1118 min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
1119
1120 mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
1121 frag_consumed_bytes, frag_info->frag_stride);
1122 byte_cnt -= frag_consumed_bytes;
1123 frag_headlen = 0;
1124 frag_info++;
1125 wi++;
1126 }
1127
1128 /* copy header */
1129 mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
1130
1131 skb->tail += headlen;
1132 skb->len += headlen;
1133
1134 return skb;
1135 }
1136
1137 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1138 {
1139 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1140
1141 if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1142 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
1143 queue_work(rq->channel->priv->wq, &rq->recover_work);
1144 }
1145
1146 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1147 {
1148 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1149 struct mlx5e_wqe_frag_info *wi;
1150 struct sk_buff *skb;
1151 u32 cqe_bcnt;
1152 u16 ci;
1153
1154 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1155 wi = get_frag(rq, ci);
1156 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1157
1158 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1159 trigger_report(rq, cqe);
1160 rq->stats->wqe_err++;
1161 goto free_wqe;
1162 }
1163
1164 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1165 mlx5e_skb_from_cqe_linear,
1166 mlx5e_skb_from_cqe_nonlinear,
1167 rq, cqe, wi, cqe_bcnt);
1168 if (!skb) {
1169 /* probably for XDP */
1170 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1171 /* do not return page to cache,
1172  * it will be returned on XDP_TX completion.
1173  */
1174 goto wq_cyc_pop;
1175 }
1176 goto free_wqe;
1177 }
1178
1179 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1180 napi_gro_receive(rq->cq.napi, skb);
1181
1182 free_wqe:
1183 mlx5e_free_rx_wqe(rq, wi, true);
1184 wq_cyc_pop:
1185 mlx5_wq_cyc_pop(wq);
1186 }
1187
1188 #ifdef CONFIG_MLX5_ESWITCH
1189 void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1190 {
1191 struct net_device *netdev = rq->netdev;
1192 struct mlx5e_priv *priv = netdev_priv(netdev);
1193 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1194 struct mlx5_eswitch_rep *rep = rpriv->rep;
1195 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1196 struct mlx5e_wqe_frag_info *wi;
1197 struct sk_buff *skb;
1198 u32 cqe_bcnt;
1199 u16 ci;
1200
1201 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1202 wi = get_frag(rq, ci);
1203 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1204
1205 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1206 rq->stats->wqe_err++;
1207 goto free_wqe;
1208 }
1209
1210 skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
1211 if (!skb) {
1212 /* probably for XDP */
1213 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1214 /* do not return page to cache,
1215  * it will be returned on XDP_TX completion.
1216  */
1217 goto wq_cyc_pop;
1218 }
1219 goto free_wqe;
1220 }
1221
1222 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1223
1224 if (rep->vlan && skb_vlan_tag_present(skb))
1225 skb_vlan_pop(skb);
1226
1227 napi_gro_receive(rq->cq.napi, skb);
1228
1229 free_wqe:
1230 mlx5e_free_rx_wqe(rq, wi, true);
1231 wq_cyc_pop:
1232 mlx5_wq_cyc_pop(wq);
1233 }
1234 #endif
1235
1236 struct sk_buff *
1237 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1238 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1239 {
1240 u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1241 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1242 u32 frag_offset = head_offset + headlen;
1243 u32 byte_cnt = cqe_bcnt - headlen;
1244 struct mlx5e_dma_info *head_di = di;
1245 struct sk_buff *skb;
1246
1247 skb = napi_alloc_skb(rq->cq.napi,
1248 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1249 if (unlikely(!skb)) {
1250 rq->stats->buff_alloc_err++;
1251 return NULL;
1252 }
1253
1254 prefetchw(skb->data);
1255
1256 if (unlikely(frag_offset >= PAGE_SIZE)) {
1257 di++;
1258 frag_offset -= PAGE_SIZE;
1259 }
1260
1261 while (byte_cnt) {
1262 u32 pg_consumed_bytes =
1263 min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1264 unsigned int truesize =
1265 ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1266
1267 mlx5e_add_skb_frag(rq, skb, di, frag_offset,
1268 pg_consumed_bytes, truesize);
1269 byte_cnt -= pg_consumed_bytes;
1270 frag_offset = 0;
1271 di++;
1272 }
1273
1274 mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
1275
1276 skb->tail += headlen;
1277 skb->len += headlen;
1278
1279 return skb;
1280 }
1281
1282 struct sk_buff *
1283 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1284 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1285 {
1286 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1287 u16 rx_headroom = rq->buff.headroom;
1288 u32 cqe_bcnt32 = cqe_bcnt;
1289 struct sk_buff *skb;
1290 void *va, *data;
1291 u32 frag_size;
1292 bool consumed;
1293
1294 /* Check packet size. Note LRO doesn't use linear SKB */
1295 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1296 rq->stats->oversize_pkts_sw_drop++;
1297 return NULL;
1298 }
1299
1300 va = page_address(di->page) + head_offset;
1301 data = va + rx_headroom;
1302 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1303
1304 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1305 frag_size, DMA_FROM_DEVICE);
1306 prefetchw(va);
1307 prefetch(data);
1308
1309 rcu_read_lock();
1310 consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
1311 rcu_read_unlock();
1312 if (consumed) {
1313 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1314 __set_bit(page_idx, wi->xdp_xmit_bitmap);
1315 return NULL;
1316 }
1317
1318 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
1319 if (unlikely(!skb))
1320 return NULL;
1321
1322 /* queue up for recycling/reuse */
1323 page_ref_inc(di->page);
1324
1325 return skb;
1326 }
1327
1328 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1329 {
1330 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
1331 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
1332 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1333 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1334 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1335 u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
1336 u32 page_idx = wqe_offset >> PAGE_SHIFT;
1337 struct mlx5e_rx_wqe_ll *wqe;
1338 struct mlx5_wq_ll *wq;
1339 struct sk_buff *skb;
1340 u16 cqe_bcnt;
1341
1342 wi->consumed_strides += cstrides;
1343
1344 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1345 trigger_report(rq, cqe);
1346 rq->stats->wqe_err++;
1347 goto mpwrq_cqe_out;
1348 }
1349
1350 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1351 struct mlx5e_rq_stats *stats = rq->stats;
1352
1353 stats->mpwqe_filler_cqes++;
1354 stats->mpwqe_filler_strides += cstrides;
1355 goto mpwrq_cqe_out;
1356 }
1357
1358 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1359
1360 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1361 mlx5e_skb_from_cqe_mpwrq_linear,
1362 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1363 rq, wi, cqe_bcnt, head_offset, page_idx);
1364 if (!skb)
1365 goto mpwrq_cqe_out;
1366
1367 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1368 napi_gro_receive(rq->cq.napi, skb);
1369
1370 mpwrq_cqe_out:
1371 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1372 return;
1373
1374 wq = &rq->mpwqe.wq;
1375 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1376 mlx5e_free_rx_mpwqe(rq, wi, true);
1377 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1378 }
1379
1380 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1381 {
1382 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
1383 struct mlx5_cqwq *cqwq = &cq->wq;
1384 struct mlx5_cqe64 *cqe;
1385 int work_done = 0;
1386
1387 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1388 return 0;
1389
1390 if (rq->cqd.left) {
1391 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
1392 if (rq->cqd.left || work_done >= budget)
1393 goto out;
1394 }
1395
1396 cqe = mlx5_cqwq_get_cqe(cqwq);
1397 if (!cqe) {
1398 if (unlikely(work_done))
1399 goto out;
1400 return 0;
1401 }
1402
1403 do {
1404 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1405 work_done +=
1406 mlx5e_decompress_cqes_start(rq, cqwq,
1407 budget - work_done);
1408 continue;
1409 }
1410
1411 mlx5_cqwq_pop(cqwq);
1412
1413 INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
1414 mlx5e_handle_rx_cqe, rq, cqe);
1415 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
1416
1417 out:
1418 if (rq->xdp_prog)
1419 mlx5e_xdp_rx_poll_complete(rq);
1420
1421 mlx5_cqwq_update_db_record(cqwq);
1422
1423 /* ensure cq space is freed before enabling more cqes */
1424 wmb();
1425
1426 return work_done;
1427 }
1428
1429 #ifdef CONFIG_MLX5_CORE_IPOIB
1430
1431 #define MLX5_IB_GRH_DGID_OFFSET 24
1432 #define MLX5_GID_SIZE 16
1433
1434 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1435 struct mlx5_cqe64 *cqe,
1436 u32 cqe_bcnt,
1437 struct sk_buff *skb)
1438 {
1439 struct hwtstamp_config *tstamp;
1440 struct mlx5e_rq_stats *stats;
1441 struct net_device *netdev;
1442 struct mlx5e_priv *priv;
1443 char *pseudo_header;
1444 u32 qpn;
1445 u8 *dgid;
1446 u8 g;
1447
1448 qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1449 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1450
1451 /* No mapping present, cannot process SKB. This might happen if a child
1452  * interface is going down while having unprocessed CQEs on parent RQ
1453  */
1454 if (unlikely(!netdev)) {
1455 /* TODO: add drop counters support */
1456 skb->dev = NULL;
1457 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1458 return;
1459 }
1460
1461 priv = mlx5i_epriv(netdev);
1462 tstamp = &priv->tstamp;
1463 stats = &priv->channel_stats[rq->ix].rq;
1464
1465 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
1466 dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1467 if ((!g) || dgid[0] != 0xff)
1468 skb->pkt_type = PACKET_HOST;
1469 else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1470 skb->pkt_type = PACKET_BROADCAST;
1471 else
1472 skb->pkt_type = PACKET_MULTICAST;
1473
1474
1475
1476
1477
1478 skb_pull(skb, MLX5_IB_GRH_BYTES);
1479
1480 skb->protocol = *((__be16 *)(skb->data));
1481
1482 if (netdev->features & NETIF_F_RXCSUM) {
1483 skb->ip_summed = CHECKSUM_COMPLETE;
1484 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1485 stats->csum_complete++;
1486 } else {
1487 skb->ip_summed = CHECKSUM_NONE;
1488 stats->csum_none++;
1489 }
1490
1491 if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
1492 skb_hwtstamps(skb)->hwtstamp =
1493 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
1494
1495 skb_record_rx_queue(skb, rq->ix);
1496
1497 if (likely(netdev->features & NETIF_F_RXHASH))
1498 mlx5e_skb_set_hash(cqe, skb);
1499
1500 /* 20 bytes of ipoib header and 4 for encap built in skb */
1501 pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1502 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1503 skb_reset_mac_header(skb);
1504 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1505
1506 skb->dev = netdev;
1507
1508 stats->packets++;
1509 stats->bytes += cqe_bcnt;
1510 }
1511
1512 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1513 {
1514 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1515 struct mlx5e_wqe_frag_info *wi;
1516 struct sk_buff *skb;
1517 u32 cqe_bcnt;
1518 u16 ci;
1519
1520 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1521 wi = get_frag(rq, ci);
1522 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1523
1524 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1525 rq->stats->wqe_err++;
1526 goto wq_free_wqe;
1527 }
1528
1529 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1530 mlx5e_skb_from_cqe_linear,
1531 mlx5e_skb_from_cqe_nonlinear,
1532 rq, cqe, wi, cqe_bcnt);
1533 if (!skb)
1534 goto wq_free_wqe;
1535
1536 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1537 if (unlikely(!skb->dev)) {
1538 dev_kfree_skb_any(skb);
1539 goto wq_free_wqe;
1540 }
1541 napi_gro_receive(rq->cq.napi, skb);
1542
1543 wq_free_wqe:
1544 mlx5e_free_rx_wqe(rq, wi, true);
1545 mlx5_wq_cyc_pop(wq);
1546 }
1547
1548 #endif
1549
1550 #ifdef CONFIG_MLX5_EN_IPSEC
1551
1552 void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1553 {
1554 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1555 struct mlx5e_wqe_frag_info *wi;
1556 struct sk_buff *skb;
1557 u32 cqe_bcnt;
1558 u16 ci;
1559
1560 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1561 wi = get_frag(rq, ci);
1562 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1563
1564 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1565 rq->stats->wqe_err++;
1566 goto wq_free_wqe;
1567 }
1568
1569 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1570 mlx5e_skb_from_cqe_linear,
1571 mlx5e_skb_from_cqe_nonlinear,
1572 rq, cqe, wi, cqe_bcnt);
1573 if (unlikely(!skb))
1574 goto wq_free_wqe;
1575
1576 skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
1577 if (unlikely(!skb))
1578 goto wq_free_wqe;
1579
1580 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1581 napi_gro_receive(rq->cq.napi, skb);
1582
1583 wq_free_wqe:
1584 mlx5e_free_rx_wqe(rq, wi, true);
1585 mlx5_wq_cyc_pop(wq);
1586 }
1587
1588 #endif