Lines matching refs:fp (bnx2x fastpath cross-reference; each entry gives the source line number, the matching code, and the enclosing function, with "argument"/"local" marking how fp is bound there)
90 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
91 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
180 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp() local
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, in bnx2x_update_last_max_sge() argument
346 u16 last_max = fp->last_max_sge; in bnx2x_update_last_max_sge()
349 fp->last_max_sge = idx; in bnx2x_update_last_max_sge()
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, in bnx2x_update_sge_prod() argument
356 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod()
366 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bnx2x_update_sge_prod()
373 prefetch((void *)(fp->sge_mask)); in bnx2x_update_sge_prod()
374 bnx2x_update_last_max_sge(fp, in bnx2x_update_sge_prod()
377 last_max = RX_SGE(fp->last_max_sge); in bnx2x_update_sge_prod()
379 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bnx2x_update_sge_prod()
387 if (likely(fp->sge_mask[i])) in bnx2x_update_sge_prod()
390 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bnx2x_update_sge_prod()
395 fp->rx_sge_prod += delta; in bnx2x_update_sge_prod()
397 bnx2x_clear_sge_mask_next_elems(fp); in bnx2x_update_sge_prod()
402 fp->last_max_sge, fp->rx_sge_prod); in bnx2x_update_sge_prod()
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, in bnx2x_tpa_start() argument
432 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start()
433 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_tpa_start()
434 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_tpa_start()
435 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_tpa_start()
437 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; in bnx2x_tpa_start()
447 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_start()
456 bnx2x_reuse_rx_data(fp, cons, prod); in bnx2x_tpa_start()
479 if (fp->mode == TPA_MODE_GRO) { in bnx2x_tpa_start()
486 fp->tpa_queue_used |= (1 << queue); in bnx2x_tpa_start()
488 fp->tpa_queue_used); in bnx2x_tpa_start()
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
548 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_alloc_rx_sge()
549 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_alloc_rx_sge()
574 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
589 if (fp->mode == TPA_MODE_GRO) { in bnx2x_fill_frag_skb()
616 if (fp->mode == TPA_MODE_GRO) in bnx2x_fill_frag_skb()
621 rx_pg = &fp->rx_page_ring[sge_idx]; in bnx2x_fill_frag_skb()
626 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
628 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
637 if (fp->mode == TPA_MODE_LRO) in bnx2x_fill_frag_skb()
662 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) in bnx2x_frag_free() argument
664 if (fp->rx_frag_size) in bnx2x_frag_free()
670 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) in bnx2x_frag_alloc() argument
672 if (fp->rx_frag_size) { in bnx2x_frag_alloc()
677 return netdev_alloc_frag(fp->rx_frag_size); in bnx2x_frag_alloc()
680 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc()
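Taken together, the bnx2x_frag_free()/bnx2x_frag_alloc() matches above capture the driver's two RX buffer schemes: page-fragment buffers when fp->rx_frag_size is non-zero, plain kmalloc() buffers (rx_buf_size + NET_SKB_PAD) otherwise. A minimal sketch of the pair, with the branches that do not reference fp filled in as assumptions rather than verbatim driver code:

	/* Sketch only: built around the matched lines; the free calls and the
	 * blocking-allocation special case are assumptions.
	 */
	static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
	{
		if (fp->rx_frag_size)
			skb_free_frag(data);	/* assumed: page-frag buffer */
		else
			kfree(data);		/* assumed: kmalloc'ed buffer */
	}

	static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
	{
		if (fp->rx_frag_size) {
			/* assumed: blocking (init-time) callers take a whole page */
			if (unlikely(gfpflags_allow_blocking(gfp_mask)))
				return (void *)__get_free_page(gfp_mask);

			return netdev_alloc_frag(fp->rx_frag_size);
		}

		return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
	}

The frag path keeps RX buffers in page fragments so build_skb() (see the bnx2x_tpa_stop() and bnx2x_rx_int() matches below) can wrap them without a copy.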
717 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
735 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_gro_receive()
736 napi_gro_receive(&fp->napi, skb); in bnx2x_gro_receive()
739 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
761 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); in bnx2x_tpa_stop()
766 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_stop()
768 skb = build_skb(data, fp->rx_frag_size); in bnx2x_tpa_stop()
772 if (pad + len > fp->rx_buf_size) { in bnx2x_tpa_stop()
774 pad, len, fp->rx_buf_size); in bnx2x_tpa_stop()
787 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
791 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
804 bnx2x_frag_free(fp, new_data); in bnx2x_tpa_stop()
809 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
812 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
816 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; in bnx2x_alloc_rx_data()
817 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; in bnx2x_alloc_rx_data()
820 data = bnx2x_frag_alloc(fp, gfp_mask); in bnx2x_alloc_rx_data()
825 fp->rx_buf_size, in bnx2x_alloc_rx_data()
828 bnx2x_frag_free(fp, data); in bnx2x_alloc_rx_data()
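The bnx2x_alloc_rx_data() matches show the refill pattern for a single RX BD: allocate through bnx2x_frag_alloc(), DMA-map the payload area, and hand the buffer back via bnx2x_frag_free() if the mapping fails. A sketch with the error checks and BD address writes (which do not mention fp and so are absent from the listing) filled in as assumptions:

	static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
				       u16 index, gfp_t gfp_mask)
	{
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
		struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
		dma_addr_t mapping;
		u8 *data;

		data = bnx2x_frag_alloc(fp, gfp_mask);
		if (unlikely(!data))
			return -ENOMEM;

		/* Assumed offset: the device-visible payload starts after NET_SKB_PAD */
		mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			bnx2x_frag_free(fp, data);
			return -ENOMEM;
		}

		rx_buf->data = data;
		dma_unmap_addr_set(rx_buf, mapping, mapping);

		/* Publish the buffer address in the RX BD */
		rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

		return 0;
	}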
844 struct bnx2x_fastpath *fp, in bnx2x_csum_validate() argument
866 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) in bnx2x_rx_int() argument
868 struct bnx2x *bp = fp->bp; in bnx2x_rx_int()
882 bd_cons = fp->rx_bd_cons; in bnx2x_rx_int()
883 bd_prod = fp->rx_bd_prod; in bnx2x_rx_int()
885 sw_comp_cons = fp->rx_comp_cons; in bnx2x_rx_int()
886 sw_comp_prod = fp->rx_comp_prod; in bnx2x_rx_int()
889 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
893 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); in bnx2x_rx_int()
938 bnx2x_sp_event(fp, cqe); in bnx2x_rx_int()
942 rx_buf = &fp->rx_buf_ring[bd_cons]; in bnx2x_rx_int()
950 if (fp->mode == TPA_MODE_DISABLED && in bnx2x_rx_int()
963 bnx2x_tpa_start(fp, queue, in bnx2x_rx_int()
970 tpa_info = &fp->tpa_info[queue]; in bnx2x_rx_int()
978 if (fp->mode == TPA_MODE_GRO) in bnx2x_rx_int()
985 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
992 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); in bnx2x_rx_int()
1009 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1018 skb = napi_alloc_skb(&fp->napi, len); in bnx2x_rx_int()
1022 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1026 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1028 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1032 fp->rx_buf_size, in bnx2x_rx_int()
1034 skb = build_skb(data, fp->rx_frag_size); in bnx2x_rx_int()
1036 bnx2x_frag_free(fp, data); in bnx2x_rx_int()
1037 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1045 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1047 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1062 bnx2x_csum_validate(skb, cqe, fp, in bnx2x_rx_int()
1063 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1065 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_rx_int()
1077 skb_mark_napi_id(skb, &fp->napi); in bnx2x_rx_int()
1079 if (bnx2x_fp_ll_polling(fp)) in bnx2x_rx_int()
1082 napi_gro_receive(&fp->napi, skb); in bnx2x_rx_int()
1101 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
1105 fp->rx_bd_cons = bd_cons; in bnx2x_rx_int()
1106 fp->rx_bd_prod = bd_prod_fw; in bnx2x_rx_int()
1107 fp->rx_comp_cons = sw_comp_cons; in bnx2x_rx_int()
1108 fp->rx_comp_prod = sw_comp_prod; in bnx2x_rx_int()
1111 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1112 fp->rx_sge_prod); in bnx2x_rx_int()
1114 fp->rx_pkt += rx_pkt; in bnx2x_rx_int()
1115 fp->rx_calls++; in bnx2x_rx_int()
1122 struct bnx2x_fastpath *fp = fp_cookie; in bnx2x_msix_fp_int() local
1123 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int()
1128 fp->index, fp->fw_sb_id, fp->igu_sb_id); in bnx2x_msix_fp_int()
1130 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1138 for_each_cos_in_tx_queue(fp, cos) in bnx2x_msix_fp_int()
1139 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_msix_fp_int()
1141 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_msix_fp_int()
1142 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
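The bnx2x_msix_fp_int() matches outline the per-queue MSI-X handler: acknowledge the status block with further interrupts disabled, prefetch the TX consumer indices and the RX running index that the poll routine reads first, and schedule NAPI. A hedged sketch built around those lines; the debug message text is paraphrased and any early-exit checks are assumptions:

	static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
	{
		struct bnx2x_fastpath *fp = fp_cookie;
		struct bnx2x *bp = fp->bp;
		u8 cos;

		DP(NETIF_MSG_INTR,
		   "MSI-X interrupt on fp %d (fw_sb %d, igu_sb %d)\n",
		   fp->index, fp->fw_sb_id, fp->igu_sb_id);

		/* Mask this status block until the NAPI poll re-enables it */
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

		/* Warm the cache lines the poll loop touches first */
		for_each_cos_in_tx_queue(fp, cos)
			prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
		prefetch(&fp->sb_running_index[SM_RX_ID]);

		napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

		return IRQ_HANDLED;
	}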
1326 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_sgl() argument
1333 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; in bnx2x_set_next_page_sgl()
1335 cpu_to_le32(U64_HI(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1339 cpu_to_le32(U64_LO(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1345 struct bnx2x_fastpath *fp, int last) in bnx2x_free_tpa_pool() argument
1350 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; in bnx2x_free_tpa_pool()
1361 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_tpa_pool()
1362 bnx2x_frag_free(fp, data); in bnx2x_free_tpa_pool()
1372 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic() local
1374 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings_cnic()
1381 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1382 fp->rx_sge_prod); in bnx2x_init_rx_rings_cnic()
1394 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1397 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1399 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_init_rx_rings()
1403 &fp->tpa_info[i]; in bnx2x_init_rx_rings()
1408 bnx2x_frag_alloc(fp, GFP_KERNEL); in bnx2x_init_rx_rings()
1412 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1413 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1421 bnx2x_set_next_page_sgl(fp); in bnx2x_init_rx_rings()
1424 bnx2x_init_sge_ring_bit_mask(fp); in bnx2x_init_rx_rings()
1430 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1437 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1439 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1441 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1448 fp->rx_sge_prod = ring_prod; in bnx2x_init_rx_rings()
1453 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1455 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings()
1462 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1463 fp->rx_sge_prod); in bnx2x_init_rx_rings()
1471 U64_LO(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1474 U64_HI(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1479 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) in bnx2x_free_tx_skbs_queue() argument
1482 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue()
1484 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_tx_skbs_queue()
1485 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_tx_skbs_queue()
1508 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1517 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1521 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) in bnx2x_free_rx_bds() argument
1523 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds()
1527 if (fp->rx_buf_ring == NULL) in bnx2x_free_rx_bds()
1531 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; in bnx2x_free_rx_bds()
1538 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_rx_bds()
1541 bnx2x_frag_free(fp, data); in bnx2x_free_rx_bds()
1550 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1559 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs() local
1561 bnx2x_free_rx_bds(fp); in bnx2x_free_rx_skbs()
1563 if (fp->mode != TPA_MODE_DISABLED) in bnx2x_free_rx_skbs()
1564 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1630 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1752 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs() local
1753 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", in bnx2x_req_msix_irqs()
1757 bnx2x_msix_fp_int, 0, fp->name, fp); in bnx2x_req_msix_irqs()
1852 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable_cnic()
1862 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable()
1873 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable_cnic()
1884 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable()
2008 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size() local
2022 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + in bnx2x_set_rx_buf_size()
2028 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) in bnx2x_set_rx_buf_size()
2029 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; in bnx2x_set_rx_buf_size()
2031 fp->rx_frag_size = 0; in bnx2x_set_rx_buf_size()
2045 bp->fp->cl_id + in bnx2x_init_rss()
2447 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp() local
2449 struct napi_struct orig_napi = fp->napi; in bnx2x_bz_fp()
2450 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; in bnx2x_bz_fp()
2453 if (fp->tpa_info) in bnx2x_bz_fp()
2454 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * in bnx2x_bz_fp()
2456 memset(fp, 0, sizeof(*fp)); in bnx2x_bz_fp()
2459 fp->napi = orig_napi; in bnx2x_bz_fp()
2460 fp->tpa_info = orig_tpa_info; in bnx2x_bz_fp()
2461 fp->bp = bp; in bnx2x_bz_fp()
2462 fp->index = index; in bnx2x_bz_fp()
2463 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2464 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2467 fp->max_cos = 1; in bnx2x_bz_fp()
2470 if (IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2471 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2472 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2473 for_each_cos_in_tx_queue(fp, cos) in bnx2x_bz_fp()
2474 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2481 fp->mode = TPA_MODE_LRO; in bnx2x_bz_fp()
2484 fp->mode = TPA_MODE_GRO; in bnx2x_bz_fp()
2486 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2491 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2492 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
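The bnx2x_bz_fp() matches end with the TPA policy for a re-initialized fastpath: LRO if the netdevice offers it, else GRO-style aggregation, else disabled, with an unconditional off switch for FCoE rings or a global TPA disable. Only the fp->mode assignments appear in the listing, so the feature tests in this fragment are assumptions:

	/* Assumed conditions; only the fp->mode assignments are in the
	 * matched lines above.
	 */
	if (bp->dev->features & NETIF_F_LRO)
		fp->mode = TPA_MODE_LRO;
	else if ((bp->dev->features & NETIF_F_GRO) &&
		 bnx2x_mtu_allows_gro(bp->dev->mtu))
		fp->mode = TPA_MODE_GRO;
	else
		fp->mode = TPA_MODE_DISABLED;

	/* Aggregation is never used on the FCoE ring or when TPA is off */
	if (bp->disable_tpa || IS_FCOE_FP(fp))
		fp->mode = TPA_MODE_DISABLED;

fp->mode then drives the aggregation paths matched above in bnx2x_tpa_start()/bnx2x_fill_frag_skb() and the TPA pool setup in bnx2x_init_rx_rings().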
2541 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2768 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2770 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2791 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2890 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2923 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues() local
2925 for_each_cos_in_tx_queue(fp, cos) in bnx2x_drain_tx_queues()
2926 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
3065 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3168 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, in bnx2x_poll() local
3170 struct bnx2x *bp = fp->bp; in bnx2x_poll()
3179 if (!bnx2x_fp_lock_napi(fp)) in bnx2x_poll()
3182 for_each_cos_in_tx_queue(fp, cos) in bnx2x_poll()
3183 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_poll()
3184 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3186 if (bnx2x_has_rx_work(fp)) { in bnx2x_poll()
3187 work_done += bnx2x_rx_int(fp, budget - work_done); in bnx2x_poll()
3191 bnx2x_fp_unlock_napi(fp); in bnx2x_poll()
3196 bnx2x_fp_unlock_napi(fp); in bnx2x_poll()
3199 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { in bnx2x_poll()
3205 if (IS_FCOE_FP(fp)) { in bnx2x_poll()
3209 bnx2x_update_fpsb_idx(fp); in bnx2x_poll()
3225 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { in bnx2x_poll()
3229 "Update index to %d\n", fp->fp_hc_idx); in bnx2x_poll()
3230 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3231 le16_to_cpu(fp->fp_hc_idx), in bnx2x_poll()
3245 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, in bnx2x_low_latency_recv() local
3247 struct bnx2x *bp = fp->bp; in bnx2x_low_latency_recv()
3255 if (!bnx2x_fp_lock_poll(fp)) in bnx2x_low_latency_recv()
3258 if (bnx2x_has_rx_work(fp)) in bnx2x_low_latency_recv()
3259 found = bnx2x_rx_int(fp, 4); in bnx2x_low_latency_recv()
3261 bnx2x_fp_unlock_poll(fp); in bnx2x_low_latency_recv()
4264 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at() local
4271 fp->status_blk_mapping = 0; in bnx2x_free_fp_mem_at()
4288 bnx2x_free_rx_bds(fp); in bnx2x_free_fp_mem_at()
4311 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_fp_mem_at()
4312 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_fp_mem_at()
4358 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_bds() argument
4361 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds()
4365 fp->rx_comp_cons = 0; in bnx2x_alloc_rx_bds()
4372 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4383 i - failure_cnt, fp->index); in bnx2x_alloc_rx_bds()
4385 fp->rx_bd_prod = ring_prod; in bnx2x_alloc_rx_bds()
4387 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, in bnx2x_alloc_rx_bds()
4389 fp->rx_pkt = fp->rx_calls = 0; in bnx2x_alloc_rx_bds()
4391 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4396 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_rx_cq() argument
4404 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; in bnx2x_set_next_page_rx_cq()
4406 cpu_to_le32(U64_HI(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4409 cpu_to_le32(U64_LO(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4417 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at() local
4476 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_alloc_fp_mem_at()
4477 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_alloc_fp_mem_at()
4527 bnx2x_set_next_page_rx_bd(fp); in bnx2x_alloc_fp_mem_at()
4530 bnx2x_set_next_page_rx_cq(fp); in bnx2x_alloc_fp_mem_at()
4533 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); in bnx2x_alloc_fp_mem_at()
4548 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? in bnx2x_alloc_fp_mem_at()
4616 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4617 kfree(bp->fp); in bnx2x_free_mem_bp()
4627 struct bnx2x_fastpath *fp; in bnx2x_alloc_mem_bp() local
4648 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4649 if (!fp) in bnx2x_alloc_mem_bp()
4652 fp[i].tpa_info = in bnx2x_alloc_mem_bp()
4655 if (!(fp[i].tpa_info)) in bnx2x_alloc_mem_bp()
4659 bp->fp = fp; in bnx2x_alloc_mem_bp()
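Finally, the bnx2x_alloc_mem_bp() matches show how the fastpath array itself is set up: one zeroed kcalloc() of bp->fp_array_size entries plus a per-entry kcalloc() for the TPA aggregation contexts. A sketch of the allocation side; the loop bound, GFP flags, and the cleanup label are assumptions, not verbatim driver code:

	struct bnx2x_fastpath *fp;
	int i;

	/* One zero-initialized fastpath per queue (ETH + CNIC) */
	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;			/* assumed cleanup label */

	for (i = 0; i < bp->fp_array_size; i++) {	/* assumed bound */
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

bnx2x_free_mem_bp() mirrors this in reverse, kfree()ing each bp->fp[i].tpa_info and then bp->fp, as its two matches just above the allocation group show.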