Lines matching refs:fp
92 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
93 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
182 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp() local
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
345 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, in bnx2x_update_last_max_sge() argument
348 u16 last_max = fp->last_max_sge; in bnx2x_update_last_max_sge()
351 fp->last_max_sge = idx; in bnx2x_update_last_max_sge()
354 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, in bnx2x_update_sge_prod() argument
358 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod()
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask, in bnx2x_update_sge_prod()
375 prefetch((void *)(fp->sge_mask)); in bnx2x_update_sge_prod()
376 bnx2x_update_last_max_sge(fp, in bnx2x_update_sge_prod()
379 last_max = RX_SGE(fp->last_max_sge); in bnx2x_update_sge_prod()
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; in bnx2x_update_sge_prod()
389 if (likely(fp->sge_mask[i])) in bnx2x_update_sge_prod()
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; in bnx2x_update_sge_prod()
397 fp->rx_sge_prod += delta; in bnx2x_update_sge_prod()
399 bnx2x_clear_sge_mask_next_elems(fp); in bnx2x_update_sge_prod()
404 fp->last_max_sge, fp->rx_sge_prod); in bnx2x_update_sge_prod()
430 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, in bnx2x_tpa_start() argument
434 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start()
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; in bnx2x_tpa_start()
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; in bnx2x_tpa_start()
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; in bnx2x_tpa_start()
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; in bnx2x_tpa_start()
449 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_start()
458 bnx2x_reuse_rx_data(fp, cons, prod); in bnx2x_tpa_start()
481 if (fp->mode == TPA_MODE_GRO) { in bnx2x_tpa_start()
488 fp->tpa_queue_used |= (1 << queue); in bnx2x_tpa_start()
490 fp->tpa_queue_used); in bnx2x_tpa_start()
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; in bnx2x_alloc_rx_sge()
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; in bnx2x_alloc_rx_sge()
551 struct bnx2x_alloc_pool *pool = &fp->page_pool; in bnx2x_alloc_rx_sge()
592 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
607 if (fp->mode == TPA_MODE_GRO) { in bnx2x_fill_frag_skb()
634 if (fp->mode == TPA_MODE_GRO) in bnx2x_fill_frag_skb()
639 rx_pg = &fp->rx_page_ring[sge_idx]; in bnx2x_fill_frag_skb()
644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
654 if (fp->mode == TPA_MODE_LRO) in bnx2x_fill_frag_skb()
682 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) in bnx2x_frag_free() argument
684 if (fp->rx_frag_size) in bnx2x_frag_free()
690 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) in bnx2x_frag_alloc() argument
692 if (fp->rx_frag_size) { in bnx2x_frag_alloc()
697 return netdev_alloc_frag(fp->rx_frag_size); in bnx2x_frag_alloc()
700 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc()
737 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
755 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_gro_receive()
756 napi_gro_receive(&fp->napi, skb); in bnx2x_gro_receive()
759 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
781 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); in bnx2x_tpa_stop()
786 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_tpa_stop()
788 skb = build_skb(data, fp->rx_frag_size); in bnx2x_tpa_stop()
792 if (pad + len > fp->rx_buf_size) { in bnx2x_tpa_stop()
794 pad, len, fp->rx_buf_size); in bnx2x_tpa_stop()
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
811 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
824 bnx2x_frag_free(fp, new_data); in bnx2x_tpa_stop()
829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
832 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
836 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; in bnx2x_alloc_rx_data()
837 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; in bnx2x_alloc_rx_data()
840 data = bnx2x_frag_alloc(fp, gfp_mask); in bnx2x_alloc_rx_data()
845 fp->rx_buf_size, in bnx2x_alloc_rx_data()
848 bnx2x_frag_free(fp, data); in bnx2x_alloc_rx_data()
864 struct bnx2x_fastpath *fp, in bnx2x_csum_validate() argument
886 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) in bnx2x_rx_int() argument
888 struct bnx2x *bp = fp->bp; in bnx2x_rx_int()
902 bd_cons = fp->rx_bd_cons; in bnx2x_rx_int()
903 bd_prod = fp->rx_bd_prod; in bnx2x_rx_int()
905 sw_comp_cons = fp->rx_comp_cons; in bnx2x_rx_int()
906 sw_comp_prod = fp->rx_comp_prod; in bnx2x_rx_int()
909 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
913 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); in bnx2x_rx_int()
958 bnx2x_sp_event(fp, cqe); in bnx2x_rx_int()
962 rx_buf = &fp->rx_buf_ring[bd_cons]; in bnx2x_rx_int()
970 if (fp->mode == TPA_MODE_DISABLED && in bnx2x_rx_int()
983 bnx2x_tpa_start(fp, queue, in bnx2x_rx_int()
990 tpa_info = &fp->tpa_info[queue]; in bnx2x_rx_int()
998 if (fp->mode == TPA_MODE_GRO) in bnx2x_rx_int()
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
1012 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); in bnx2x_rx_int()
1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1038 skb = napi_alloc_skb(&fp->napi, len); in bnx2x_rx_int()
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1052 fp->rx_buf_size, in bnx2x_rx_int()
1054 skb = build_skb(data, fp->rx_frag_size); in bnx2x_rx_int()
1056 bnx2x_frag_free(fp, data); in bnx2x_rx_int()
1057 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1067 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); in bnx2x_rx_int()
1082 bnx2x_csum_validate(skb, cqe, fp, in bnx2x_rx_int()
1083 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1085 skb_record_rx_queue(skb, fp->rx_queue); in bnx2x_rx_int()
1097 skb_mark_napi_id(skb, &fp->napi); in bnx2x_rx_int()
1099 if (bnx2x_fp_ll_polling(fp)) in bnx2x_rx_int()
1102 napi_gro_receive(&fp->napi, skb); in bnx2x_rx_int()
1121 cqe = &fp->rx_comp_ring[comp_ring_cons]; in bnx2x_rx_int()
1125 fp->rx_bd_cons = bd_cons; in bnx2x_rx_int()
1126 fp->rx_bd_prod = bd_prod_fw; in bnx2x_rx_int()
1127 fp->rx_comp_cons = sw_comp_cons; in bnx2x_rx_int()
1128 fp->rx_comp_prod = sw_comp_prod; in bnx2x_rx_int()
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1132 fp->rx_sge_prod); in bnx2x_rx_int()
1134 fp->rx_pkt += rx_pkt; in bnx2x_rx_int()
1135 fp->rx_calls++; in bnx2x_rx_int()
1142 struct bnx2x_fastpath *fp = fp_cookie; in bnx2x_msix_fp_int() local
1143 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int()
1148 fp->index, fp->fw_sb_id, fp->igu_sb_id); in bnx2x_msix_fp_int()
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1158 for_each_cos_in_tx_queue(fp, cos) in bnx2x_msix_fp_int()
1159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb); in bnx2x_msix_fp_int()
1161 prefetch(&fp->sb_running_index[SM_RX_ID]); in bnx2x_msix_fp_int()
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1346 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_sgl() argument
1353 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; in bnx2x_set_next_page_sgl()
1355 cpu_to_le32(U64_HI(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1359 cpu_to_le32(U64_LO(fp->rx_sge_mapping + in bnx2x_set_next_page_sgl()
1365 struct bnx2x_fastpath *fp, int last) in bnx2x_free_tpa_pool() argument
1370 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; in bnx2x_free_tpa_pool()
1381 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_tpa_pool()
1382 bnx2x_frag_free(fp, data); in bnx2x_free_tpa_pool()
1392 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic() local
1394 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings_cnic()
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1402 fp->rx_sge_prod); in bnx2x_init_rx_rings_cnic()
1414 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1419 if (fp->mode != TPA_MODE_DISABLED) { in bnx2x_init_rx_rings()
1423 &fp->tpa_info[i]; in bnx2x_init_rx_rings()
1428 bnx2x_frag_alloc(fp, GFP_KERNEL); in bnx2x_init_rx_rings()
1432 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1433 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1441 bnx2x_set_next_page_sgl(fp); in bnx2x_init_rx_rings()
1444 bnx2x_init_sge_ring_bit_mask(fp); in bnx2x_init_rx_rings()
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1457 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1459 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1461 fp->mode = TPA_MODE_DISABLED; in bnx2x_init_rx_rings()
1468 fp->rx_sge_prod = ring_prod; in bnx2x_init_rx_rings()
1473 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings() local
1475 fp->rx_bd_cons = 0; in bnx2x_init_rx_rings()
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1483 fp->rx_sge_prod); in bnx2x_init_rx_rings()
1491 U64_LO(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1494 U64_HI(fp->rx_comp_mapping)); in bnx2x_init_rx_rings()
1499 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) in bnx2x_free_tx_skbs_queue() argument
1502 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue()
1504 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_tx_skbs_queue()
1505 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_tx_skbs_queue()
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1541 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) in bnx2x_free_rx_bds() argument
1543 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds()
1547 if (fp->rx_buf_ring == NULL) in bnx2x_free_rx_bds()
1551 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; in bnx2x_free_rx_bds()
1558 fp->rx_buf_size, DMA_FROM_DEVICE); in bnx2x_free_rx_bds()
1561 bnx2x_frag_free(fp, data); in bnx2x_free_rx_bds()
1570 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1579 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs() local
1581 bnx2x_free_rx_bds(fp); in bnx2x_free_rx_skbs()
1583 if (fp->mode != TPA_MODE_DISABLED) in bnx2x_free_rx_skbs()
1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1772 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs() local
1773 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", in bnx2x_req_msix_irqs()
1777 bnx2x_msix_fp_int, 0, fp->name, fp); in bnx2x_req_msix_irqs()
1872 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable_cnic()
1882 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable()
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable_cnic()
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable()
2028 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size() local
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + in bnx2x_set_rx_buf_size()
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) in bnx2x_set_rx_buf_size()
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; in bnx2x_set_rx_buf_size()
2051 fp->rx_frag_size = 0; in bnx2x_set_rx_buf_size()
2065 bp->fp->cl_id + in bnx2x_init_rss()
2472 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp() local
2474 struct napi_struct orig_napi = fp->napi; in bnx2x_bz_fp()
2475 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; in bnx2x_bz_fp()
2478 if (fp->tpa_info) in bnx2x_bz_fp()
2479 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * in bnx2x_bz_fp()
2481 memset(fp, 0, sizeof(*fp)); in bnx2x_bz_fp()
2484 fp->napi = orig_napi; in bnx2x_bz_fp()
2485 fp->tpa_info = orig_tpa_info; in bnx2x_bz_fp()
2486 fp->bp = bp; in bnx2x_bz_fp()
2487 fp->index = index; in bnx2x_bz_fp()
2488 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2489 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2492 fp->max_cos = 1; in bnx2x_bz_fp()
2495 if (IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2497 if (IS_ETH_FP(fp)) in bnx2x_bz_fp()
2498 for_each_cos_in_tx_queue(fp, cos) in bnx2x_bz_fp()
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2506 fp->mode = TPA_MODE_LRO; in bnx2x_bz_fp()
2509 fp->mode = TPA_MODE_GRO; in bnx2x_bz_fp()
2511 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2516 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2517 fp->mode = TPA_MODE_DISABLED; in bnx2x_bz_fp()
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2976 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues() local
2978 for_each_cos_in_tx_queue(fp, cos) in bnx2x_drain_tx_queues()
2979 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3224 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, in bnx2x_poll() local
3226 struct bnx2x *bp = fp->bp; in bnx2x_poll()
3235 if (!bnx2x_fp_lock_napi(fp)) in bnx2x_poll()
3238 for_each_cos_in_tx_queue(fp, cos) in bnx2x_poll()
3239 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) in bnx2x_poll()
3240 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3242 if (bnx2x_has_rx_work(fp)) { in bnx2x_poll()
3243 work_done += bnx2x_rx_int(fp, budget - work_done); in bnx2x_poll()
3247 bnx2x_fp_unlock_napi(fp); in bnx2x_poll()
3252 bnx2x_fp_unlock_napi(fp); in bnx2x_poll()
3255 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { in bnx2x_poll()
3261 if (IS_FCOE_FP(fp)) { in bnx2x_poll()
3265 bnx2x_update_fpsb_idx(fp); in bnx2x_poll()
3281 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { in bnx2x_poll()
3285 "Update index to %d\n", fp->fp_hc_idx); in bnx2x_poll()
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3287 le16_to_cpu(fp->fp_hc_idx), in bnx2x_poll()
3301 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, in bnx2x_low_latency_recv() local
3303 struct bnx2x *bp = fp->bp; in bnx2x_low_latency_recv()
3311 if (!bnx2x_fp_lock_poll(fp)) in bnx2x_low_latency_recv()
3314 if (bnx2x_has_rx_work(fp)) in bnx2x_low_latency_recv()
3315 found = bnx2x_rx_int(fp, 4); in bnx2x_low_latency_recv()
3317 bnx2x_fp_unlock_poll(fp); in bnx2x_low_latency_recv()
4372 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at() local
4379 fp->status_blk_mapping = 0; in bnx2x_free_fp_mem_at()
4396 bnx2x_free_rx_bds(fp); in bnx2x_free_fp_mem_at()
4419 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_free_fp_mem_at()
4420 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_free_fp_mem_at()
4466 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_bds() argument
4469 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds()
4473 fp->rx_comp_cons = 0; in bnx2x_alloc_rx_bds()
4480 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4491 i - failure_cnt, fp->index); in bnx2x_alloc_rx_bds()
4493 fp->rx_bd_prod = ring_prod; in bnx2x_alloc_rx_bds()
4495 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, in bnx2x_alloc_rx_bds()
4497 fp->rx_pkt = fp->rx_calls = 0; in bnx2x_alloc_rx_bds()
4499 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4504 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) in bnx2x_set_next_page_rx_cq() argument
4512 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; in bnx2x_set_next_page_rx_cq()
4514 cpu_to_le32(U64_HI(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4517 cpu_to_le32(U64_LO(fp->rx_comp_mapping + in bnx2x_set_next_page_rx_cq()
4525 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at() local
4584 for_each_cos_in_tx_queue(fp, cos) { in bnx2x_alloc_fp_mem_at()
4585 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; in bnx2x_alloc_fp_mem_at()
4635 bnx2x_set_next_page_rx_bd(fp); in bnx2x_alloc_fp_mem_at()
4638 bnx2x_set_next_page_rx_cq(fp); in bnx2x_alloc_fp_mem_at()
4641 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); in bnx2x_alloc_fp_mem_at()
4656 if (ring_size < (fp->mode == TPA_MODE_DISABLED ? in bnx2x_alloc_fp_mem_at()
4724 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4725 kfree(bp->fp); in bnx2x_free_mem_bp()
4735 struct bnx2x_fastpath *fp; in bnx2x_alloc_mem_bp() local
4756 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4757 if (!fp) in bnx2x_alloc_mem_bp()
4760 fp[i].tpa_info = in bnx2x_alloc_mem_bp()
4763 if (!(fp[i].tpa_info)) in bnx2x_alloc_mem_bp()
4767 bp->fp = fp; in bnx2x_alloc_mem_bp()
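
For orientation, the sketch below (plain C, not driver code) condenses the recurring idiom the references above keep hitting: the parent device context bp owns an array of per-queue fastpath contexts reached as &bp->fp[i], each fastpath carries a back-pointer recovered as bp = fp->bp, and per-queue state (index, rx_buf_size, rings, stats) hangs off fp. Everything here, including the struct layouts, is a reduced assumption for illustration; only the two-way bp/fp linkage and the field names already visible in the listing are taken from the source.

/*
 * Illustrative sketch only -- NOT the real bnx2x structures. It models the
 * pattern seen throughout the listing: bp->fp[] array, fp->bp back-pointer,
 * helpers taking either bp or fp and reaching the other through these links.
 */
#include <stdio.h>
#include <stdlib.h>

struct bnx2x;                           /* parent device context (assumed, simplified) */

struct bnx2x_fastpath {
	struct bnx2x *bp;               /* back-pointer, as in "bp = fp->bp" */
	int index;                      /* queue index, as in "fp->index" */
	unsigned int rx_buf_size;       /* per-queue RX buffer size */
};

struct bnx2x {
	int num_queues;
	struct bnx2x_fastpath *fp;      /* array indexed as "&bp->fp[i]" */
};

/* Mirrors the common helper shape: take fp, recover bp when needed. */
static void show_queue(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;      /* same idiom as in bnx2x_rx_int() above */

	printf("queue[%d] of device %p, rx_buf_size %u\n",
	       fp->index, (void *)bp, fp->rx_buf_size);
}

int main(void)
{
	struct bnx2x bp = { .num_queues = 2 };

	bp.fp = calloc(bp.num_queues, sizeof(*bp.fp));
	if (!bp.fp)
		return 1;

	for (int i = 0; i < bp.num_queues; i++) {
		struct bnx2x_fastpath *fp = &bp.fp[i];  /* as in the listing */

		fp->bp = &bp;
		fp->index = i;
		fp->rx_buf_size = 1536;
		show_queue(fp);
	}
	free(bp.fp);
	return 0;
}

Building this with any C99 compiler and running it only prints the two queues; the point is the ownership shape (array of fastpaths plus back-pointer), not the output.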