Lines matching refs: bp

36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) in bnx2x_add_all_napi_cnic() argument
46 for_each_rx_queue_cnic(bp, i) { in bnx2x_add_all_napi_cnic()
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), in bnx2x_add_all_napi_cnic()
49 napi_hash_add(&bnx2x_fp(bp, i, napi)); in bnx2x_add_all_napi_cnic()
53 static void bnx2x_add_all_napi(struct bnx2x *bp) in bnx2x_add_all_napi() argument
58 for_each_eth_queue(bp, i) { in bnx2x_add_all_napi()
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), in bnx2x_add_all_napi()
61 napi_hash_add(&bnx2x_fp(bp, i, napi)); in bnx2x_add_all_napi()
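
The two registration loops above follow the stock NAPI pattern of this kernel era (four-argument netif_napi_add() plus a separate napi_hash_add() for busy polling). A minimal sketch of the same wiring; my_poll(), my_add_napi() and the literal weight are hypothetical stand-ins:

    #include <linux/netdevice.h>

    /* hypothetical poll callback: process at most 'budget' packets */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            /* ... reclaim TX / receive RX here, bounded by 'budget' ... */
            if (done < budget)
                    napi_complete(napi);    /* idle: re-enable interrupts */
            return done;
    }

    static void my_add_napi(struct net_device *dev, struct napi_struct *napi)
    {
            /* 64 == NAPI_POLL_WEIGHT, the conventional per-poll budget */
            netif_napi_add(dev, napi, my_poll, 64);
            napi_hash_add(napi);            /* opt this queue into busy polling */
    }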
65 static int bnx2x_calc_num_queues(struct bnx2x *bp) in bnx2x_calc_num_queues() argument
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); in bnx2x_calc_num_queues()
90 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) in bnx2x_move_fp() argument
92 struct bnx2x_fastpath *from_fp = &bp->fp[from]; in bnx2x_move_fp()
93 struct bnx2x_fastpath *to_fp = &bp->fp[to]; in bnx2x_move_fp()
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; in bnx2x_move_fp()
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; in bnx2x_move_fp()
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; in bnx2x_move_fp()
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; in bnx2x_move_fp()
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; in bnx2x_move_fp()
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * in bnx2x_move_fp()
127 (bp)->max_cos; in bnx2x_move_fp()
128 if (from == FCOE_IDX(bp)) { in bnx2x_move_fp()
133 memcpy(&bp->bnx2x_txq[new_txdata_index], in bnx2x_move_fp()
134 &bp->bnx2x_txq[old_txdata_index], in bnx2x_move_fp()
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; in bnx2x_move_fp()
147 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) in bnx2x_fill_fw_str() argument
149 if (IS_PF(bp)) { in bnx2x_fill_fw_str()
153 bnx2x_get_ext_phy_fw_version(&bp->link_params, in bnx2x_fill_fw_str()
155 strlcpy(buf, bp->fw_ver, buf_len); in bnx2x_fill_fw_str()
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), in bnx2x_fill_fw_str()
158 (bp->common.bc_ver & 0xff0000) >> 16, in bnx2x_fill_fw_str()
159 (bp->common.bc_ver & 0xff00) >> 8, in bnx2x_fill_fw_str()
160 (bp->common.bc_ver & 0xff), in bnx2x_fill_fw_str()
163 bnx2x_vf_fill_fw_str(bp, buf, buf_len); in bnx2x_fill_fw_str()
173 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) in bnx2x_shrink_eth_fp() argument
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_shrink_eth_fp()
180 for (cos = 1; cos < bp->max_cos; cos++) { in bnx2x_shrink_eth_fp()
182 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_shrink_eth_fp()
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], in bnx2x_shrink_eth_fp()
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; in bnx2x_shrink_eth_fp()
197 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, in bnx2x_free_tx_pkt() argument
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), in bnx2x_free_tx_pkt()
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in bnx2x_free_tx_pkt()
276 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) in bnx2x_tx_int() argument
283 if (unlikely(bp->panic)) in bnx2x_tx_int()
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); in bnx2x_tx_int()
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, in bnx2x_tx_int()
336 (bp->state == BNX2X_STATE_OPEN) && in bnx2x_tx_int()
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) in bnx2x_tx_int()
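
For orientation, the bnx2x_tx_int() lines above are the consumer side of the usual two-index TX ring: walk from the software consumer to the hardware-reported consumer, freeing each completed packet, then wake the stack queue if enough room opened up. A generic sketch, with all types and names hypothetical:

    #include <linux/types.h>

    struct my_txq {
            u16 sw_cons;    /* next descriptor the driver will reclaim */
            u16 hw_cons;    /* last descriptor the hardware completed */
    };

    static void my_tx_reclaim(struct my_txq *txq)
    {
            while (txq->sw_cons != txq->hw_cons) {
                    /* dma_unmap_*() the buffers, dev_kfree_skb() the skb */
                    txq->sw_cons++;
            }
            /* then: if free descriptors >= one max-sized packet,
             * netif_tx_wake_queue() the matching stack queue */
    }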
358 struct bnx2x *bp = fp->bp; in bnx2x_update_sge_prod() local
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp, in bnx2x_get_rxhash() argument
415 if ((bp->dev->features & NETIF_F_RXHASH) && in bnx2x_get_rxhash()
434 struct bnx2x *bp = fp->bp; in bnx2x_tpa_start() local
447 mapping = dma_map_single(&bp->pdev->dev, in bnx2x_tpa_start()
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_tpa_start()
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); in bnx2x_tpa_start()
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_sge() argument
571 mapping = dma_map_page(&bp->pdev->dev, pool->page, in bnx2x_alloc_rx_sge()
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_sge()
592 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_fill_frag_skb() argument
644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); in bnx2x_fill_frag_skb()
646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_fill_frag_skb()
650 dma_unmap_page(&bp->pdev->dev, in bnx2x_fill_frag_skb()
704 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ip_csum() argument
716 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_gro_ipv6_csum() argument
728 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_gro_csum() argument
732 gro_func(bp, skb); in bnx2x_gro_csum()
737 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_gro_receive() argument
744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); in bnx2x_gro_receive()
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); in bnx2x_gro_receive()
759 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_tpa_stop() argument
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), in bnx2x_tpa_stop()
804 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_tpa_stop()
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, in bnx2x_tpa_stop()
811 bnx2x_gro_receive(bp, fp, skb); in bnx2x_tpa_stop()
829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; in bnx2x_tpa_stop()
832 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, in bnx2x_alloc_rx_data() argument
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, in bnx2x_alloc_rx_data()
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_alloc_rx_data()
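
Every allocation path in this file (rx_sge, tpa_start, rx_data, start_xmit) repeats the map-then-check idiom visible above. A minimal sketch of that idiom against the generic DMA API; the buffer and length names are hypothetical:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int my_map_rx_buf(struct device *dev, void *data, size_t len,
                             dma_addr_t *mapping)
    {
            *mapping = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
            if (unlikely(dma_mapping_error(dev, *mapping)))
                    return -ENOMEM; /* caller frees 'data', bumps a drop counter */
            return 0;
    }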
888 struct bnx2x *bp = fp->bp; in bnx2x_rx_int() local
896 if (unlikely(bp->panic)) in bnx2x_rx_int()
926 if (unlikely(bp->panic)) in bnx2x_rx_int()
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages, in bnx2x_rx_int()
1008 if (bp->panic) in bnx2x_rx_int()
1018 dma_sync_single_for_cpu(&bp->pdev->dev, in bnx2x_rx_int()
1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; in bnx2x_rx_int()
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && in bnx2x_rx_int()
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, in bnx2x_rx_int()
1050 dma_unmap_single(&bp->pdev->dev, in bnx2x_rx_int()
1057 bnx2x_fp_qstats(bp, fp)-> in bnx2x_rx_int()
1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; in bnx2x_rx_int()
1073 skb->protocol = eth_type_trans(skb, bp->dev); in bnx2x_rx_int()
1076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); in bnx2x_rx_int()
1081 if (bp->dev->features & NETIF_F_RXCSUM) in bnx2x_rx_int()
1083 bnx2x_fp_qstats(bp, fp)); in bnx2x_rx_int()
1090 bnx2x_set_rx_ts(bp, skb); in bnx2x_rx_int()
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, in bnx2x_rx_int()
1143 struct bnx2x *bp = fp->bp; in bnx2x_msix_fp_int() local
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); in bnx2x_msix_fp_int()
1153 if (unlikely(bp->panic)) in bnx2x_msix_fp_int()
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); in bnx2x_msix_fp_int()
1168 void bnx2x_acquire_phy_lock(struct bnx2x *bp) in bnx2x_acquire_phy_lock() argument
1170 mutex_lock(&bp->port.phy_mutex); in bnx2x_acquire_phy_lock()
1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_acquire_phy_lock()
1175 void bnx2x_release_phy_lock(struct bnx2x *bp) in bnx2x_release_phy_lock() argument
1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); in bnx2x_release_phy_lock()
1179 mutex_unlock(&bp->port.phy_mutex); in bnx2x_release_phy_lock()
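
The pair above nests a per-port software mutex outside a device-level hardware lock and releases in strictly reverse order, serializing MDIO access both among local threads and among PCI functions sharing the PHY. The shape of that discipline, with hypothetical hw-lock helpers:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(my_phy_mutex);

    static void my_hw_lock(void)   { /* e.g. claim a HW semaphore register */ }
    static void my_hw_unlock(void) { /* release the HW semaphore */ }

    static void my_acquire_phy_lock(void)
    {
            mutex_lock(&my_phy_mutex);      /* software lock first */
            my_hw_lock();                   /* then the shared HW resource */
    }

    static void my_release_phy_lock(void)
    {
            my_hw_unlock();                 /* strictly reverse order */
            mutex_unlock(&my_phy_mutex);
    }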
1183 u16 bnx2x_get_mf_speed(struct bnx2x *bp) in bnx2x_get_mf_speed() argument
1185 u16 line_speed = bp->link_vars.line_speed; in bnx2x_get_mf_speed()
1186 if (IS_MF(bp)) { in bnx2x_get_mf_speed()
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp, in bnx2x_get_mf_speed()
1188 bp->mf_config[BP_VN(bp)]); in bnx2x_get_mf_speed()
1193 if (IS_MF_PERCENT_BW(bp)) in bnx2x_get_mf_speed()
1214 static void bnx2x_fill_report_data(struct bnx2x *bp, in bnx2x_fill_report_data() argument
1219 if (IS_PF(bp)) { in bnx2x_fill_report_data()
1221 data->line_speed = bnx2x_get_mf_speed(bp); in bnx2x_fill_report_data()
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) in bnx2x_fill_report_data()
1228 if (!BNX2X_NUM_ETH_QUEUES(bp)) in bnx2x_fill_report_data()
1233 if (bp->link_vars.duplex == DUPLEX_FULL) in bnx2x_fill_report_data()
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) in bnx2x_fill_report_data()
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) in bnx2x_fill_report_data()
1247 *data = bp->vf_link_vars; in bnx2x_fill_report_data()
1261 void bnx2x_link_report(struct bnx2x *bp) in bnx2x_link_report() argument
1263 bnx2x_acquire_phy_lock(bp); in bnx2x_link_report()
1264 __bnx2x_link_report(bp); in bnx2x_link_report()
1265 bnx2x_release_phy_lock(bp); in bnx2x_link_report()
1276 void __bnx2x_link_report(struct bnx2x *bp) in __bnx2x_link_report() argument
1281 if (IS_PF(bp) && !CHIP_IS_E1(bp)) in __bnx2x_link_report()
1282 bnx2x_read_mf_cfg(bp); in __bnx2x_link_report()
1285 bnx2x_fill_report_data(bp, &cur_data); in __bnx2x_link_report()
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || in __bnx2x_link_report()
1290 &bp->last_reported_link.link_report_flags) && in __bnx2x_link_report()
1295 bp->link_cnt++; in __bnx2x_link_report()
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); in __bnx2x_link_report()
1303 if (IS_PF(bp)) in __bnx2x_link_report()
1304 bnx2x_iov_link_update(bp); in __bnx2x_link_report()
1308 netif_carrier_off(bp->dev); in __bnx2x_link_report()
1309 netdev_err(bp->dev, "NIC Link is Down\n"); in __bnx2x_link_report()
1315 netif_carrier_on(bp->dev); in __bnx2x_link_report()
1341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in __bnx2x_link_report()
1364 static void bnx2x_free_tpa_pool(struct bnx2x *bp, in bnx2x_free_tpa_pool() argument
1379 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_tpa_pool()
1387 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) in bnx2x_init_rx_rings_cnic() argument
1391 for_each_rx_queue_cnic(bp, j) { in bnx2x_init_rx_rings_cnic()
1392 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings_cnic()
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings_cnic()
1406 void bnx2x_init_rx_rings(struct bnx2x *bp) in bnx2x_init_rx_rings() argument
1408 int func = BP_FUNC(bp); in bnx2x_init_rx_rings()
1413 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1414 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); in bnx2x_init_rx_rings()
1421 for (i = 0; i < MAX_AGG_QS(bp); i++) { in bnx2x_init_rx_rings()
1432 bnx2x_free_tpa_pool(bp, fp, i); in bnx2x_init_rx_rings()
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, in bnx2x_init_rx_rings()
1457 bnx2x_free_rx_sge_range(bp, fp, in bnx2x_init_rx_rings()
1459 bnx2x_free_tpa_pool(bp, fp, in bnx2x_init_rx_rings()
1460 MAX_AGG_QS(bp)); in bnx2x_init_rx_rings()
1472 for_each_eth_queue(bp, j) { in bnx2x_init_rx_rings()
1473 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_init_rx_rings()
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, in bnx2x_init_rx_rings()
1488 if (CHIP_IS_E1(bp)) { in bnx2x_init_rx_rings()
1489 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1492 REG_WR(bp, BAR_USTRORM_INTMEM + in bnx2x_init_rx_rings()
1502 struct bnx2x *bp = fp->bp; in bnx2x_free_tx_skbs_queue() local
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), in bnx2x_free_tx_skbs_queue()
1518 netdev_get_tx_queue(bp->dev, in bnx2x_free_tx_skbs_queue()
1523 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_tx_skbs_cnic() argument
1527 for_each_tx_queue_cnic(bp, i) { in bnx2x_free_tx_skbs_cnic()
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs_cnic()
1532 static void bnx2x_free_tx_skbs(struct bnx2x *bp) in bnx2x_free_tx_skbs() argument
1536 for_each_eth_queue(bp, i) { in bnx2x_free_tx_skbs()
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]); in bnx2x_free_tx_skbs()
1543 struct bnx2x *bp = fp->bp; in bnx2x_free_rx_bds() local
1556 dma_unmap_single(&bp->pdev->dev, in bnx2x_free_rx_bds()
1565 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) in bnx2x_free_rx_skbs_cnic() argument
1569 for_each_rx_queue_cnic(bp, j) { in bnx2x_free_rx_skbs_cnic()
1570 bnx2x_free_rx_bds(&bp->fp[j]); in bnx2x_free_rx_skbs_cnic()
1574 static void bnx2x_free_rx_skbs(struct bnx2x *bp) in bnx2x_free_rx_skbs() argument
1578 for_each_eth_queue(bp, j) { in bnx2x_free_rx_skbs()
1579 struct bnx2x_fastpath *fp = &bp->fp[j]; in bnx2x_free_rx_skbs()
1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); in bnx2x_free_rx_skbs()
1588 static void bnx2x_free_skbs_cnic(struct bnx2x *bp) in bnx2x_free_skbs_cnic() argument
1590 bnx2x_free_tx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1591 bnx2x_free_rx_skbs_cnic(bp); in bnx2x_free_skbs_cnic()
1594 void bnx2x_free_skbs(struct bnx2x *bp) in bnx2x_free_skbs() argument
1596 bnx2x_free_tx_skbs(bp); in bnx2x_free_skbs()
1597 bnx2x_free_rx_skbs(bp); in bnx2x_free_skbs()
1600 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) in bnx2x_update_max_mf_config() argument
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; in bnx2x_update_max_mf_config()
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { in bnx2x_update_max_mf_config()
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); in bnx2x_update_max_mf_config()
1623 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) in bnx2x_free_msix_irqs() argument
1631 if (IS_PF(bp)) { in bnx2x_free_msix_irqs()
1632 free_irq(bp->msix_table[offset].vector, bp->dev); in bnx2x_free_msix_irqs()
1634 bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1638 if (CNIC_SUPPORT(bp)) { in bnx2x_free_msix_irqs()
1644 for_each_eth_queue(bp, i) { in bnx2x_free_msix_irqs()
1648 i, bp->msix_table[offset].vector); in bnx2x_free_msix_irqs()
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); in bnx2x_free_msix_irqs()
1654 void bnx2x_free_irq(struct bnx2x *bp) in bnx2x_free_irq() argument
1656 if (bp->flags & USING_MSIX_FLAG && in bnx2x_free_irq()
1657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_free_irq()
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); in bnx2x_free_irq()
1661 if (IS_PF(bp)) in bnx2x_free_irq()
1664 bnx2x_free_msix_irqs(bp, nvecs); in bnx2x_free_irq()
1666 free_irq(bp->dev->irq, bp->dev); in bnx2x_free_irq()
1670 int bnx2x_enable_msix(struct bnx2x *bp) in bnx2x_enable_msix() argument
1675 if (IS_PF(bp)) { in bnx2x_enable_msix()
1676 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1678 bp->msix_table[0].entry); in bnx2x_enable_msix()
1683 if (CNIC_SUPPORT(bp)) { in bnx2x_enable_msix()
1684 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1686 msix_vec, bp->msix_table[msix_vec].entry); in bnx2x_enable_msix()
1691 for_each_eth_queue(bp, i) { in bnx2x_enable_msix()
1692 bp->msix_table[msix_vec].entry = msix_vec; in bnx2x_enable_msix()
1701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], in bnx2x_enable_msix()
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); in bnx2x_enable_msix()
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); in bnx2x_enable_msix()
1717 bp->flags |= USING_SINGLE_MSIX_FLAG; in bnx2x_enable_msix()
1720 bp->num_ethernet_queues = 1; in bnx2x_enable_msix()
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1734 bp->num_ethernet_queues -= diff; in bnx2x_enable_msix()
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_enable_msix()
1738 bp->num_queues); in bnx2x_enable_msix()
1741 bp->flags |= USING_MSIX_FLAG; in bnx2x_enable_msix()
1748 bp->flags |= DISABLE_MSI_FLAG; in bnx2x_enable_msix()
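
The enable path above requests one vector per ethernet queue plus slow-path (and CNIC when present) and degrades gracefully: full range, then a single MSI-X vector, then MSI/INTx. A condensed sketch of the same pci_enable_msix_range() usage; table sizing is the caller's job and the names are hypothetical:

    #include <linux/pci.h>

    /* 'tbl[i].entry' must be pre-filled with 0..want_vec-1 by the caller */
    static int my_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
                              int min_vec, int want_vec)
    {
            int rc;

            /* returns vectors granted (>= min_vec) or a negative errno */
            rc = pci_enable_msix_range(pdev, tbl, min_vec, want_vec);
            if (rc < 0)
                    /* last resort before MSI/INTx: a single MSI-X vector */
                    rc = pci_enable_msix_range(pdev, tbl, 1, 1);
            return rc;
    }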
1753 static int bnx2x_req_msix_irqs(struct bnx2x *bp) in bnx2x_req_msix_irqs() argument
1758 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1759 rc = request_irq(bp->msix_table[offset++].vector, in bnx2x_req_msix_irqs()
1761 bp->dev->name, bp->dev); in bnx2x_req_msix_irqs()
1768 if (CNIC_SUPPORT(bp)) in bnx2x_req_msix_irqs()
1771 for_each_eth_queue(bp, i) { in bnx2x_req_msix_irqs()
1772 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_req_msix_irqs()
1774 bp->dev->name, i); in bnx2x_req_msix_irqs()
1776 rc = request_irq(bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1780 bp->msix_table[offset].vector, rc); in bnx2x_req_msix_irqs()
1781 bnx2x_free_msix_irqs(bp, offset); in bnx2x_req_msix_irqs()
1788 i = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_req_msix_irqs()
1789 if (IS_PF(bp)) { in bnx2x_req_msix_irqs()
1790 offset = 1 + CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1791 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1793 bp->msix_table[0].vector, in bnx2x_req_msix_irqs()
1794 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1795 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1797 offset = CNIC_SUPPORT(bp); in bnx2x_req_msix_irqs()
1798 netdev_info(bp->dev, in bnx2x_req_msix_irqs()
1800 0, bp->msix_table[offset].vector, in bnx2x_req_msix_irqs()
1801 i - 1, bp->msix_table[offset + i - 1].vector); in bnx2x_req_msix_irqs()
1806 int bnx2x_enable_msi(struct bnx2x *bp) in bnx2x_enable_msi() argument
1810 rc = pci_enable_msi(bp->pdev); in bnx2x_enable_msi()
1815 bp->flags |= USING_MSI_FLAG; in bnx2x_enable_msi()
1820 static int bnx2x_req_irq(struct bnx2x *bp) in bnx2x_req_irq() argument
1825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) in bnx2x_req_irq()
1830 if (bp->flags & USING_MSIX_FLAG) in bnx2x_req_irq()
1831 irq = bp->msix_table[0].vector; in bnx2x_req_irq()
1833 irq = bp->pdev->irq; in bnx2x_req_irq()
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); in bnx2x_req_irq()
1838 static int bnx2x_setup_irqs(struct bnx2x *bp) in bnx2x_setup_irqs() argument
1841 if (bp->flags & USING_MSIX_FLAG && in bnx2x_setup_irqs()
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { in bnx2x_setup_irqs()
1843 rc = bnx2x_req_msix_irqs(bp); in bnx2x_setup_irqs()
1847 rc = bnx2x_req_irq(bp); in bnx2x_setup_irqs()
1852 if (bp->flags & USING_MSI_FLAG) { in bnx2x_setup_irqs()
1853 bp->dev->irq = bp->pdev->irq; in bnx2x_setup_irqs()
1854 netdev_info(bp->dev, "using MSI IRQ %d\n", in bnx2x_setup_irqs()
1855 bp->dev->irq); in bnx2x_setup_irqs()
1857 if (bp->flags & USING_MSIX_FLAG) { in bnx2x_setup_irqs()
1858 bp->dev->irq = bp->msix_table[0].vector; in bnx2x_setup_irqs()
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n", in bnx2x_setup_irqs()
1860 bp->dev->irq); in bnx2x_setup_irqs()
1867 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) in bnx2x_napi_enable_cnic() argument
1871 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_enable_cnic()
1872 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable_cnic()
1873 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable_cnic()
1877 static void bnx2x_napi_enable(struct bnx2x *bp) in bnx2x_napi_enable() argument
1881 for_each_eth_queue(bp, i) { in bnx2x_napi_enable()
1882 bnx2x_fp_busy_poll_init(&bp->fp[i]); in bnx2x_napi_enable()
1883 napi_enable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_enable()
1887 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) in bnx2x_napi_disable_cnic() argument
1891 for_each_rx_queue_cnic(bp, i) { in bnx2x_napi_disable_cnic()
1892 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable_cnic()
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable_cnic()
1898 static void bnx2x_napi_disable(struct bnx2x *bp) in bnx2x_napi_disable() argument
1902 for_each_eth_queue(bp, i) { in bnx2x_napi_disable()
1903 napi_disable(&bnx2x_fp(bp, i, napi)); in bnx2x_napi_disable()
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i])) in bnx2x_napi_disable()
1909 void bnx2x_netif_start(struct bnx2x *bp) in bnx2x_netif_start() argument
1911 if (netif_running(bp->dev)) { in bnx2x_netif_start()
1912 bnx2x_napi_enable(bp); in bnx2x_netif_start()
1913 if (CNIC_LOADED(bp)) in bnx2x_netif_start()
1914 bnx2x_napi_enable_cnic(bp); in bnx2x_netif_start()
1915 bnx2x_int_enable(bp); in bnx2x_netif_start()
1916 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_netif_start()
1917 netif_tx_wake_all_queues(bp->dev); in bnx2x_netif_start()
1921 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) in bnx2x_netif_stop() argument
1923 bnx2x_int_disable_sync(bp, disable_hw); in bnx2x_netif_stop()
1924 bnx2x_napi_disable(bp); in bnx2x_netif_stop()
1925 if (CNIC_LOADED(bp)) in bnx2x_netif_stop()
1926 bnx2x_napi_disable_cnic(bp); in bnx2x_netif_stop()
1932 struct bnx2x *bp = netdev_priv(dev); in bnx2x_select_queue() local
1934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { in bnx2x_select_queue()
1948 return bnx2x_fcoe_tx(bp, txq_index); in bnx2x_select_queue()
1952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_select_queue()
1955 void bnx2x_set_num_queues(struct bnx2x *bp) in bnx2x_set_num_queues() argument
1958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); in bnx2x_set_num_queues()
1961 if (IS_MF_STORAGE_ONLY(bp)) in bnx2x_set_num_queues()
1962 bp->num_ethernet_queues = 1; in bnx2x_set_num_queues()
1965 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ in bnx2x_set_num_queues()
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; in bnx2x_set_num_queues()
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); in bnx2x_set_num_queues()
1993 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) in bnx2x_set_real_num_queues() argument
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; in bnx2x_set_real_num_queues()
1998 rx = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_set_real_num_queues()
2001 if (include_cnic && !NO_FCOE(bp)) { in bnx2x_set_real_num_queues()
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx); in bnx2x_set_real_num_queues()
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx); in bnx2x_set_real_num_queues()
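
The helper above publishes the active queue counts to the stack: TX is ethernet queues times max_cos (plus FCoE when loaded), RX is just the ethernet queue count (plus FCoE). The minimal pairing looks like this; the counts are hypothetical inputs:

    #include <linux/netdevice.h>

    static int my_set_queue_counts(struct net_device *dev,
                                   unsigned int tx, unsigned int rx)
    {
            int rc;

            rc = netif_set_real_num_tx_queues(dev, tx);
            if (rc)
                    return rc;
            return netif_set_real_num_rx_queues(dev, rx);
    }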
2023 static void bnx2x_set_rx_buf_size(struct bnx2x *bp) in bnx2x_set_rx_buf_size() argument
2027 for_each_queue(bp, i) { in bnx2x_set_rx_buf_size()
2028 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_set_rx_buf_size()
2041 mtu = bp->dev->mtu; in bnx2x_set_rx_buf_size()
2055 static int bnx2x_init_rss(struct bnx2x *bp) in bnx2x_init_rss() argument
2058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_init_rss()
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) in bnx2x_init_rss()
2064 bp->rss_conf_obj.ind_table[i] = in bnx2x_init_rss()
2065 bp->fp->cl_id + in bnx2x_init_rss()
2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); in bnx2x_init_rss()
2079 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, in bnx2x_rss() argument
2108 if (!CHIP_IS_E1x(bp)) { in bnx2x_rss()
2131 if (IS_PF(bp)) in bnx2x_rss()
2132 return bnx2x_config_rss(bp, &params); in bnx2x_rss()
2134 return bnx2x_vfpf_config_rss(bp, &params); in bnx2x_rss()
2137 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) in bnx2x_init_hw() argument
2144 func_params.f_obj = &bp->func_obj; in bnx2x_init_hw()
2149 return bnx2x_func_state_change(bp, &func_params); in bnx2x_init_hw()
2156 void bnx2x_squeeze_objects(struct bnx2x *bp) in bnx2x_squeeze_objects() argument
2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; in bnx2x_squeeze_objects()
2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, in bnx2x_squeeze_objects()
2186 rparam.mcast_obj = &bp->mcast_obj; in bnx2x_squeeze_objects()
2193 netif_addr_lock_bh(bp->dev); in bnx2x_squeeze_objects()
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); in bnx2x_squeeze_objects()
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2205 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); in bnx2x_squeeze_objects()
2211 netif_addr_unlock_bh(bp->dev); in bnx2x_squeeze_objects()
2215 #define LOAD_ERROR_EXIT(bp, label) \ argument
2217 (bp)->state = BNX2X_STATE_ERROR; \
2221 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2223 bp->cnic_loaded = false; \
2227 #define LOAD_ERROR_EXIT(bp, label) \ argument
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2233 #define LOAD_ERROR_EXIT_CNIC(bp, label) \ argument
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
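
The two LOAD_ERROR_EXIT variants above (the second pair is the BNX2X_STOP_ON_ERROR build, which additionally sets panic) wrap the kernel's standard goto-unwind idiom in a do/while(0) macro. A generic sketch of that shape, all names hypothetical:

    #define MY_STATE_ERROR 1

    #define MY_LOAD_ERROR_EXIT(priv, label)                 \
            do {                                            \
                    (priv)->state = MY_STATE_ERROR;         \
                    goto label;                             \
            } while (0)

    /* usage inside a load routine:
     *
     *      if (my_alloc_rings(priv))
     *              MY_LOAD_ERROR_EXIT(priv, load_error1);
     *      ...
     * load_error1:
     *      my_free_irq(priv);
     *      return -EBUSY;
     */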
2241 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) in bnx2x_free_fw_stats_mem() argument
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, in bnx2x_free_fw_stats_mem()
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_free_fw_stats_mem()
2248 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) in bnx2x_alloc_fw_stats_mem() argument
2251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; in bnx2x_alloc_fw_stats_mem()
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; in bnx2x_alloc_fw_stats_mem()
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; in bnx2x_alloc_fw_stats_mem()
2268 if (IS_SRIOV(bp)) in bnx2x_alloc_fw_stats_mem()
2269 vf_headroom = bnx2x_vf_headroom(bp); in bnx2x_alloc_fw_stats_mem()
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + in bnx2x_alloc_fw_stats_mem()
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? in bnx2x_alloc_fw_stats_mem()
2282 bp->fw_stats_num, vf_headroom, num_groups); in bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + in bnx2x_alloc_fw_stats_mem()
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + in bnx2x_alloc_fw_stats_mem()
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, in bnx2x_alloc_fw_stats_mem()
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2302 if (!bp->fw_stats) in bnx2x_alloc_fw_stats_mem()
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; in bnx2x_alloc_fw_stats_mem()
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping; in bnx2x_alloc_fw_stats_mem()
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) in bnx2x_alloc_fw_stats_mem()
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); in bnx2x_alloc_fw_stats_mem()
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping + in bnx2x_alloc_fw_stats_mem()
2311 bp->fw_stats_req_sz; in bnx2x_alloc_fw_stats_mem()
2314 U64_HI(bp->fw_stats_req_mapping), in bnx2x_alloc_fw_stats_mem()
2315 U64_LO(bp->fw_stats_req_mapping)); in bnx2x_alloc_fw_stats_mem()
2317 U64_HI(bp->fw_stats_data_mapping), in bnx2x_alloc_fw_stats_mem()
2318 U64_LO(bp->fw_stats_data_mapping)); in bnx2x_alloc_fw_stats_mem()
2322 bnx2x_free_fw_stats_mem(bp); in bnx2x_alloc_fw_stats_mem()
2328 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) in bnx2x_nic_load_request() argument
2333 bp->fw_seq = in bnx2x_nic_load_request()
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & in bnx2x_nic_load_request()
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); in bnx2x_nic_load_request()
2339 bp->fw_drv_pulse_wr_seq = in bnx2x_nic_load_request()
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & in bnx2x_nic_load_request()
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); in bnx2x_nic_load_request()
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) in bnx2x_nic_load_request()
2350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); in bnx2x_nic_load_request()
2372 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) in bnx2x_compare_fw_ver() argument
2384 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); in bnx2x_compare_fw_ver()
2404 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) in bnx2x_nic_load_no_mcp() argument
2406 int path = BP_PATH(bp); in bnx2x_nic_load_no_mcp()
2425 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) in bnx2x_nic_load_pmf() argument
2430 bp->port.pmf = 1; in bnx2x_nic_load_pmf()
2437 bp->port.pmf = 0; in bnx2x_nic_load_pmf()
2440 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); in bnx2x_nic_load_pmf()
2443 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) in bnx2x_nic_load_afex_dcc() argument
2447 (bp->common.shmem2_base)) { in bnx2x_nic_load_afex_dcc()
2448 if (SHMEM2_HAS(bp, dcc_support)) in bnx2x_nic_load_afex_dcc()
2449 SHMEM2_WR(bp, dcc_support, in bnx2x_nic_load_afex_dcc()
2452 if (SHMEM2_HAS(bp, afex_driver_support)) in bnx2x_nic_load_afex_dcc()
2453 SHMEM2_WR(bp, afex_driver_support, in bnx2x_nic_load_afex_dcc()
2458 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load_afex_dcc()
2470 static void bnx2x_bz_fp(struct bnx2x *bp, int index) in bnx2x_bz_fp() argument
2472 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_bz_fp()
2486 fp->bp = bp; in bnx2x_bz_fp()
2489 fp->max_cos = bp->max_cos; in bnx2x_bz_fp()
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; in bnx2x_bz_fp()
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * in bnx2x_bz_fp()
2500 BNX2X_NUM_ETH_QUEUES(bp) + index]; in bnx2x_bz_fp()
2505 if (bp->dev->features & NETIF_F_LRO) in bnx2x_bz_fp()
2507 else if (bp->dev->features & NETIF_F_GRO && in bnx2x_bz_fp()
2508 bnx2x_mtu_allows_gro(bp->dev->mtu)) in bnx2x_bz_fp()
2516 if (bp->disable_tpa || IS_FCOE_FP(fp)) in bnx2x_bz_fp()
2520 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) in bnx2x_set_os_driver_state() argument
2524 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) in bnx2x_set_os_driver_state()
2527 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); in bnx2x_set_os_driver_state()
2531 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); in bnx2x_set_os_driver_state()
2534 int bnx2x_load_cnic(struct bnx2x *bp) in bnx2x_load_cnic() argument
2536 int i, rc, port = BP_PORT(bp); in bnx2x_load_cnic()
2540 mutex_init(&bp->cnic_mutex); in bnx2x_load_cnic()
2542 if (IS_PF(bp)) { in bnx2x_load_cnic()
2543 rc = bnx2x_alloc_mem_cnic(bp); in bnx2x_load_cnic()
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2550 rc = bnx2x_alloc_fp_mem_cnic(bp); in bnx2x_load_cnic()
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2557 rc = bnx2x_set_real_num_queues(bp, 1); in bnx2x_load_cnic()
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); in bnx2x_load_cnic()
2564 bnx2x_add_all_napi_cnic(bp); in bnx2x_load_cnic()
2566 bnx2x_napi_enable_cnic(bp); in bnx2x_load_cnic()
2568 rc = bnx2x_init_hw_func_cnic(bp); in bnx2x_load_cnic()
2570 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); in bnx2x_load_cnic()
2572 bnx2x_nic_init_cnic(bp); in bnx2x_load_cnic()
2574 if (IS_PF(bp)) { in bnx2x_load_cnic()
2576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); in bnx2x_load_cnic()
2579 for_each_cnic_queue(bp, i) { in bnx2x_load_cnic()
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); in bnx2x_load_cnic()
2583 LOAD_ERROR_EXIT(bp, load_error_cnic2); in bnx2x_load_cnic()
2589 bnx2x_set_rx_mode_inner(bp); in bnx2x_load_cnic()
2592 bnx2x_get_iscsi_info(bp); in bnx2x_load_cnic()
2593 bnx2x_setup_cnic_irq_info(bp); in bnx2x_load_cnic()
2594 bnx2x_setup_cnic_info(bp); in bnx2x_load_cnic()
2595 bp->cnic_loaded = true; in bnx2x_load_cnic()
2596 if (bp->state == BNX2X_STATE_OPEN) in bnx2x_load_cnic()
2597 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); in bnx2x_load_cnic()
2606 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); in bnx2x_load_cnic()
2609 bnx2x_napi_disable_cnic(bp); in bnx2x_load_cnic()
2611 if (bnx2x_set_real_num_queues(bp, 0)) in bnx2x_load_cnic()
2615 bnx2x_free_fp_mem_cnic(bp); in bnx2x_load_cnic()
2616 bnx2x_free_mem_cnic(bp); in bnx2x_load_cnic()
2622 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) in bnx2x_nic_load() argument
2624 int port = BP_PORT(bp); in bnx2x_nic_load()
2629 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); in bnx2x_nic_load()
2632 if (unlikely(bp->panic)) { in bnx2x_nic_load()
2638 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; in bnx2x_nic_load()
2641 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); in bnx2x_nic_load()
2643 &bp->last_reported_link.link_report_flags); in bnx2x_nic_load()
2645 if (IS_PF(bp)) in bnx2x_nic_load()
2647 bnx2x_ilt_set_info(bp); in bnx2x_nic_load()
2654 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); in bnx2x_nic_load()
2655 for_each_queue(bp, i) in bnx2x_nic_load()
2656 bnx2x_bz_fp(bp, i); in bnx2x_nic_load()
2657 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + in bnx2x_nic_load()
2658 bp->num_cnic_queues) * in bnx2x_nic_load()
2661 bp->fcoe_init = false; in bnx2x_nic_load()
2664 bnx2x_set_rx_buf_size(bp); in bnx2x_nic_load()
2666 if (IS_PF(bp)) { in bnx2x_nic_load()
2667 rc = bnx2x_alloc_mem(bp); in bnx2x_nic_load()
2677 rc = bnx2x_alloc_fp_mem(bp); in bnx2x_nic_load()
2680 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2684 if (bnx2x_alloc_fw_stats_mem(bp)) in bnx2x_nic_load()
2685 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2688 if (IS_VF(bp)) { in bnx2x_nic_load()
2689 rc = bnx2x_vfpf_init(bp); in bnx2x_nic_load()
2691 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2698 rc = bnx2x_set_real_num_queues(bp, 0); in bnx2x_nic_load()
2701 LOAD_ERROR_EXIT(bp, load_error0); in bnx2x_nic_load()
2708 bnx2x_setup_tc(bp->dev, bp->max_cos); in bnx2x_nic_load()
2711 bnx2x_add_all_napi(bp); in bnx2x_nic_load()
2713 bnx2x_napi_enable(bp); in bnx2x_nic_load()
2715 if (IS_PF(bp)) { in bnx2x_nic_load()
2717 bnx2x_set_pf_load(bp); in bnx2x_nic_load()
2720 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2722 rc = bnx2x_nic_load_request(bp, &load_code); in bnx2x_nic_load()
2724 LOAD_ERROR_EXIT(bp, load_error1); in bnx2x_nic_load()
2727 rc = bnx2x_compare_fw_ver(bp, load_code, true); in bnx2x_nic_load()
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2730 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2733 load_code = bnx2x_nic_load_no_mcp(bp, port); in bnx2x_nic_load()
2737 bnx2x_nic_load_pmf(bp, load_code); in bnx2x_nic_load()
2740 bnx2x__init_func_obj(bp); in bnx2x_nic_load()
2743 rc = bnx2x_init_hw(bp, load_code); in bnx2x_nic_load()
2746 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2747 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2751 bnx2x_pre_irq_nic_init(bp); in bnx2x_nic_load()
2754 rc = bnx2x_setup_irqs(bp); in bnx2x_nic_load()
2757 if (IS_PF(bp)) in bnx2x_nic_load()
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2759 LOAD_ERROR_EXIT(bp, load_error2); in bnx2x_nic_load()
2763 if (IS_PF(bp)) { in bnx2x_nic_load()
2765 bnx2x_post_irq_nic_init(bp, load_code); in bnx2x_nic_load()
2767 bnx2x_init_bp_objs(bp); in bnx2x_nic_load()
2768 bnx2x_iov_nic_init(bp); in bnx2x_nic_load()
2771 bp->afex_def_vlan_tag = -1; in bnx2x_nic_load()
2772 bnx2x_nic_load_afex_dcc(bp, load_code); in bnx2x_nic_load()
2773 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; in bnx2x_nic_load()
2774 rc = bnx2x_func_start(bp); in bnx2x_nic_load()
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); in bnx2x_nic_load()
2779 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2783 if (!BP_NOMCP(bp)) { in bnx2x_nic_load()
2784 load_code = bnx2x_fw_command(bp, in bnx2x_nic_load()
2789 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2794 bnx2x_update_coalesce(bp); in bnx2x_nic_load()
2798 rc = bnx2x_setup_leading(bp); in bnx2x_nic_load()
2801 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2805 for_each_nondefault_eth_queue(bp, i) { in bnx2x_nic_load()
2806 if (IS_PF(bp)) in bnx2x_nic_load()
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); in bnx2x_nic_load()
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); in bnx2x_nic_load()
2812 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2817 rc = bnx2x_init_rss(bp); in bnx2x_nic_load()
2820 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2824 bp->state = BNX2X_STATE_OPEN; in bnx2x_nic_load()
2827 if (IS_PF(bp)) in bnx2x_nic_load()
2828 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_nic_load()
2830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, in bnx2x_nic_load()
2834 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2837 if (IS_PF(bp) && bp->pending_max) { in bnx2x_nic_load()
2838 bnx2x_update_max_mf_config(bp, bp->pending_max); in bnx2x_nic_load()
2839 bp->pending_max = 0; in bnx2x_nic_load()
2842 if (bp->port.pmf) { in bnx2x_nic_load()
2843 rc = bnx2x_initial_phy_init(bp, load_mode); in bnx2x_nic_load()
2845 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2847 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; in bnx2x_nic_load()
2852 rc = bnx2x_vlan_reconfigure_vid(bp); in bnx2x_nic_load()
2854 LOAD_ERROR_EXIT(bp, load_error3); in bnx2x_nic_load()
2857 bnx2x_set_rx_mode_inner(bp); in bnx2x_nic_load()
2859 if (bp->flags & PTP_SUPPORTED) { in bnx2x_nic_load()
2860 bnx2x_init_ptp(bp); in bnx2x_nic_load()
2861 bnx2x_configure_ptp_filters(bp); in bnx2x_nic_load()
2867 netif_tx_wake_all_queues(bp->dev); in bnx2x_nic_load()
2871 netif_tx_start_all_queues(bp->dev); in bnx2x_nic_load()
2877 bp->state = BNX2X_STATE_DIAG; in bnx2x_nic_load()
2884 if (bp->port.pmf) in bnx2x_nic_load()
2885 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); in bnx2x_nic_load()
2887 bnx2x__link_status_update(bp); in bnx2x_nic_load()
2890 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnx2x_nic_load()
2892 if (CNIC_ENABLED(bp)) in bnx2x_nic_load()
2893 bnx2x_load_cnic(bp); in bnx2x_nic_load()
2895 if (IS_PF(bp)) in bnx2x_nic_load()
2896 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); in bnx2x_nic_load()
2898 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_load()
2901 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_load()
2903 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); in bnx2x_nic_load()
2904 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_load()
2910 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { in bnx2x_nic_load()
2912 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_nic_load()
2917 if (IS_PF(bp)) in bnx2x_nic_load()
2918 bnx2x_update_mfw_dump(bp); in bnx2x_nic_load()
2921 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) in bnx2x_nic_load()
2922 bnx2x_dcbx_init(bp, false); in bnx2x_nic_load()
2924 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) in bnx2x_nic_load()
2925 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); in bnx2x_nic_load()
2933 if (IS_PF(bp)) { in bnx2x_nic_load()
2934 bnx2x_int_disable_sync(bp, 1); in bnx2x_nic_load()
2937 bnx2x_squeeze_objects(bp); in bnx2x_nic_load()
2941 bnx2x_free_skbs(bp); in bnx2x_nic_load()
2942 for_each_rx_queue(bp, i) in bnx2x_nic_load()
2943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_load()
2946 bnx2x_free_irq(bp); in bnx2x_nic_load()
2948 if (IS_PF(bp) && !BP_NOMCP(bp)) { in bnx2x_nic_load()
2949 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); in bnx2x_nic_load()
2950 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); in bnx2x_nic_load()
2953 bp->port.pmf = 0; in bnx2x_nic_load()
2955 bnx2x_napi_disable(bp); in bnx2x_nic_load()
2956 bnx2x_del_all_napi(bp); in bnx2x_nic_load()
2959 if (IS_PF(bp)) in bnx2x_nic_load()
2960 bnx2x_clear_pf_load(bp); in bnx2x_nic_load()
2962 bnx2x_free_fw_stats_mem(bp); in bnx2x_nic_load()
2963 bnx2x_free_fp_mem(bp); in bnx2x_nic_load()
2964 bnx2x_free_mem(bp); in bnx2x_nic_load()
2970 int bnx2x_drain_tx_queues(struct bnx2x *bp) in bnx2x_drain_tx_queues() argument
2975 for_each_tx_queue(bp, i) { in bnx2x_drain_tx_queues()
2976 struct bnx2x_fastpath *fp = &bp->fp[i]; in bnx2x_drain_tx_queues()
2979 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); in bnx2x_drain_tx_queues()
2987 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) in bnx2x_nic_unload() argument
2994 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) in bnx2x_nic_unload()
2995 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); in bnx2x_nic_unload()
2998 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { in bnx2x_nic_unload()
3000 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); in bnx2x_nic_unload()
3001 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], in bnx2x_nic_unload()
3005 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && in bnx2x_nic_unload()
3006 (bp->state == BNX2X_STATE_CLOSED || in bnx2x_nic_unload()
3007 bp->state == BNX2X_STATE_ERROR)) { in bnx2x_nic_unload()
3015 bp->recovery_state = BNX2X_RECOVERY_DONE; in bnx2x_nic_unload()
3016 bp->is_leader = 0; in bnx2x_nic_unload()
3017 bnx2x_release_leader_lock(bp); in bnx2x_nic_unload()
3031 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) in bnx2x_nic_unload()
3038 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; in bnx2x_nic_unload()
3042 bnx2x_iov_channel_down(bp); in bnx2x_nic_unload()
3044 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3045 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); in bnx2x_nic_unload()
3048 bnx2x_tx_disable(bp); in bnx2x_nic_unload()
3049 netdev_reset_tc(bp->dev); in bnx2x_nic_unload()
3051 bp->rx_mode = BNX2X_RX_MODE_NONE; in bnx2x_nic_unload()
3053 del_timer_sync(&bp->timer); in bnx2x_nic_unload()
3055 if (IS_PF(bp)) { in bnx2x_nic_unload()
3057 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; in bnx2x_nic_unload()
3058 bnx2x_drv_pulse(bp); in bnx2x_nic_unload()
3059 bnx2x_stats_handle(bp, STATS_EVENT_STOP); in bnx2x_nic_unload()
3060 bnx2x_save_statistics(bp); in bnx2x_nic_unload()
3064 bnx2x_drain_tx_queues(bp); in bnx2x_nic_unload()
3069 if (IS_VF(bp)) in bnx2x_nic_unload()
3070 bnx2x_vfpf_close_vf(bp); in bnx2x_nic_unload()
3073 bnx2x_chip_cleanup(bp, unload_mode, keep_link); in bnx2x_nic_unload()
3076 bnx2x_send_unload_req(bp, unload_mode); in bnx2x_nic_unload()
3084 if (!CHIP_IS_E1x(bp)) in bnx2x_nic_unload()
3085 bnx2x_pf_disable(bp); in bnx2x_nic_unload()
3088 bnx2x_netif_stop(bp, 1); in bnx2x_nic_unload()
3090 bnx2x_del_all_napi(bp); in bnx2x_nic_unload()
3091 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3092 bnx2x_del_all_napi_cnic(bp); in bnx2x_nic_unload()
3094 bnx2x_free_irq(bp); in bnx2x_nic_unload()
3097 bnx2x_send_unload_done(bp, false); in bnx2x_nic_unload()
3104 if (IS_PF(bp)) in bnx2x_nic_unload()
3105 bnx2x_squeeze_objects(bp); in bnx2x_nic_unload()
3108 bp->sp_state = 0; in bnx2x_nic_unload()
3110 bp->port.pmf = 0; in bnx2x_nic_unload()
3113 bp->sp_rtnl_state = 0; in bnx2x_nic_unload()
3117 bnx2x_free_skbs(bp); in bnx2x_nic_unload()
3118 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3119 bnx2x_free_skbs_cnic(bp); in bnx2x_nic_unload()
3120 for_each_rx_queue(bp, i) in bnx2x_nic_unload()
3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); in bnx2x_nic_unload()
3123 bnx2x_free_fp_mem(bp); in bnx2x_nic_unload()
3124 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3125 bnx2x_free_fp_mem_cnic(bp); in bnx2x_nic_unload()
3127 if (IS_PF(bp)) { in bnx2x_nic_unload()
3128 if (CNIC_LOADED(bp)) in bnx2x_nic_unload()
3129 bnx2x_free_mem_cnic(bp); in bnx2x_nic_unload()
3131 bnx2x_free_mem(bp); in bnx2x_nic_unload()
3133 bp->state = BNX2X_STATE_CLOSED; in bnx2x_nic_unload()
3134 bp->cnic_loaded = false; in bnx2x_nic_unload()
3137 if (IS_PF(bp)) in bnx2x_nic_unload()
3138 bnx2x_update_mng_version(bp); in bnx2x_nic_unload()
3143 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { in bnx2x_nic_unload()
3144 bnx2x_set_reset_in_progress(bp); in bnx2x_nic_unload()
3148 bnx2x_set_reset_global(bp); in bnx2x_nic_unload()
3154 if (IS_PF(bp) && in bnx2x_nic_unload()
3155 !bnx2x_clear_pf_load(bp) && in bnx2x_nic_unload()
3156 bnx2x_reset_is_done(bp, BP_PATH(bp))) in bnx2x_nic_unload()
3157 bnx2x_disable_close_the_gate(bp); in bnx2x_nic_unload()
3164 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) in bnx2x_set_power_state() argument
3169 if (!bp->pdev->pm_cap) { in bnx2x_set_power_state()
3174 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); in bnx2x_set_power_state()
3178 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3190 if (atomic_read(&bp->pdev->enable_cnt) != 1) in bnx2x_set_power_state()
3193 if (CHIP_REV_IS_SLOW(bp)) in bnx2x_set_power_state()
3199 if (bp->wol) in bnx2x_set_power_state()
3202 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, in bnx2x_set_power_state()
3211 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); in bnx2x_set_power_state()
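
The power-state helper above is a read-modify-write of the PCI_PM_CTRL word at the device's PM capability offset, with the D3hot branch additionally gated on WoL and enable_cnt. A minimal sketch of the D0 half of that access pattern:

    #include <linux/pci.h>
    #include <linux/errno.h>

    static int my_set_d0(struct pci_dev *pdev)
    {
            u16 pmcsr;

            if (!pdev->pm_cap)
                    return -EOPNOTSUPP;     /* no PM capability at all */

            pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;       /* D-state bits 00 -> D0 */
            pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
            return 0;
    }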
3226 struct bnx2x *bp = fp->bp; in bnx2x_poll() local
3230 if (unlikely(bp->panic)) { in bnx2x_poll()
3240 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); in bnx2x_poll()
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, in bnx2x_poll()
3303 struct bnx2x *bp = fp->bp; in bnx2x_low_latency_recv() local
3306 if ((bp->state == BNX2X_STATE_CLOSED) || in bnx2x_low_latency_recv()
3307 (bp->state == BNX2X_STATE_ERROR) || in bnx2x_low_latency_recv()
3308 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) in bnx2x_low_latency_recv()
3327 static u16 bnx2x_tx_split(struct bnx2x *bp, in bnx2x_tx_split() argument
3386 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) in bnx2x_xmit_type() argument
3404 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { in bnx2x_xmit_type()
3443 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_pkt_req_lin() argument
3564 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_enc() argument
3598 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum_e2() argument
3620 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_sbd_csum() argument
3641 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, in bnx2x_set_pbd_csum() argument
3784 struct bnx2x *bp = netdev_priv(dev); in bnx2x_start_xmit() local
3798 u32 xmit_type = bnx2x_xmit_type(bp, skb); in bnx2x_start_xmit()
3806 if (unlikely(bp->panic)) in bnx2x_start_xmit()
3813 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); in bnx2x_start_xmit()
3815 txdata = &bp->bnx2x_txq[txq_index]; in bnx2x_start_xmit()
3826 if (unlikely(bnx2x_tx_avail(bp, txdata) < in bnx2x_start_xmit()
3833 bnx2x_fp_qstats(bp, txdata->parent_fp); in bnx2x_start_xmit()
3838 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
3865 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { in bnx2x_start_xmit()
3867 bp->lin_cnt++; in bnx2x_start_xmit()
3877 mapping = dma_map_single(&bp->pdev->dev, skb->data, in bnx2x_start_xmit()
3879 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
3911 if (!(bp->flags & TX_TIMESTAMPING_EN)) { in bnx2x_start_xmit()
3913 } else if (bp->ptp_tx_skb) { in bnx2x_start_xmit()
3918 bp->ptp_tx_skb = skb_get(skb); in bnx2x_start_xmit()
3919 bp->ptp_tx_start = jiffies; in bnx2x_start_xmit()
3920 schedule_work(&bp->ptp_task); in bnx2x_start_xmit()
3946 if (IS_VF(bp)) in bnx2x_start_xmit()
3963 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); in bnx2x_start_xmit()
3965 if (!CHIP_IS_E1x(bp)) { in bnx2x_start_xmit()
3973 hlen = bnx2x_set_pbd_csum_enc(bp, skb, in bnx2x_start_xmit()
4007 hlen = bnx2x_set_pbd_csum_e2(bp, skb, in bnx2x_start_xmit()
4016 if (IS_VF(bp)) { in bnx2x_start_xmit()
4028 if (bp->flags & TX_SWITCHING) in bnx2x_start_xmit()
4053 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); in bnx2x_start_xmit()
4084 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, in bnx2x_start_xmit()
4088 if (!CHIP_IS_E1x(bp)) in bnx2x_start_xmit()
4109 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, in bnx2x_start_xmit()
4111 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { in bnx2x_start_xmit()
4123 bnx2x_free_tx_pkt(bp, txdata, in bnx2x_start_xmit()
4206 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); in bnx2x_start_xmit()
4212 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { in bnx2x_start_xmit()
4220 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; in bnx2x_start_xmit()
4221 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) in bnx2x_start_xmit()
4229 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) in bnx2x_get_c2s_mapping() argument
4231 int mfw_vn = BP_FW_MB_IDX(bp); in bnx2x_get_c2s_mapping()
4235 if (!IS_MF_BD(bp)) { in bnx2x_get_c2s_mapping()
4245 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); in bnx2x_get_c2s_mapping()
4252 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); in bnx2x_get_c2s_mapping()
4259 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); in bnx2x_get_c2s_mapping()
4274 struct bnx2x *bp = netdev_priv(dev); in bnx2x_setup_tc() local
4288 if (num_tc > bp->max_cos) { in bnx2x_setup_tc()
4290 num_tc, bp->max_cos); in bnx2x_setup_tc()
4300 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); in bnx2x_setup_tc()
4306 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4309 outer_prio, bp->prio_to_cos[outer_prio]); in bnx2x_setup_tc()
4323 for (cos = 0; cos < bp->max_cos; cos++) { in bnx2x_setup_tc()
4324 count = BNX2X_NUM_ETH_QUEUES(bp); in bnx2x_setup_tc()
4325 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); in bnx2x_setup_tc()
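
The loop above carves a contiguous count/offset range of TX queues out of one flat queue array for each CoS. The generic netdev side of that wiring; queues_per_tc is a hypothetical input:

    #include <linux/netdevice.h>

    static int my_setup_tc(struct net_device *dev, u8 num_tc, u16 queues_per_tc)
    {
            u8 tc;
            int rc;

            rc = netdev_set_num_tc(dev, num_tc);
            if (rc)
                    return rc;
            for (tc = 0; tc < num_tc; tc++)
                    /* each class owns 'queues_per_tc' queues starting at
                     * tc * queues_per_tc in the flat TX queue array */
                    netdev_set_tc_queue(dev, tc, queues_per_tc,
                                        tc * queues_per_tc);
            return 0;
    }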
4339 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mac_addr() local
4347 if (IS_MF_STORAGE_ONLY(bp)) { in bnx2x_change_mac_addr()
4353 rc = bnx2x_set_eth_mac(bp, false); in bnx2x_change_mac_addr()
4361 rc = bnx2x_set_eth_mac(bp, true); in bnx2x_change_mac_addr()
4363 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) in bnx2x_change_mac_addr()
4364 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); in bnx2x_change_mac_addr()
4369 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) in bnx2x_free_fp_mem_at() argument
4371 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); in bnx2x_free_fp_mem_at()
4372 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; in bnx2x_free_fp_mem_at()
4382 if (!CHIP_IS_E1x(bp)) in bnx2x_free_fp_mem_at()
4384 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4389 bnx2x_fp(bp, fp_index, in bnx2x_free_fp_mem_at()
4395 if (!skip_rx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4399 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); in bnx2x_free_fp_mem_at()
4400 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), in bnx2x_free_fp_mem_at()
4401 bnx2x_fp(bp, fp_index, rx_desc_mapping), in bnx2x_free_fp_mem_at()
4404 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), in bnx2x_free_fp_mem_at()
4405 bnx2x_fp(bp, fp_index, rx_comp_mapping), in bnx2x_free_fp_mem_at()
4410 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); in bnx2x_free_fp_mem_at()
4411 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), in bnx2x_free_fp_mem_at()
4412 bnx2x_fp(bp, fp_index, rx_sge_mapping), in bnx2x_free_fp_mem_at()
4417 if (!skip_tx_queue(bp, fp_index)) { in bnx2x_free_fp_mem_at()
4435 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) in bnx2x_free_fp_mem_cnic() argument
4438 for_each_cnic_queue(bp, i) in bnx2x_free_fp_mem_cnic()
4439 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem_cnic()
4442 void bnx2x_free_fp_mem(struct bnx2x *bp) in bnx2x_free_fp_mem() argument
4445 for_each_eth_queue(bp, i) in bnx2x_free_fp_mem()
4446 bnx2x_free_fp_mem_at(bp, i); in bnx2x_free_fp_mem()
4449 static void set_sb_shortcuts(struct bnx2x *bp, int index) in set_sb_shortcuts() argument
4451 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); in set_sb_shortcuts()
4452 if (!CHIP_IS_E1x(bp)) { in set_sb_shortcuts()
4453 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4455 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4458 bnx2x_fp(bp, index, sb_index_values) = in set_sb_shortcuts()
4460 bnx2x_fp(bp, index, sb_running_index) = in set_sb_shortcuts()
4469 struct bnx2x *bp = fp->bp; in bnx2x_alloc_rx_bds() local
4480 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { in bnx2x_alloc_rx_bds()
4499 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; in bnx2x_alloc_rx_bds()
4522 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) in bnx2x_alloc_fp_mem_at() argument
4525 struct bnx2x_fastpath *fp = &bp->fp[index]; in bnx2x_alloc_fp_mem_at()
4530 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { in bnx2x_alloc_fp_mem_at()
4532 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4533 } else if (!bp->rx_ring_size) { in bnx2x_alloc_fp_mem_at()
4534 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); in bnx2x_alloc_fp_mem_at()
4536 if (CHIP_IS_E3(bp)) { in bnx2x_alloc_fp_mem_at()
4537 u32 cfg = SHMEM_RD(bp, in bnx2x_alloc_fp_mem_at()
4538 dev_info.port_hw_config[BP_PORT(bp)]. in bnx2x_alloc_fp_mem_at()
4548 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : in bnx2x_alloc_fp_mem_at()
4551 bp->rx_ring_size = rx_ring_size; in bnx2x_alloc_fp_mem_at()
4553 rx_ring_size = bp->rx_ring_size; in bnx2x_alloc_fp_mem_at()
4558 sb = &bnx2x_fp(bp, index, status_blk); in bnx2x_alloc_fp_mem_at()
4562 if (!CHIP_IS_E1x(bp)) { in bnx2x_alloc_fp_mem_at()
4563 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4568 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), in bnx2x_alloc_fp_mem_at()
4579 set_sb_shortcuts(bp, index); in bnx2x_alloc_fp_mem_at()
4582 if (!skip_tx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4604 if (!skip_rx_queue(bp, index)) { in bnx2x_alloc_fp_mem_at()
4606 bnx2x_fp(bp, index, rx_buf_ring) = in bnx2x_alloc_fp_mem_at()
4608 if (!bnx2x_fp(bp, index, rx_buf_ring)) in bnx2x_alloc_fp_mem_at()
4610 bnx2x_fp(bp, index, rx_desc_ring) = in bnx2x_alloc_fp_mem_at()
4611 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), in bnx2x_alloc_fp_mem_at()
4613 if (!bnx2x_fp(bp, index, rx_desc_ring)) in bnx2x_alloc_fp_mem_at()
4617 bnx2x_fp(bp, index, rx_comp_ring) = in bnx2x_alloc_fp_mem_at()
4618 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), in bnx2x_alloc_fp_mem_at()
4620 if (!bnx2x_fp(bp, index, rx_comp_ring)) in bnx2x_alloc_fp_mem_at()
4624 bnx2x_fp(bp, index, rx_page_ring) = in bnx2x_alloc_fp_mem_at()
4627 if (!bnx2x_fp(bp, index, rx_page_ring)) in bnx2x_alloc_fp_mem_at()
4629 bnx2x_fp(bp, index, rx_sge_ring) = in bnx2x_alloc_fp_mem_at()
4630 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), in bnx2x_alloc_fp_mem_at()
4632 if (!bnx2x_fp(bp, index, rx_sge_ring)) in bnx2x_alloc_fp_mem_at()
4659 bnx2x_free_fp_mem_at(bp, index); in bnx2x_alloc_fp_mem_at()
4665 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) in bnx2x_alloc_fp_mem_cnic() argument
4667 if (!NO_FCOE(bp)) in bnx2x_alloc_fp_mem_cnic()
4669 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) in bnx2x_alloc_fp_mem_cnic()
4678 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) in bnx2x_alloc_fp_mem() argument
4687 if (bnx2x_alloc_fp_mem_at(bp, 0)) in bnx2x_alloc_fp_mem()
4691 for_each_nondefault_eth_queue(bp, i) in bnx2x_alloc_fp_mem()
4692 if (bnx2x_alloc_fp_mem_at(bp, i)) in bnx2x_alloc_fp_mem()
4696 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { in bnx2x_alloc_fp_mem()
4697 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; in bnx2x_alloc_fp_mem()
4700 bnx2x_shrink_eth_fp(bp, delta); in bnx2x_alloc_fp_mem()
4701 if (CNIC_SUPPORT(bp)) in bnx2x_alloc_fp_mem()
4708 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); in bnx2x_alloc_fp_mem()
4709 bp->num_ethernet_queues -= delta; in bnx2x_alloc_fp_mem()
4710 bp->num_queues = bp->num_ethernet_queues + in bnx2x_alloc_fp_mem()
4711 bp->num_cnic_queues; in bnx2x_alloc_fp_mem()
4713 bp->num_queues + delta, bp->num_queues); in bnx2x_alloc_fp_mem()
4719 void bnx2x_free_mem_bp(struct bnx2x *bp) in bnx2x_free_mem_bp() argument
4723 for (i = 0; i < bp->fp_array_size; i++) in bnx2x_free_mem_bp()
4724 kfree(bp->fp[i].tpa_info); in bnx2x_free_mem_bp()
4725 kfree(bp->fp); in bnx2x_free_mem_bp()
4726 kfree(bp->sp_objs); in bnx2x_free_mem_bp()
4727 kfree(bp->fp_stats); in bnx2x_free_mem_bp()
4728 kfree(bp->bnx2x_txq); in bnx2x_free_mem_bp()
4729 kfree(bp->msix_table); in bnx2x_free_mem_bp()
4730 kfree(bp->ilt); in bnx2x_free_mem_bp()
4733 int bnx2x_alloc_mem_bp(struct bnx2x *bp) in bnx2x_alloc_mem_bp() argument
4746 msix_table_size = bp->igu_sb_cnt; in bnx2x_alloc_mem_bp()
4747 if (IS_PF(bp)) in bnx2x_alloc_mem_bp()
4752 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4753 bp->fp_array_size = fp_array_size; in bnx2x_alloc_mem_bp()
4754 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); in bnx2x_alloc_mem_bp()
4756 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); in bnx2x_alloc_mem_bp()
4759 for (i = 0; i < bp->fp_array_size; i++) { in bnx2x_alloc_mem_bp()
4767 bp->fp = fp; in bnx2x_alloc_mem_bp()
4770 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), in bnx2x_alloc_mem_bp()
4772 if (!bp->sp_objs) in bnx2x_alloc_mem_bp()
4776 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), in bnx2x_alloc_mem_bp()
4778 if (!bp->fp_stats) in bnx2x_alloc_mem_bp()
4783 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); in bnx2x_alloc_mem_bp()
4786 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), in bnx2x_alloc_mem_bp()
4788 if (!bp->bnx2x_txq) in bnx2x_alloc_mem_bp()
4795 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
4801 bp->ilt = ilt; in bnx2x_alloc_mem_bp()
4805 bnx2x_free_mem_bp(bp); in bnx2x_alloc_mem_bp()
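
bnx2x_alloc_mem_bp()/bnx2x_free_mem_bp() above use the standard kcalloc-then-unwind shape, with the free routine safe to call on a partially built structure. A compact sketch with a hypothetical two-array struct:

    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_fp    { int dummy; };
    struct my_stats { int dummy; };

    struct my_priv {
            struct my_fp    *fp;
            struct my_stats *stats;
    };

    static int my_alloc_arrays(struct my_priv *p, int n)
    {
            p->fp = kcalloc(n, sizeof(*p->fp), GFP_KERNEL);
            if (!p->fp)
                    return -ENOMEM;
            p->stats = kcalloc(n, sizeof(*p->stats), GFP_KERNEL);
            if (!p->stats) {
                    kfree(p->fp);   /* unwind everything allocated so far */
                    p->fp = NULL;
                    return -ENOMEM;
            }
            return 0;
    }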
4811 struct bnx2x *bp = netdev_priv(dev); in bnx2x_reload_if_running() local
4816 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); in bnx2x_reload_if_running()
4817 return bnx2x_nic_load(bp, LOAD_NORMAL); in bnx2x_reload_if_running()
4820 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) in bnx2x_get_cur_phy_idx() argument
4823 if (bp->link_params.num_phys <= 1) in bnx2x_get_cur_phy_idx()
4826 if (bp->link_vars.link_up) { in bnx2x_get_cur_phy_idx()
4829 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && in bnx2x_get_cur_phy_idx()
4830 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) in bnx2x_get_cur_phy_idx()
4834 switch (bnx2x_phy_selection(&bp->link_params)) { in bnx2x_get_cur_phy_idx()
4849 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) in bnx2x_get_link_cfg_idx() argument
4851 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); in bnx2x_get_link_cfg_idx()
4858 if (bp->link_params.multi_phy_config & in bnx2x_get_link_cfg_idx()
4871 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fcoe_get_wwn() local
4872 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; in bnx2x_fcoe_get_wwn()
4895 struct bnx2x *bp = netdev_priv(dev); in bnx2x_change_mtu() local
4897 if (pci_num_vf(bp->pdev)) { in bnx2x_change_mtu()
4902 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_change_mtu()
4919 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) in bnx2x_change_mtu()
4920 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); in bnx2x_change_mtu()
4928 struct bnx2x *bp = netdev_priv(dev); in bnx2x_fix_features() local
4930 if (pci_num_vf(bp->pdev)) { in bnx2x_fix_features()
4936 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { in bnx2x_fix_features()
4958 struct bnx2x *bp = netdev_priv(dev); in bnx2x_set_features() local
4964 if (!pci_num_vf(bp->pdev)) { in bnx2x_set_features()
4966 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { in bnx2x_set_features()
4967 bp->link_params.loopback_mode = LOOPBACK_BMAC; in bnx2x_set_features()
4971 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { in bnx2x_set_features()
4972 bp->link_params.loopback_mode = LOOPBACK_NONE; in bnx2x_set_features()
4983 if ((changes & NETIF_F_GRO) && bp->disable_tpa) in bnx2x_set_features()
4990 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { in bnx2x_set_features()
5003 struct bnx2x *bp = netdev_priv(dev); in bnx2x_tx_timeout() local
5006 if (!bp->panic) in bnx2x_tx_timeout()
5011 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); in bnx2x_tx_timeout()
5017 struct bnx2x *bp; in bnx2x_suspend() local
5023 bp = netdev_priv(dev); in bnx2x_suspend()
5036 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); in bnx2x_suspend()
5038 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); in bnx2x_suspend()
5048 struct bnx2x *bp; in bnx2x_resume() local
5055 bp = netdev_priv(dev); in bnx2x_resume()
5057 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { in bnx2x_resume()
5071 bnx2x_set_power_state(bp, PCI_D0); in bnx2x_resume()
5074 rc = bnx2x_nic_load(bp, LOAD_OPEN); in bnx2x_resume()
5081 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, in bnx2x_set_ctx_validation() argument
5091 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
5095 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), in bnx2x_set_ctx_validation()
5099 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, in storm_memset_hc_timeout() argument
5105 REG_WR8(bp, addr, ticks); in storm_memset_hc_timeout()
5111 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, in storm_memset_hc_disable() argument
5118 u8 flags = REG_RD8(bp, addr); in storm_memset_hc_disable()
5122 REG_WR8(bp, addr, flags); in storm_memset_hc_disable()
5128 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, in bnx2x_update_coalesce_sb_index() argument
5131 int port = BP_PORT(bp); in bnx2x_update_coalesce_sb_index()
5134 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); in bnx2x_update_coalesce_sb_index()
5137 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); in bnx2x_update_coalesce_sb_index()
5140 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, in bnx2x_schedule_sp_rtnl() argument
5144 set_bit(flag, &bp->sp_rtnl_state); in bnx2x_schedule_sp_rtnl()
5148 schedule_delayed_work(&bp->sp_rtnl_task, 0); in bnx2x_schedule_sp_rtnl()
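
The final helper latches a request bit and kicks a delayed work item, so the heavy lifting runs later in process context (under rtnl in this driver). The generic pattern, with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    struct my_sp {
            unsigned long      pending;     /* one bit per deferred task */
            struct delayed_work task;
    };

    static void my_schedule_sp(struct my_sp *sp, int flag)
    {
            set_bit(flag, &sp->pending);    /* atomic: safe from any context */
            schedule_delayed_work(&sp->task, 0);    /* run as soon as possible */
    }

    /* the work handler then consumes the flags:
     *      if (test_and_clear_bit(MY_FLAG_X, &sp->pending)) { ... }
     */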