Lines matching refs:edev — each entry gives the source line number, the matching code fragment, and the enclosing function; "local" marks a local variable declaration and "argument" a function parameter.

88 static int qede_alloc_rx_buffer(struct qede_dev *edev,
110 struct qede_dev *edev; in qede_netdev_event() local
124 edev = netdev_priv(ndev); in qede_netdev_event()
127 if (!edev->ops || !edev->ops->common) in qede_netdev_event()
129 edev->ops->common->set_id(edev->cdev, edev->ndev->name, in qede_netdev_event()
201 static int qede_free_tx_pkt(struct qede_dev *edev, in qede_free_tx_pkt() argument
215 DP_ERR(edev, in qede_free_tx_pkt()
235 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_tx_pkt()
242 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in qede_free_tx_pkt()
258 static void qede_free_failed_tx_pkt(struct qede_dev *edev, in qede_free_failed_tx_pkt() argument
283 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
291 dma_unmap_page(&edev->pdev->dev, in qede_free_failed_tx_pkt()
307 static u32 qede_xmit_type(struct qede_dev *edev, in qede_xmit_type() argument
363 static int map_frag_to_bd(struct qede_dev *edev, in map_frag_to_bd() argument
370 mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0, in map_frag_to_bd()
373 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { in map_frag_to_bd()
374 DP_NOTICE(edev, "Unable to map frag - dropping packet\n"); in map_frag_to_bd()
389 struct qede_dev *edev = netdev_priv(ndev); in qede_start_xmit() local
407 WARN_ON(txq_index >= QEDE_TSS_CNT(edev)); in qede_start_xmit()
408 txq = QEDE_TX_QUEUE(edev, txq_index); in qede_start_xmit()
419 xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); in qede_start_xmit()
431 mapping = dma_map_single(&edev->pdev->dev, skb->data, in qede_start_xmit()
433 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { in qede_start_xmit()
434 DP_NOTICE(edev, "SKB mapping failed\n"); in qede_start_xmit()
435 qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false); in qede_start_xmit()
500 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, in qede_start_xmit()
528 rc = map_frag_to_bd(edev, in qede_start_xmit()
532 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, in qede_start_xmit()
552 rc = map_frag_to_bd(edev, in qede_start_xmit()
556 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, in qede_start_xmit()
596 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, in qede_start_xmit()
606 (edev->state == QEDE_STATE_OPEN)) { in qede_start_xmit()
608 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, in qede_start_xmit()
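
Note: the qede_start_xmit()/qede_free_failed_tx_pkt() entries above outline the TX mapping discipline: the skb head is mapped with dma_map_single(), each fragment via map_frag_to_bd(), and any dma_mapping_error() hit makes qede_free_failed_tx_pkt() undo the buffer descriptors mapped so far before the packet is dropped. A minimal userspace sketch of that unwind pattern follows; all helpers and values in it are made up for illustration and are not driver API.

/* Hedged sketch of the map-then-unwind TX pattern suggested above. */
#include <stdio.h>

#define MAX_FRAGS   4
#define BAD_MAPPING 0UL            /* stand-in for a dma_mapping_error() hit */

static unsigned long fake_map(int frag)
{
	return frag == 2 ? BAD_MAPPING : 0x1000UL + frag;  /* fail on frag 2 */
}

static void fake_unmap(unsigned long addr)
{
	printf("unmap %#lx\n", addr);
}

int main(void)
{
	unsigned long bd[MAX_FRAGS];
	int nbd = 0;

	for (int frag = 0; frag < MAX_FRAGS; frag++) {
		unsigned long m = fake_map(frag);

		if (m == BAD_MAPPING) {
			/* mirror the qede_free_failed_tx_pkt() idea: undo the
			 * nbd buffer descriptors that were already mapped */
			while (nbd--)
				fake_unmap(bd[nbd]);
			puts("drop packet");
			return 1;
		}
		bd[nbd++] = m;
	}
	puts("all fragments mapped, ring doorbell");
	return 0;
}
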
629 static int qede_tx_int(struct qede_dev *edev, in qede_tx_int() argument
637 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); in qede_tx_int()
645 rc = qede_free_tx_pkt(edev, txq, &len); in qede_tx_int()
647 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n", in qede_tx_int()
685 (edev->state == QEDE_STATE_OPEN) && in qede_tx_int()
689 DP_VERBOSE(edev, NETIF_MSG_TX_DONE, in qede_tx_int()
716 for (tc = 0; tc < fp->edev->num_tc; tc++) in qede_has_tx_work()
744 static inline void qede_update_rx_prod(struct qede_dev *edev, in qede_update_rx_prod() argument
773 static u32 qede_get_rxhash(struct qede_dev *edev, in qede_get_rxhash() argument
782 if ((edev->ndev->features & NETIF_F_RXHASH) && htype) { in qede_get_rxhash()
800 static inline void qede_skb_receive(struct qede_dev *edev, in qede_skb_receive() argument
835 struct qede_dev *edev = fp->edev; in qede_rx_int() local
870 edev->ops->eth_cqe_completion( in qede_rx_int()
871 edev->cdev, fp->rss_id, in qede_rx_int()
891 if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) { in qede_rx_int()
892 dma_unmap_single(&edev->pdev->dev, in qede_rx_int()
901 DP_NOTICE(edev, in qede_rx_int()
912 DP_NOTICE(edev, in qede_rx_int()
922 DP_NOTICE(edev, in qede_rx_int()
933 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_rx_int()
935 rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields, in qede_rx_int()
945 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_int()
964 qede_update_rx_prod(edev, rxq); in qede_rx_int()
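
Note: the qede_rx_int() entries above suggest a replenish-before-consume policy: a completed Rx buffer is only unmapped and passed up the stack if qede_alloc_rx_buffer() first manages to post a replacement, and qede_update_rx_prod() republishes the producers at the end of the poll. A hedged single-function sketch of that policy, with stand-in helpers rather than the driver's:

#include <stdio.h>
#include <stdbool.h>

/* stand-in for qede_alloc_rx_buffer(): succeeds while a budget remains */
static bool alloc_replacement(int budget) { return budget > 0; }

static void rx_one(int replenish_budget)
{
	if (alloc_replacement(replenish_budget)) {
		/* unmap the old buffer, build the skb, hand it to the stack */
		puts("deliver packet, ring refilled");
	} else {
		/* keep the old buffer on the ring for the next frame */
		puts("drop packet, recycle buffer");
	}
}

int main(void)
{
	rx_one(1);
	rx_one(0);
	return 0;
}
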
974 struct qede_dev *edev = fp->edev; in qede_poll() local
979 for (tc = 0; tc < edev->num_tc; tc++) in qede_poll()
981 qede_tx_int(edev, &fp->txqs[tc]); in qede_poll()
1044 static int qede_set_ucast_rx_mac(struct qede_dev *edev, in qede_set_ucast_rx_mac() argument
1056 return edev->ops->filter_config(edev->cdev, &filter_cmd); in qede_set_ucast_rx_mac()
1059 void qede_fill_by_demand_stats(struct qede_dev *edev) in qede_fill_by_demand_stats() argument
1063 edev->ops->get_vport_stats(edev->cdev, &stats); in qede_fill_by_demand_stats()
1064 edev->stats.no_buff_discards = stats.no_buff_discards; in qede_fill_by_demand_stats()
1065 edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; in qede_fill_by_demand_stats()
1066 edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes; in qede_fill_by_demand_stats()
1067 edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes; in qede_fill_by_demand_stats()
1068 edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts; in qede_fill_by_demand_stats()
1069 edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts; in qede_fill_by_demand_stats()
1070 edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts; in qede_fill_by_demand_stats()
1071 edev->stats.mftag_filter_discards = stats.mftag_filter_discards; in qede_fill_by_demand_stats()
1072 edev->stats.mac_filter_discards = stats.mac_filter_discards; in qede_fill_by_demand_stats()
1074 edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes; in qede_fill_by_demand_stats()
1075 edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes; in qede_fill_by_demand_stats()
1076 edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes; in qede_fill_by_demand_stats()
1077 edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts; in qede_fill_by_demand_stats()
1078 edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts; in qede_fill_by_demand_stats()
1079 edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts; in qede_fill_by_demand_stats()
1080 edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts; in qede_fill_by_demand_stats()
1081 edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts; in qede_fill_by_demand_stats()
1082 edev->stats.coalesced_events = stats.tpa_coalesced_events; in qede_fill_by_demand_stats()
1083 edev->stats.coalesced_aborts_num = stats.tpa_aborts_num; in qede_fill_by_demand_stats()
1084 edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts; in qede_fill_by_demand_stats()
1085 edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; in qede_fill_by_demand_stats()
1087 edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; in qede_fill_by_demand_stats()
1088 edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets; in qede_fill_by_demand_stats()
1089 edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets; in qede_fill_by_demand_stats()
1090 edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets; in qede_fill_by_demand_stats()
1091 edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets; in qede_fill_by_demand_stats()
1092 edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets; in qede_fill_by_demand_stats()
1093 edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets; in qede_fill_by_demand_stats()
1094 edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets; in qede_fill_by_demand_stats()
1095 edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets; in qede_fill_by_demand_stats()
1096 edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets; in qede_fill_by_demand_stats()
1097 edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets; in qede_fill_by_demand_stats()
1098 edev->stats.rx_crc_errors = stats.rx_crc_errors; in qede_fill_by_demand_stats()
1099 edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; in qede_fill_by_demand_stats()
1100 edev->stats.rx_pause_frames = stats.rx_pause_frames; in qede_fill_by_demand_stats()
1101 edev->stats.rx_pfc_frames = stats.rx_pfc_frames; in qede_fill_by_demand_stats()
1102 edev->stats.rx_align_errors = stats.rx_align_errors; in qede_fill_by_demand_stats()
1103 edev->stats.rx_carrier_errors = stats.rx_carrier_errors; in qede_fill_by_demand_stats()
1104 edev->stats.rx_oversize_packets = stats.rx_oversize_packets; in qede_fill_by_demand_stats()
1105 edev->stats.rx_jabbers = stats.rx_jabbers; in qede_fill_by_demand_stats()
1106 edev->stats.rx_undersize_packets = stats.rx_undersize_packets; in qede_fill_by_demand_stats()
1107 edev->stats.rx_fragments = stats.rx_fragments; in qede_fill_by_demand_stats()
1108 edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets; in qede_fill_by_demand_stats()
1109 edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets; in qede_fill_by_demand_stats()
1110 edev->stats.tx_128_to_255_byte_packets = in qede_fill_by_demand_stats()
1112 edev->stats.tx_256_to_511_byte_packets = in qede_fill_by_demand_stats()
1114 edev->stats.tx_512_to_1023_byte_packets = in qede_fill_by_demand_stats()
1116 edev->stats.tx_1024_to_1518_byte_packets = in qede_fill_by_demand_stats()
1118 edev->stats.tx_1519_to_2047_byte_packets = in qede_fill_by_demand_stats()
1120 edev->stats.tx_2048_to_4095_byte_packets = in qede_fill_by_demand_stats()
1122 edev->stats.tx_4096_to_9216_byte_packets = in qede_fill_by_demand_stats()
1124 edev->stats.tx_9217_to_16383_byte_packets = in qede_fill_by_demand_stats()
1126 edev->stats.tx_pause_frames = stats.tx_pause_frames; in qede_fill_by_demand_stats()
1127 edev->stats.tx_pfc_frames = stats.tx_pfc_frames; in qede_fill_by_demand_stats()
1128 edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count; in qede_fill_by_demand_stats()
1129 edev->stats.tx_total_collisions = stats.tx_total_collisions; in qede_fill_by_demand_stats()
1130 edev->stats.brb_truncates = stats.brb_truncates; in qede_fill_by_demand_stats()
1131 edev->stats.brb_discards = stats.brb_discards; in qede_fill_by_demand_stats()
1132 edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames; in qede_fill_by_demand_stats()
1139 struct qede_dev *edev = netdev_priv(dev); in qede_get_stats64() local
1141 qede_fill_by_demand_stats(edev); in qede_get_stats64()
1143 stats->rx_packets = edev->stats.rx_ucast_pkts + in qede_get_stats64()
1144 edev->stats.rx_mcast_pkts + in qede_get_stats64()
1145 edev->stats.rx_bcast_pkts; in qede_get_stats64()
1146 stats->tx_packets = edev->stats.tx_ucast_pkts + in qede_get_stats64()
1147 edev->stats.tx_mcast_pkts + in qede_get_stats64()
1148 edev->stats.tx_bcast_pkts; in qede_get_stats64()
1150 stats->rx_bytes = edev->stats.rx_ucast_bytes + in qede_get_stats64()
1151 edev->stats.rx_mcast_bytes + in qede_get_stats64()
1152 edev->stats.rx_bcast_bytes; in qede_get_stats64()
1154 stats->tx_bytes = edev->stats.tx_ucast_bytes + in qede_get_stats64()
1155 edev->stats.tx_mcast_bytes + in qede_get_stats64()
1156 edev->stats.tx_bcast_bytes; in qede_get_stats64()
1158 stats->tx_errors = edev->stats.tx_err_drop_pkts; in qede_get_stats64()
1159 stats->multicast = edev->stats.rx_mcast_pkts + in qede_get_stats64()
1160 edev->stats.rx_bcast_pkts; in qede_get_stats64()
1162 stats->rx_fifo_errors = edev->stats.no_buff_discards; in qede_get_stats64()
1164 stats->collisions = edev->stats.tx_total_collisions; in qede_get_stats64()
1165 stats->rx_crc_errors = edev->stats.rx_crc_errors; in qede_get_stats64()
1166 stats->rx_frame_errors = edev->stats.rx_align_errors; in qede_get_stats64()
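
Note: qede_get_stats64() above builds the standard netdev counters by summing the per-cast-type hardware counters fetched in qede_fill_by_demand_stats() (for example, multicast counts both mcast and bcast packets). A small self-contained sketch of that aggregation, using a stand-in struct rather than the driver's struct qede_stats:

#include <stdio.h>

struct demo_stats {
	unsigned long long rx_ucast_pkts, rx_mcast_pkts, rx_bcast_pkts;
	unsigned long long tx_ucast_pkts, tx_mcast_pkts, tx_bcast_pkts;
};

int main(void)
{
	struct demo_stats s = { 100, 10, 2, 200, 20, 4 };  /* example values */

	unsigned long long rx_packets = s.rx_ucast_pkts + s.rx_mcast_pkts + s.rx_bcast_pkts;
	unsigned long long tx_packets = s.tx_ucast_pkts + s.tx_mcast_pkts + s.tx_bcast_pkts;
	unsigned long long multicast  = s.rx_mcast_pkts + s.rx_bcast_pkts;

	printf("rx=%llu tx=%llu mcast=%llu\n", rx_packets, tx_packets, multicast);
	return 0;
}
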
1194 struct qede_dev *edev; in qede_alloc_etherdev() local
1196 ndev = alloc_etherdev_mqs(sizeof(*edev), in qede_alloc_etherdev()
1204 edev = netdev_priv(ndev); in qede_alloc_etherdev()
1205 edev->ndev = ndev; in qede_alloc_etherdev()
1206 edev->cdev = cdev; in qede_alloc_etherdev()
1207 edev->pdev = pdev; in qede_alloc_etherdev()
1208 edev->dp_module = dp_module; in qede_alloc_etherdev()
1209 edev->dp_level = dp_level; in qede_alloc_etherdev()
1210 edev->ops = qed_ops; in qede_alloc_etherdev()
1211 edev->q_num_rx_buffers = NUM_RX_BDS_DEF; in qede_alloc_etherdev()
1212 edev->q_num_tx_buffers = NUM_TX_BDS_DEF; in qede_alloc_etherdev()
1214 DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n"); in qede_alloc_etherdev()
1218 memset(&edev->stats, 0, sizeof(edev->stats)); in qede_alloc_etherdev()
1219 memcpy(&edev->dev_info, info, sizeof(*info)); in qede_alloc_etherdev()
1221 edev->num_tc = edev->dev_info.num_tc; in qede_alloc_etherdev()
1223 return edev; in qede_alloc_etherdev()
1226 static void qede_init_ndev(struct qede_dev *edev) in qede_init_ndev() argument
1228 struct net_device *ndev = edev->ndev; in qede_init_ndev()
1229 struct pci_dev *pdev = edev->pdev; in qede_init_ndev()
1234 ndev->mem_start = edev->dev_info.common.pci_mem_start; in qede_init_ndev()
1236 ndev->mem_end = edev->dev_info.common.pci_mem_end; in qede_init_ndev()
1237 ndev->irq = edev->dev_info.common.pci_irq; in qede_init_ndev()
1259 ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac); in qede_init_ndev()
1288 static void qede_free_fp_array(struct qede_dev *edev) in qede_free_fp_array() argument
1290 if (edev->fp_array) { in qede_free_fp_array()
1295 fp = &edev->fp_array[i]; in qede_free_fp_array()
1301 kfree(edev->fp_array); in qede_free_fp_array()
1303 edev->num_rss = 0; in qede_free_fp_array()
1306 static int qede_alloc_fp_array(struct qede_dev *edev) in qede_alloc_fp_array() argument
1311 edev->fp_array = kcalloc(QEDE_RSS_CNT(edev), in qede_alloc_fp_array()
1312 sizeof(*edev->fp_array), GFP_KERNEL); in qede_alloc_fp_array()
1313 if (!edev->fp_array) { in qede_alloc_fp_array()
1314 DP_NOTICE(edev, "fp array allocation failed\n"); in qede_alloc_fp_array()
1319 fp = &edev->fp_array[i]; in qede_alloc_fp_array()
1323 DP_NOTICE(edev, "sb info struct allocation failed\n"); in qede_alloc_fp_array()
1329 DP_NOTICE(edev, "RXQ struct allocation failed\n"); in qede_alloc_fp_array()
1333 fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL); in qede_alloc_fp_array()
1335 DP_NOTICE(edev, "TXQ array allocation failed\n"); in qede_alloc_fp_array()
1342 qede_free_fp_array(edev); in qede_alloc_fp_array()
1348 struct qede_dev *edev = container_of(work, struct qede_dev, in qede_sp_task() local
1350 mutex_lock(&edev->qede_lock); in qede_sp_task()
1352 if (edev->state == QEDE_STATE_OPEN) { in qede_sp_task()
1353 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) in qede_sp_task()
1354 qede_config_rx_mode(edev->ndev); in qede_sp_task()
1357 mutex_unlock(&edev->qede_lock); in qede_sp_task()
1379 struct qede_dev *edev; in __qede_probe() local
1414 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, in __qede_probe()
1416 if (!edev) { in __qede_probe()
1421 qede_init_ndev(edev); in __qede_probe()
1423 rc = register_netdev(edev->ndev); in __qede_probe()
1425 DP_NOTICE(edev, "Cannot register net-device\n"); in __qede_probe()
1429 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); in __qede_probe()
1431 edev->ops->register_ops(cdev, &qede_ll_ops, edev); in __qede_probe()
1433 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); in __qede_probe()
1434 mutex_init(&edev->qede_lock); in __qede_probe()
1436 DP_INFO(edev, "Ending successfully qede probe\n"); in __qede_probe()
1441 free_netdev(edev->ndev); in __qede_probe()
1468 struct qede_dev *edev = netdev_priv(ndev); in __qede_remove() local
1469 struct qed_dev *cdev = edev->cdev; in __qede_remove()
1471 DP_INFO(edev, "Starting qede_remove\n"); in __qede_remove()
1473 cancel_delayed_work_sync(&edev->sp_task); in __qede_remove()
1476 edev->ops->common->set_power_state(cdev, PCI_D0); in __qede_remove()
1499 static int qede_set_num_queues(struct qede_dev *edev) in qede_set_num_queues() argument
1506 edev->dev_info.common.num_hwfns; in qede_set_num_queues()
1508 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num); in qede_set_num_queues()
1510 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num); in qede_set_num_queues()
1513 edev->num_rss = rc; in qede_set_num_queues()
1514 DP_INFO(edev, "Managed %d [of %d] RSS queues\n", in qede_set_num_queues()
1515 QEDE_RSS_CNT(edev), rss_num); in qede_set_num_queues()
1521 static void qede_free_mem_sb(struct qede_dev *edev, in qede_free_mem_sb() argument
1525 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), in qede_free_mem_sb()
1530 static int qede_alloc_mem_sb(struct qede_dev *edev, in qede_alloc_mem_sb() argument
1538 sb_virt = dma_alloc_coherent(&edev->pdev->dev, in qede_alloc_mem_sb()
1542 DP_ERR(edev, "Status block allocation failed\n"); in qede_alloc_mem_sb()
1546 rc = edev->ops->common->sb_init(edev->cdev, sb_info, in qede_alloc_mem_sb()
1550 DP_ERR(edev, "Status block initialization failed\n"); in qede_alloc_mem_sb()
1551 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), in qede_alloc_mem_sb()
1559 static void qede_free_rx_buffers(struct qede_dev *edev, in qede_free_rx_buffers() argument
1571 dma_unmap_single(&edev->pdev->dev, in qede_free_rx_buffers()
1580 static void qede_free_mem_rxq(struct qede_dev *edev, in qede_free_mem_rxq() argument
1584 qede_free_rx_buffers(edev, rxq); in qede_free_mem_rxq()
1590 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring); in qede_free_mem_rxq()
1591 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); in qede_free_mem_rxq()
1594 static int qede_alloc_rx_buffer(struct qede_dev *edev, in qede_alloc_rx_buffer() argument
1607 DP_NOTICE(edev, "Failed to allocate Rx data\n"); in qede_alloc_rx_buffer()
1611 mapping = dma_map_single(&edev->pdev->dev, data, in qede_alloc_rx_buffer()
1613 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { in qede_alloc_rx_buffer()
1615 DP_NOTICE(edev, "Failed to map Rx buffer\n"); in qede_alloc_rx_buffer()
1636 static int qede_alloc_mem_rxq(struct qede_dev *edev, in qede_alloc_mem_rxq() argument
1641 rxq->num_rx_buffers = edev->q_num_rx_buffers; in qede_alloc_mem_rxq()
1645 edev->ndev->mtu + in qede_alloc_mem_rxq()
1652 DP_ERR(edev, "Rx buffers ring allocation failed\n"); in qede_alloc_mem_rxq()
1657 rc = edev->ops->common->chain_alloc(edev->cdev, in qede_alloc_mem_rxq()
1668 rc = edev->ops->common->chain_alloc(edev->cdev, in qede_alloc_mem_rxq()
1679 rc = qede_alloc_rx_buffer(edev, rxq); in qede_alloc_mem_rxq()
1685 DP_ERR(edev, "Rx buffers allocation failed\n"); in qede_alloc_mem_rxq()
1688 DP_NOTICE(edev, in qede_alloc_mem_rxq()
1696 qede_free_mem_rxq(edev, rxq); in qede_alloc_mem_rxq()
1700 static void qede_free_mem_txq(struct qede_dev *edev, in qede_free_mem_txq() argument
1707 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); in qede_free_mem_txq()
1711 static int qede_alloc_mem_txq(struct qede_dev *edev, in qede_alloc_mem_txq() argument
1717 txq->num_tx_buffers = edev->q_num_tx_buffers; in qede_alloc_mem_txq()
1723 DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); in qede_alloc_mem_txq()
1727 rc = edev->ops->common->chain_alloc(edev->cdev, in qede_alloc_mem_txq()
1739 qede_free_mem_txq(edev, txq); in qede_alloc_mem_txq()
1744 static void qede_free_mem_fp(struct qede_dev *edev, in qede_free_mem_fp() argument
1749 qede_free_mem_sb(edev, fp->sb_info); in qede_free_mem_fp()
1751 qede_free_mem_rxq(edev, fp->rxq); in qede_free_mem_fp()
1753 for (tc = 0; tc < edev->num_tc; tc++) in qede_free_mem_fp()
1754 qede_free_mem_txq(edev, &fp->txqs[tc]); in qede_free_mem_fp()
1760 static int qede_alloc_mem_fp(struct qede_dev *edev, in qede_alloc_mem_fp() argument
1765 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); in qede_alloc_mem_fp()
1769 rc = qede_alloc_mem_rxq(edev, fp->rxq); in qede_alloc_mem_fp()
1773 for (tc = 0; tc < edev->num_tc; tc++) { in qede_alloc_mem_fp()
1774 rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); in qede_alloc_mem_fp()
1782 qede_free_mem_fp(edev, fp); in qede_alloc_mem_fp()
1786 static void qede_free_mem_load(struct qede_dev *edev) in qede_free_mem_load() argument
1791 struct qede_fastpath *fp = &edev->fp_array[i]; in qede_free_mem_load()
1793 qede_free_mem_fp(edev, fp); in qede_free_mem_load()
1798 static int qede_alloc_mem_load(struct qede_dev *edev) in qede_alloc_mem_load() argument
1802 for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { in qede_alloc_mem_load()
1803 struct qede_fastpath *fp = &edev->fp_array[rss_id]; in qede_alloc_mem_load()
1805 rc = qede_alloc_mem_fp(edev, fp); in qede_alloc_mem_load()
1810 if (rss_id != QEDE_RSS_CNT(edev)) { in qede_alloc_mem_load()
1813 DP_ERR(edev, in qede_alloc_mem_load()
1817 DP_NOTICE(edev, in qede_alloc_mem_load()
1819 QEDE_RSS_CNT(edev), rss_id); in qede_alloc_mem_load()
1821 edev->num_rss = rss_id; in qede_alloc_mem_load()
1828 static void qede_init_fp(struct qede_dev *edev) in qede_init_fp() argument
1834 fp = &edev->fp_array[rss_id]; in qede_init_fp()
1836 fp->edev = edev; in qede_init_fp()
1846 memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); in qede_init_fp()
1847 for (tc = 0; tc < edev->num_tc; tc++) { in qede_init_fp()
1848 txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; in qede_init_fp()
1853 edev->ndev->name, rss_id); in qede_init_fp()
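
Note: qede_init_fp() above flattens the (traffic class, RSS queue) pair into a single TX queue index as txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; the same formula reappears in qede_stop_queues() and qede_start_queues(). A runnable sketch of the numbering, using made-up example counts rather than driver defaults:

#include <stdio.h>

int main(void)
{
	const int num_rss = 4;   /* example stand-in for QEDE_RSS_CNT(edev) */
	const int num_tc  = 2;   /* example stand-in for edev->num_tc */

	for (int tc = 0; tc < num_tc; tc++)
		for (int rss_id = 0; rss_id < num_rss; rss_id++)
			printf("tc %d, rss %d -> txq_index %d\n",
			       tc, rss_id, tc * num_rss + rss_id);
	return 0;
}
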
1857 static int qede_set_real_num_queues(struct qede_dev *edev) in qede_set_real_num_queues() argument
1861 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); in qede_set_real_num_queues()
1863 DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); in qede_set_real_num_queues()
1866 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); in qede_set_real_num_queues()
1868 DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); in qede_set_real_num_queues()
1875 static void qede_napi_disable_remove(struct qede_dev *edev) in qede_napi_disable_remove() argument
1880 napi_disable(&edev->fp_array[i].napi); in qede_napi_disable_remove()
1882 netif_napi_del(&edev->fp_array[i].napi); in qede_napi_disable_remove()
1886 static void qede_napi_add_enable(struct qede_dev *edev) in qede_napi_add_enable() argument
1892 netif_napi_add(edev->ndev, &edev->fp_array[i].napi, in qede_napi_add_enable()
1894 napi_enable(&edev->fp_array[i].napi); in qede_napi_add_enable()
1898 static void qede_sync_free_irqs(struct qede_dev *edev) in qede_sync_free_irqs() argument
1902 for (i = 0; i < edev->int_info.used_cnt; i++) { in qede_sync_free_irqs()
1903 if (edev->int_info.msix_cnt) { in qede_sync_free_irqs()
1904 synchronize_irq(edev->int_info.msix[i].vector); in qede_sync_free_irqs()
1905 free_irq(edev->int_info.msix[i].vector, in qede_sync_free_irqs()
1906 &edev->fp_array[i]); in qede_sync_free_irqs()
1908 edev->ops->common->simd_handler_clean(edev->cdev, i); in qede_sync_free_irqs()
1912 edev->int_info.used_cnt = 0; in qede_sync_free_irqs()
1915 static int qede_req_msix_irqs(struct qede_dev *edev) in qede_req_msix_irqs() argument
1920 if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { in qede_req_msix_irqs()
1921 DP_ERR(edev, in qede_req_msix_irqs()
1923 QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); in qede_req_msix_irqs()
1927 for (i = 0; i < QEDE_RSS_CNT(edev); i++) { in qede_req_msix_irqs()
1928 rc = request_irq(edev->int_info.msix[i].vector, in qede_req_msix_irqs()
1929 qede_msix_fp_int, 0, edev->fp_array[i].name, in qede_req_msix_irqs()
1930 &edev->fp_array[i]); in qede_req_msix_irqs()
1932 DP_ERR(edev, "Request fp %d irq failed\n", i); in qede_req_msix_irqs()
1933 qede_sync_free_irqs(edev); in qede_req_msix_irqs()
1936 DP_VERBOSE(edev, NETIF_MSG_INTR, in qede_req_msix_irqs()
1938 edev->fp_array[i].name, i, in qede_req_msix_irqs()
1939 &edev->fp_array[i]); in qede_req_msix_irqs()
1940 edev->int_info.used_cnt++; in qede_req_msix_irqs()
1953 static int qede_setup_irqs(struct qede_dev *edev) in qede_setup_irqs() argument
1958 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); in qede_setup_irqs()
1962 if (edev->int_info.msix_cnt) { in qede_setup_irqs()
1963 rc = qede_req_msix_irqs(edev); in qede_setup_irqs()
1966 edev->ndev->irq = edev->int_info.msix[0].vector; in qede_setup_irqs()
1971 ops = edev->ops->common; in qede_setup_irqs()
1972 for (i = 0; i < QEDE_RSS_CNT(edev); i++) in qede_setup_irqs()
1973 ops->simd_handler_config(edev->cdev, in qede_setup_irqs()
1974 &edev->fp_array[i], i, in qede_setup_irqs()
1976 edev->int_info.used_cnt = QEDE_RSS_CNT(edev); in qede_setup_irqs()
1981 static int qede_drain_txq(struct qede_dev *edev, in qede_drain_txq() argument
1990 DP_NOTICE(edev, in qede_drain_txq()
1993 rc = edev->ops->common->drain(edev->cdev); in qede_drain_txq()
1996 return qede_drain_txq(edev, txq, false); in qede_drain_txq()
1998 DP_NOTICE(edev, in qede_drain_txq()
2015 static int qede_stop_queues(struct qede_dev *edev) in qede_stop_queues() argument
2018 struct qed_dev *cdev = edev->cdev; in qede_stop_queues()
2028 rc = edev->ops->vport_update(cdev, &vport_update_params); in qede_stop_queues()
2030 DP_ERR(edev, "Failed to update vport\n"); in qede_stop_queues()
2036 struct qede_fastpath *fp = &edev->fp_array[i]; in qede_stop_queues()
2038 for (tc = 0; tc < edev->num_tc; tc++) { in qede_stop_queues()
2041 rc = qede_drain_txq(edev, txq, true); in qede_stop_queues()
2048 for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { in qede_stop_queues()
2052 for (tc = 0; tc < edev->num_tc; tc++) { in qede_stop_queues()
2056 tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; in qede_stop_queues()
2057 rc = edev->ops->q_tx_stop(cdev, &tx_params); in qede_stop_queues()
2059 DP_ERR(edev, "Failed to stop TXQ #%d\n", in qede_stop_queues()
2070 rc = edev->ops->q_rx_stop(cdev, &rx_params); in qede_stop_queues()
2072 DP_ERR(edev, "Failed to stop RXQ #%d\n", i); in qede_stop_queues()
2078 rc = edev->ops->vport_stop(cdev, 0); in qede_stop_queues()
2080 DP_ERR(edev, "Failed to stop VPORT\n"); in qede_stop_queues()
2085 static int qede_start_queues(struct qede_dev *edev) in qede_start_queues() argument
2089 struct qed_dev *cdev = edev->cdev; in qede_start_queues()
2090 struct qed_update_vport_rss_params *rss_params = &edev->rss_params; in qede_start_queues()
2094 if (!edev->num_rss) { in qede_start_queues()
2095 DP_ERR(edev, in qede_start_queues()
2100 rc = edev->ops->vport_start(cdev, vport_id, in qede_start_queues()
2101 edev->ndev->mtu, in qede_start_queues()
2106 DP_ERR(edev, "Start V-PORT failed %d\n", rc); in qede_start_queues()
2110 DP_VERBOSE(edev, NETIF_MSG_IFUP, in qede_start_queues()
2112 vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); in qede_start_queues()
2115 struct qede_fastpath *fp = &edev->fp_array[i]; in qede_start_queues()
2125 rc = edev->ops->q_rx_start(cdev, &q_params, in qede_start_queues()
2132 DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); in qede_start_queues()
2138 qede_update_rx_prod(edev, fp->rxq); in qede_start_queues()
2140 for (tc = 0; tc < edev->num_tc; tc++) { in qede_start_queues()
2142 int txq_index = tc * QEDE_RSS_CNT(edev) + i; in qede_start_queues()
2151 rc = edev->ops->q_tx_start(cdev, &q_params, in qede_start_queues()
2156 DP_ERR(edev, "Start TXQ #%d failed %d\n", in qede_start_queues()
2182 if (QEDE_RSS_CNT(edev) > 1) { in qede_start_queues()
2186 ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); in qede_start_queues()
2195 rc = edev->ops->vport_update(cdev, &vport_update_params); in qede_start_queues()
2197 DP_ERR(edev, "Update V-PORT failed %d\n", rc); in qede_start_queues()
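
Note: in qede_start_queues() above, when more than one RSS queue is active the default indirection table is filled with ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); that kernel helper reduces to i modulo the ring count, so receive flows are spread round-robin across the active queues. A sketch with illustrative sizes:

#include <stdio.h>

int main(void)
{
	const unsigned int num_rss    = 4;   /* example QEDE_RSS_CNT(edev) */
	const unsigned int table_size = 16;  /* example indirection table size */

	for (unsigned int i = 0; i < table_size; i++)
		printf("indir[%u] = %u\n", i, i % num_rss);
	return 0;
}
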
2204 static int qede_set_mcast_rx_mac(struct qede_dev *edev, in qede_set_mcast_rx_mac() argument
2219 return edev->ops->filter_config(edev->cdev, &filter_cmd); in qede_set_mcast_rx_mac()
2226 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) in qede_unload() argument
2231 DP_INFO(edev, "Starting qede unload\n"); in qede_unload()
2233 mutex_lock(&edev->qede_lock); in qede_unload()
2234 edev->state = QEDE_STATE_CLOSED; in qede_unload()
2237 netif_tx_disable(edev->ndev); in qede_unload()
2238 netif_carrier_off(edev->ndev); in qede_unload()
2243 edev->ops->common->set_link(edev->cdev, &link_params); in qede_unload()
2244 rc = qede_stop_queues(edev); in qede_unload()
2246 qede_sync_free_irqs(edev); in qede_unload()
2250 DP_INFO(edev, "Stopped Queues\n"); in qede_unload()
2252 edev->ops->fastpath_stop(edev->cdev); in qede_unload()
2255 qede_sync_free_irqs(edev); in qede_unload()
2256 edev->ops->common->set_fp_int(edev->cdev, 0); in qede_unload()
2258 qede_napi_disable_remove(edev); in qede_unload()
2260 qede_free_mem_load(edev); in qede_unload()
2261 qede_free_fp_array(edev); in qede_unload()
2264 mutex_unlock(&edev->qede_lock); in qede_unload()
2265 DP_INFO(edev, "Ending qede unload\n"); in qede_unload()
2272 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) in qede_load() argument
2278 DP_INFO(edev, "Starting qede load\n"); in qede_load()
2280 rc = qede_set_num_queues(edev); in qede_load()
2284 rc = qede_alloc_fp_array(edev); in qede_load()
2288 qede_init_fp(edev); in qede_load()
2290 rc = qede_alloc_mem_load(edev); in qede_load()
2293 DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", in qede_load()
2294 QEDE_RSS_CNT(edev), edev->num_tc); in qede_load()
2296 rc = qede_set_real_num_queues(edev); in qede_load()
2300 qede_napi_add_enable(edev); in qede_load()
2301 DP_INFO(edev, "Napi added and enabled\n"); in qede_load()
2303 rc = qede_setup_irqs(edev); in qede_load()
2306 DP_INFO(edev, "Setup IRQs succeeded\n"); in qede_load()
2308 rc = qede_start_queues(edev); in qede_load()
2311 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); in qede_load()
2314 ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr); in qede_load()
2316 mutex_lock(&edev->qede_lock); in qede_load()
2317 edev->state = QEDE_STATE_OPEN; in qede_load()
2318 mutex_unlock(&edev->qede_lock); in qede_load()
2323 edev->ops->common->set_link(edev->cdev, &link_params); in qede_load()
2327 edev->ops->common->get_link(edev->cdev, &link_output); in qede_load()
2328 qede_link_update(edev, &link_output); in qede_load()
2330 DP_INFO(edev, "Ending successfully qede load\n"); in qede_load()
2335 qede_sync_free_irqs(edev); in qede_load()
2336 memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); in qede_load()
2338 qede_napi_disable_remove(edev); in qede_load()
2340 qede_free_mem_load(edev); in qede_load()
2342 edev->ops->common->set_fp_int(edev->cdev, 0); in qede_load()
2343 qede_free_fp_array(edev); in qede_load()
2344 edev->num_rss = 0; in qede_load()
2349 void qede_reload(struct qede_dev *edev, in qede_reload() argument
2353 qede_unload(edev, QEDE_UNLOAD_NORMAL); in qede_reload()
2358 func(edev, args); in qede_reload()
2360 qede_load(edev, QEDE_LOAD_NORMAL); in qede_reload()
2362 mutex_lock(&edev->qede_lock); in qede_reload()
2363 qede_config_rx_mode(edev->ndev); in qede_reload()
2364 mutex_unlock(&edev->qede_lock); in qede_reload()
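
Note: qede_reload() above quiesces the device with qede_unload(), runs the caller-supplied func(edev, args) while it is down, then brings it back with qede_load() and reapplies the RX mode under qede_lock. A hedged sketch of that reconfigure-while-down callback pattern; the context struct, the MTU example and every name below are illustrative only:

#include <stdio.h>

struct dev_ctx { int mtu; };

static void unload(struct dev_ctx *d) { printf("unload (mtu %d)\n", d->mtu); }
static void load(struct dev_ctx *d)   { printf("load (mtu %d)\n", d->mtu); }

static void reload(struct dev_ctx *d,
		   void (*func)(struct dev_ctx *, void *), void *args)
{
	unload(d);
	if (func)
		func(d, args);   /* reconfigure while the device is down */
	load(d);
}

static void set_mtu(struct dev_ctx *d, void *args)
{
	d->mtu = *(int *)args;
}

int main(void)
{
	struct dev_ctx d = { .mtu = 1500 };
	int new_mtu = 9000;

	reload(&d, set_mtu, &new_mtu);
	return 0;
}
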
2370 struct qede_dev *edev = netdev_priv(ndev); in qede_open() local
2374 edev->ops->common->set_power_state(edev->cdev, PCI_D0); in qede_open()
2376 return qede_load(edev, QEDE_LOAD_NORMAL); in qede_open()
2381 struct qede_dev *edev = netdev_priv(ndev); in qede_close() local
2383 qede_unload(edev, QEDE_UNLOAD_NORMAL); in qede_close()
2390 struct qede_dev *edev = dev; in qede_link_update() local
2392 if (!netif_running(edev->ndev)) { in qede_link_update()
2393 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); in qede_link_update()
2398 DP_NOTICE(edev, "Link is up\n"); in qede_link_update()
2399 netif_tx_start_all_queues(edev->ndev); in qede_link_update()
2400 netif_carrier_on(edev->ndev); in qede_link_update()
2402 DP_NOTICE(edev, "Link is down\n"); in qede_link_update()
2403 netif_tx_disable(edev->ndev); in qede_link_update()
2404 netif_carrier_off(edev->ndev); in qede_link_update()
2410 struct qede_dev *edev = netdev_priv(ndev); in qede_set_mac_addr() local
2416 DP_INFO(edev, "Set_mac_addr called\n"); in qede_set_mac_addr()
2419 DP_NOTICE(edev, "The MAC address is not valid\n"); in qede_set_mac_addr()
2426 DP_NOTICE(edev, "The device is currently down\n"); in qede_set_mac_addr()
2431 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, in qede_set_mac_addr()
2432 edev->primary_mac); in qede_set_mac_addr()
2437 ether_addr_copy(edev->primary_mac, ndev->dev_addr); in qede_set_mac_addr()
2438 return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, in qede_set_mac_addr()
2439 edev->primary_mac); in qede_set_mac_addr()
2446 struct qede_dev *edev = netdev_priv(ndev); in qede_configure_mcast_filtering() local
2456 DP_NOTICE(edev, in qede_configure_mcast_filtering()
2465 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, in qede_configure_mcast_filtering()
2489 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, in qede_configure_mcast_filtering()
2500 struct qede_dev *edev = netdev_priv(ndev); in qede_set_rx_mode() local
2502 DP_INFO(edev, "qede_set_rx_mode called\n"); in qede_set_rx_mode()
2504 if (edev->state != QEDE_STATE_OPEN) { in qede_set_rx_mode()
2505 DP_INFO(edev, in qede_set_rx_mode()
2508 set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); in qede_set_rx_mode()
2509 schedule_delayed_work(&edev->sp_task, 0); in qede_set_rx_mode()
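
Note: qede_set_rx_mode() above defers filter reconfiguration: it sets the QEDE_SP_RX_MODE flag and schedules sp_task, and qede_sp_task() later applies qede_config_rx_mode() under qede_lock, but only while the device state is QEDE_STATE_OPEN. A single-threaded userspace sketch of that deferred pattern; the flags, the worker and all names are stand-ins, not driver or kernel API:

#include <stdio.h>
#include <stdbool.h>

enum { SP_RX_MODE = 1u << 0 };

static unsigned int sp_flags;
static bool dev_open = true;

static void config_rx_mode(void) { puts("apply RX filters"); }

static void set_rx_mode(void)            /* ndo_set_rx_mode analogue */
{
	sp_flags |= SP_RX_MODE;
	puts("scheduled sp_task");
}

static void sp_task(void)                /* delayed-work handler analogue */
{
	/* qede_lock would be held here */
	if (dev_open && (sp_flags & SP_RX_MODE)) {
		sp_flags &= ~SP_RX_MODE;
		config_rx_mode();
	}
}

int main(void)
{
	set_rx_mode();
	sp_task();
	return 0;
}
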
2517 struct qede_dev *edev = netdev_priv(ndev); in qede_config_rx_mode() local
2531 DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); in qede_config_rx_mode()
2551 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, in qede_config_rx_mode()
2552 edev->primary_mac); in qede_config_rx_mode()
2566 rc = qede_set_ucast_rx_mac(edev, in qede_config_rx_mode()
2581 edev->ops->filter_config(edev->cdev, &rx_mode); in qede_config_rx_mode()