Lines matching refs:enic (identifier cross-reference, Cisco VIC Ethernet NIC driver)

Each entry shows the source line number, the matching line, and the enclosing function; "argument" marks a use as a function parameter, "local" a use as a local variable.

115 int enic_is_dynamic(struct enic *enic)  in enic_is_dynamic()  argument
117 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; in enic_is_dynamic()
120 int enic_sriov_enabled(struct enic *enic) in enic_sriov_enabled() argument
122 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; in enic_sriov_enabled()
125 static int enic_is_sriov_vf(struct enic *enic) in enic_is_sriov_vf() argument
127 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; in enic_is_sriov_vf()
130 int enic_is_valid_vf(struct enic *enic, int vf) in enic_is_valid_vf() argument
133 return vf >= 0 && vf < enic->num_vfs; in enic_is_valid_vf()
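
The four helpers above classify the adapter: dynamic vNICs and SR-IOV VFs are told apart purely by PCI device ID, SR-IOV state by a private flag, and VF indices by a simple range check. A minimal user-space sketch of that logic follows; the PCI IDs, flag bit and struct layout are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

#define PCI_ID_VIC_ENET_DYN 0x0044      /* hypothetical device ID */
#define PCI_ID_VIC_ENET_VF  0x0071      /* hypothetical device ID */
#define SRIOV_ENABLED       (1u << 0)

struct nic {
        unsigned short pci_device;
        unsigned int priv_flags;
        int num_vfs;
};

static int nic_is_dynamic(const struct nic *n)
{
        return n->pci_device == PCI_ID_VIC_ENET_DYN;
}

static int nic_sriov_enabled(const struct nic *n)
{
        return (n->priv_flags & SRIOV_ENABLED) ? 1 : 0;
}

static int nic_is_valid_vf(const struct nic *n, int vf)
{
        /* valid VF indices form the half-open range [0, num_vfs) */
        return vf >= 0 && vf < n->num_vfs;
}

int main(void)
{
        struct nic n = { PCI_ID_VIC_ENET_VF, SRIOV_ENABLED, 4 };

        printf("dyn=%d sriov=%d vf3=%d vf4=%d\n", nic_is_dynamic(&n),
               nic_sriov_enabled(&n), nic_is_valid_vf(&n, 3),
               nic_is_valid_vf(&n, 4));
        return 0;
}
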
141 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() local
144 pci_unmap_single(enic->pdev, buf->dma_addr, in enic_free_wq_buf()
147 pci_unmap_page(enic->pdev, buf->dma_addr, in enic_free_wq_buf()
163 struct enic *enic = vnic_dev_priv(vdev); in enic_wq_service() local
165 spin_lock(&enic->wq_lock[q_number]); in enic_wq_service()
167 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service()
171 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && in enic_wq_service()
172 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service()
174 netif_wake_subqueue(enic->netdev, q_number); in enic_wq_service()
176 spin_unlock(&enic->wq_lock[q_number]); in enic_wq_service()
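
enic_free_wq_buf() and enic_wq_service() above implement TX completion: unmap each finished buffer, then wake a stopped TX subqueue once enough descriptors are free again. A self-contained sketch of that wake-threshold pattern; the threshold value and names are made up, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define WAKE_THRESH 8   /* stand-in for the driver's wakeup threshold */

struct txq {
        bool stopped;
        int desc_avail;
};

static void tx_completion(struct txq *q, int completed)
{
        q->desc_avail += completed;
        /* Wake only if the stack had stopped the queue AND there is
         * room for a worst-case packet, avoiding wake/stop thrash. */
        if (q->stopped && q->desc_avail >= WAKE_THRESH) {
                q->stopped = false;
                printf("queue woken (avail=%d)\n", q->desc_avail);
        }
}

int main(void)
{
        struct txq q = { .stopped = true, .desc_avail = 2 };

        tx_completion(&q, 3);   /* still short: stays stopped */
        tx_completion(&q, 5);   /* crosses threshold: woken */
        return 0;
}
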
181 static void enic_log_q_error(struct enic *enic) in enic_log_q_error() argument
186 for (i = 0; i < enic->wq_count; i++) { in enic_log_q_error()
187 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error()
189 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", in enic_log_q_error()
193 for (i = 0; i < enic->rq_count; i++) { in enic_log_q_error()
194 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error()
196 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", in enic_log_q_error()
201 static void enic_msglvl_check(struct enic *enic) in enic_msglvl_check() argument
203 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); in enic_msglvl_check()
205 if (msg_enable != enic->msg_enable) { in enic_msglvl_check()
206 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", in enic_msglvl_check()
207 enic->msg_enable, msg_enable); in enic_msglvl_check()
208 enic->msg_enable = msg_enable; in enic_msglvl_check()
212 static void enic_mtu_check(struct enic *enic) in enic_mtu_check() argument
214 u32 mtu = vnic_dev_mtu(enic->vdev); in enic_mtu_check()
215 struct net_device *netdev = enic->netdev; in enic_mtu_check()
217 if (mtu && mtu != enic->port_mtu) { in enic_mtu_check()
218 enic->port_mtu = mtu; in enic_mtu_check()
219 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_mtu_check()
223 schedule_work(&enic->change_mtu_work); in enic_mtu_check()
234 static void enic_link_check(struct enic *enic) in enic_link_check() argument
236 int link_status = vnic_dev_link_status(enic->vdev); in enic_link_check()
237 int carrier_ok = netif_carrier_ok(enic->netdev); in enic_link_check()
240 netdev_info(enic->netdev, "Link UP\n"); in enic_link_check()
241 netif_carrier_on(enic->netdev); in enic_link_check()
243 netdev_info(enic->netdev, "Link DOWN\n"); in enic_link_check()
244 netif_carrier_off(enic->netdev); in enic_link_check()
248 static void enic_notify_check(struct enic *enic) in enic_notify_check() argument
250 enic_msglvl_check(enic); in enic_notify_check()
251 enic_mtu_check(enic); in enic_notify_check()
252 enic_link_check(enic); in enic_notify_check()
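
enic_notify_check() bundles three polls of the device notify area: message level, port MTU and link state, each reacting only when the cached value differs from what the hardware reports. A user-space model of that compare-and-react pattern; the values here are simulated, not read from a device:

#include <stdio.h>

struct nic_state {
        unsigned int msg_enable;
        unsigned int port_mtu;
        int carrier_ok;
};

static void notify_check(struct nic_state *s, unsigned int hw_msg,
                         unsigned int hw_mtu, int hw_link)
{
        if (hw_msg != s->msg_enable) {
                printf("msg lvl changed from 0x%x to 0x%x\n",
                       s->msg_enable, hw_msg);
                s->msg_enable = hw_msg;
        }
        if (hw_mtu && hw_mtu != s->port_mtu) {
                printf("port MTU %u -> %u, schedule MTU work\n",
                       s->port_mtu, hw_mtu);
                s->port_mtu = hw_mtu;
        }
        if (hw_link && !s->carrier_ok)
                puts("Link UP");
        else if (!hw_link && s->carrier_ok)
                puts("Link DOWN");
        s->carrier_ok = hw_link;
}

int main(void)
{
        struct nic_state s = { 0x7, 1500, 0 };

        notify_check(&s, 0x7, 9000, 1); /* MTU change + link up */
        notify_check(&s, 0x7, 9000, 0); /* link down only */
        return 0;
}
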
260 struct enic *enic = netdev_priv(netdev); in enic_isr_legacy() local
266 vnic_intr_mask(&enic->intr[io_intr]); in enic_isr_legacy()
268 pba = vnic_intr_legacy_pba(enic->legacy_pba); in enic_isr_legacy()
270 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
275 enic_notify_check(enic); in enic_isr_legacy()
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]); in enic_isr_legacy()
280 vnic_intr_return_all_credits(&enic->intr[err_intr]); in enic_isr_legacy()
281 enic_log_q_error(enic); in enic_isr_legacy()
283 schedule_work(&enic->reset); in enic_isr_legacy()
288 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_legacy()
290 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
297 struct enic *enic = data; in enic_isr_msi() local
315 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_msi()
331 struct enic *enic = data; in enic_isr_msix_err() local
332 unsigned int intr = enic_msix_err_intr(enic); in enic_isr_msix_err()
334 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_err()
336 enic_log_q_error(enic); in enic_isr_msix_err()
339 schedule_work(&enic->reset); in enic_isr_msix_err()
346 struct enic *enic = data; in enic_isr_msix_notify() local
347 unsigned int intr = enic_msix_notify_intr(enic); in enic_isr_msix_notify()
349 enic_notify_check(enic); in enic_isr_msix_notify()
350 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_notify()
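
The ISR cluster above covers three interrupt flavors: legacy INTx demultiplexes one shared vector via a pending-bits (PBA) word, MSI uses a single dedicated vector, and MSI-X gives the error and notify sources their own vectors. Below is a sketch of the legacy demultiplexing order visible above (notify, then error, then I/O); the bit assignments are hypothetical:

#include <stdio.h>

#define PBA_IO     (1u << 0)    /* hypothetical bit assignments */
#define PBA_ERR    (1u << 1)
#define PBA_NOTIFY (1u << 2)

static int isr_legacy(unsigned int pba)
{
        if (!pba)               /* not our interrupt */
                return 0;       /* IRQ_NONE analogue */
        if (pba & PBA_NOTIFY)
                puts("notify: check link/MTU/msglvl, return credits");
        if (pba & PBA_ERR)
                puts("error: log queue status, schedule reset work");
        if (pba & PBA_IO)
                puts("io: schedule NAPI poll");
        return 1;               /* IRQ_HANDLED analogue */
}

int main(void)
{
        isr_legacy(PBA_IO | PBA_NOTIFY);
        return 0;
}
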
355 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument
365 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, in enic_queue_wq_skb_cont()
368 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_cont()
378 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument
388 dma_addr = pci_map_single(enic->pdev, skb->data, head_len, in enic_queue_wq_skb_vlan()
390 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_vlan()
402 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_vlan()
407 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_csum_l4() argument
419 dma_addr = pci_map_single(enic->pdev, skb->data, head_len, in enic_queue_wq_skb_csum_l4()
421 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_csum_l4()
434 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_csum_l4()
439 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_tso() argument
472 dma_addr = pci_map_single(enic->pdev, skb->data + offset, len, in enic_queue_wq_skb_tso()
474 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
497 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, in enic_queue_wq_skb_tso()
500 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
514 static inline void enic_queue_wq_skb(struct enic *enic, in enic_queue_wq_skb() argument
527 } else if (enic->loop_enable) { in enic_queue_wq_skb()
528 vlan_tag = enic->loop_tag; in enic_queue_wq_skb()
533 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, in enic_queue_wq_skb()
537 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
540 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
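
enic_queue_wq_skb() dispatches each skb to one of three descriptor builders: the TSO path when a gso size (mss) is set, the L4-checksum-offload path when the stack asked for hardware checksumming, and the plain/VLAN path otherwise. That decision, reduced to a self-contained sketch:

#include <stdio.h>

enum tx_path { TX_TSO, TX_CSUM_L4, TX_VLAN_PLAIN };

static enum tx_path pick_tx_path(unsigned int mss, int needs_csum)
{
        if (mss)                /* gso size set: hardware segmentation */
                return TX_TSO;
        if (needs_csum)         /* CHECKSUM_PARTIAL analogue */
                return TX_CSUM_L4;
        return TX_VLAN_PLAIN;   /* plain descriptor, optional VLAN tag */
}

int main(void)
{
        printf("%d %d %d\n", pick_tx_path(1448, 1),
               pick_tx_path(0, 1), pick_tx_path(0, 0));
        return 0;
}
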
563 struct enic *enic = netdev_priv(netdev); in enic_hard_start_xmit() local
573 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; in enic_hard_start_xmit()
574 wq = &enic->wq[txq_map]; in enic_hard_start_xmit()
589 spin_lock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
596 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
600 enic_queue_wq_skb(enic, wq, skb); in enic_hard_start_xmit()
607 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
616 struct enic *enic = netdev_priv(netdev); in enic_get_stats() local
620 err = enic_dev_stats_dump(enic, &stats); in enic_get_stats()
637 net_stats->rx_over_errors = enic->rq_truncated_pkts; in enic_get_stats()
638 net_stats->rx_crc_errors = enic->rq_bad_fcs; in enic_get_stats()
646 struct enic *enic = netdev_priv(netdev); in enic_mc_sync() local
648 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { in enic_mc_sync()
657 enic_dev_add_addr(enic, mc_addr); in enic_mc_sync()
658 enic->mc_count++; in enic_mc_sync()
665 struct enic *enic = netdev_priv(netdev); in enic_mc_unsync() local
667 enic_dev_del_addr(enic, mc_addr); in enic_mc_unsync()
668 enic->mc_count--; in enic_mc_unsync()
675 struct enic *enic = netdev_priv(netdev); in enic_uc_sync() local
677 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { in enic_uc_sync()
686 enic_dev_add_addr(enic, uc_addr); in enic_uc_sync()
687 enic->uc_count++; in enic_uc_sync()
694 struct enic *enic = netdev_priv(netdev); in enic_uc_unsync() local
696 enic_dev_del_addr(enic, uc_addr); in enic_uc_unsync()
697 enic->uc_count--; in enic_uc_unsync()
702 void enic_reset_addr_lists(struct enic *enic) in enic_reset_addr_lists() argument
704 struct net_device *netdev = enic->netdev; in enic_reset_addr_lists()
709 enic->mc_count = 0; in enic_reset_addr_lists()
710 enic->uc_count = 0; in enic_reset_addr_lists()
711 enic->flags = 0; in enic_reset_addr_lists()
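
The unicast/multicast sync and unsync callbacks above guard a fixed pool of hardware perfect-filter slots: an add is refused once the table is full, and every add or delete adjusts a cached count that enic_reset_addr_lists() zeroes. A minimal model of that bookkeeping; the slot count and error value are stand-ins:

#include <stdio.h>

#define MC_SLOTS 64     /* stand-in for ENIC_MULTICAST_PERFECT_FILTERS */

struct filt {
        int mc_count;
};

static int mc_sync(struct filt *f, const char *addr)
{
        if (f->mc_count == MC_SLOTS) {
                fprintf(stderr, "mc table full, %s not added\n", addr);
                return -1;      /* ENOSPC analogue */
        }
        /* the real callback programs a hardware filter here */
        f->mc_count++;
        return 0;
}

static void mc_unsync(struct filt *f, const char *addr)
{
        (void)addr;             /* the real callback removes the filter */
        f->mc_count--;
}

int main(void)
{
        struct filt f = { MC_SLOTS - 1 };

        mc_sync(&f, "01:00:5e:00:00:01");       /* takes the last slot */
        mc_sync(&f, "01:00:5e:00:00:02");       /* refused: table full */
        mc_unsync(&f, "01:00:5e:00:00:01");
        return 0;
}
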
716 struct enic *enic = netdev_priv(netdev); in enic_set_mac_addr() local
718 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_set_mac_addr()
733 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address_dynamic() local
738 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
739 err = enic_dev_del_station_addr(enic); in enic_set_mac_address_dynamic()
748 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
749 err = enic_dev_add_station_addr(enic); in enic_set_mac_address_dynamic()
761 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address() local
764 err = enic_dev_del_station_addr(enic); in enic_set_mac_address()
772 return enic_dev_add_station_addr(enic); in enic_set_mac_address()
778 struct enic *enic = netdev_priv(netdev); in enic_set_rx_mode() local
790 if (enic->flags != flags) { in enic_set_rx_mode()
791 enic->flags = flags; in enic_set_rx_mode()
792 enic_dev_packet_filter(enic, directed, in enic_set_rx_mode()
806 struct enic *enic = netdev_priv(netdev); in enic_tx_timeout() local
807 schedule_work(&enic->reset); in enic_tx_timeout()
812 struct enic *enic = netdev_priv(netdev); in enic_set_vf_mac() local
816 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_mac()
828 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_mac()
839 struct enic *enic = netdev_priv(netdev); in enic_set_vf_port() local
844 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_port()
851 memcpy(&prev_pp, pp, sizeof(*enic->pp)); in enic_set_vf_port()
852 memset(pp, 0, sizeof(*enic->pp)); in enic_set_vf_port()
884 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_port()
893 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); in enic_set_vf_port()
928 struct enic *enic = netdev_priv(netdev); in enic_get_vf_port() local
933 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_get_vf_port()
940 err = enic_process_get_pp_request(enic, vf, pp->request, &response); in enic_get_vf_port()
962 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf() local
967 pci_unmap_single(enic->pdev, buf->dma_addr, in enic_free_rq_buf()
975 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf() local
976 struct net_device *netdev = enic->netdev; in enic_rq_alloc_buf()
993 dma_addr = pci_map_single(enic->pdev, skb->data, len, in enic_rq_alloc_buf()
995 if (unlikely(enic_dma_map_check(enic, dma_addr))) { in enic_rq_alloc_buf()
1018 struct enic *enic = netdev_priv(netdev); in enic_rxcopybreak() local
1021 if (len > enic->rx_copybreak) in enic_rxcopybreak()
1026 pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len, in enic_rxcopybreak()
1038 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf() local
1039 struct net_device *netdev = enic->netdev; in enic_rq_indicate_buf()
1041 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_rq_indicate_buf()
1070 enic->rq_bad_fcs++; in enic_rq_indicate_buf()
1072 enic->rq_truncated_pkts++; in enic_rq_indicate_buf()
1075 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1090 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1119 skb_mark_napi_id(skb, &enic->napi[rq->index]); in enic_rq_indicate_buf()
1124 napi_gro_receive(&enic->napi[q_number], skb); in enic_rq_indicate_buf()
1125 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_rq_indicate_buf()
1133 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
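
enic_rxcopybreak() copies frames no larger than enic->rx_copybreak into a fresh skb so the original DMA buffer can be reused, while larger frames are unmapped and handed up directly (the pci_unmap_single() calls above). The cutoff decision as a runnable sketch, with an illustrative threshold:

#include <stdio.h>
#include <string.h>

#define RX_COPYBREAK 256        /* illustrative cutoff; tunable */

/* returns 1 if the frame was copied (so the buffer can be recycled) */
static int deliver(const char *buf, int len)
{
        char copy[RX_COPYBREAK];

        if (len > RX_COPYBREAK)
                return 0;       /* hand the mapped buffer up as-is */
        memcpy(copy, buf, len); /* small frame: copy, recycle buffer */
        return 1;
}

int main(void)
{
        char frame[2048] = "payload";

        printf("64B copied=%d, 1500B copied=%d\n",
               deliver(frame, 64), deliver(frame, 1500));
        return 0;
}
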
1143 struct enic *enic = vnic_dev_priv(vdev); in enic_rq_service() local
1145 vnic_rq_service(&enic->rq[q_number], cq_desc, in enic_rq_service()
1155 struct enic *enic = netdev_priv(netdev); in enic_poll() local
1156 unsigned int cq_rq = enic_cq_rq(enic, 0); in enic_poll()
1157 unsigned int cq_wq = enic_cq_wq(enic, 0); in enic_poll()
1164 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, in enic_poll()
1167 if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { in enic_poll()
1169 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1177 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], in enic_poll()
1188 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_poll()
1209 vnic_intr_unmask(&enic->intr[intr]); in enic_poll()
1211 enic_poll_unlock_napi(&enic->rq[cq_rq]); in enic_poll()
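
enic_poll() follows the usual NAPI contract: service at most `budget` completions, refill the ring, and only when fewer than `budget` were consumed complete NAPI and unmask the interrupt; otherwise stay in polling mode. A compressed model of that loop:

#include <stdio.h>

static int pending = 37;        /* simulated completions outstanding */

static int poll(int budget)
{
        int done = pending < budget ? pending : budget;

        pending -= done;        /* service + refill would happen here */
        if (done < budget)
                puts("napi_complete, unmask interrupt");
        else
                puts("budget exhausted, stay in polling mode");
        return done;
}

int main(void)
{
        while (poll(16) == 16)
                ;
        return 0;
}
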
1216 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_set_int_moderation() argument
1218 unsigned int intr = enic_msix_rq_intr(enic, rq->index); in enic_set_int_moderation()
1219 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_set_int_moderation()
1223 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); in enic_set_int_moderation()
1228 static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_calc_int_moderation() argument
1230 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_calc_int_moderation()
1231 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_calc_int_moderation()
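
enic_calc_int_moderation() and enic_set_int_moderation() implement adaptive RX coalescing: the observed traffic rate selects a coalescing timer between a low (latency-friendly) and a high (interrupt-saving) bound, which is then written to the interrupt resource. The interpolation below only sketches the idea; the driver's actual range table and weighting differ:

#include <stdio.h>

static unsigned int pick_coal_usec(unsigned long pkts_per_sec,
                                   unsigned int lo, unsigned int hi)
{
        unsigned long full = 1000000;   /* hypothetical saturation pps */

        /* low rate -> short timer (latency); high rate -> long timer
         * (fewer interrupts); simple clamped linear ramp */
        if (pkts_per_sec >= full)
                return hi;
        return lo + (unsigned int)((hi - lo) * pkts_per_sec / full);
}

int main(void)
{
        printf("slow=%uus fast=%uus\n",
               pick_coal_usec(10000, 4, 128),
               pick_coal_usec(900000, 4, 128));
        return 0;
}
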
1275 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1277 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); in enic_free_rx_cpu_rmap()
1278 enic->netdev->rx_cpu_rmap = NULL; in enic_free_rx_cpu_rmap()
1281 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1285 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { in enic_set_rx_cpu_rmap()
1286 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); in enic_set_rx_cpu_rmap()
1287 if (unlikely(!enic->netdev->rx_cpu_rmap)) in enic_set_rx_cpu_rmap()
1289 for (i = 0; i < enic->rq_count; i++) { in enic_set_rx_cpu_rmap()
1290 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, in enic_set_rx_cpu_rmap()
1291 enic->msix_entry[i].vector); in enic_set_rx_cpu_rmap()
1293 enic_free_rx_cpu_rmap(enic); in enic_set_rx_cpu_rmap()
1302 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1306 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1316 struct enic *enic = netdev_priv(netdev); in enic_busy_poll() local
1317 unsigned int rq = (napi - &enic->napi[0]); in enic_busy_poll()
1318 unsigned int cq = enic_cq_rq(enic, rq); in enic_busy_poll()
1319 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_busy_poll()
1323 if (!enic_poll_lock_poll(&enic->rq[rq])) in enic_busy_poll()
1325 work_done = vnic_cq_service(&enic->cq[cq], work_to_do, in enic_busy_poll()
1329 vnic_intr_return_credits(&enic->intr[intr], in enic_busy_poll()
1331 vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_busy_poll()
1332 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_busy_poll()
1333 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_busy_poll()
1334 enic_poll_unlock_poll(&enic->rq[rq]); in enic_busy_poll()
1343 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_wq() local
1344 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; in enic_poll_msix_wq()
1345 struct vnic_wq *wq = &enic->wq[wq_index]; in enic_poll_msix_wq()
1353 cq = enic_cq_wq(enic, wq_irq); in enic_poll_msix_wq()
1354 intr = enic_msix_wq_intr(enic, wq_irq); in enic_poll_msix_wq()
1355 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, in enic_poll_msix_wq()
1358 vnic_intr_return_credits(&enic->intr[intr], wq_work_done, in enic_poll_msix_wq()
1363 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_wq()
1373 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_rq() local
1374 unsigned int rq = (napi - &enic->napi[0]); in enic_poll_msix_rq()
1375 unsigned int cq = enic_cq_rq(enic, rq); in enic_poll_msix_rq()
1376 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_poll_msix_rq()
1381 if (!enic_poll_lock_napi(&enic->rq[rq])) in enic_poll_msix_rq()
1387 work_done = vnic_cq_service(&enic->cq[cq], in enic_poll_msix_rq()
1396 vnic_intr_return_credits(&enic->intr[intr], in enic_poll_msix_rq()
1401 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_poll_msix_rq()
1409 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1415 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1417 enic_poll_unlock_napi(&enic->rq[rq]); in enic_poll_msix_rq()
1425 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1426 enic_set_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1427 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_rq()
1435 struct enic *enic = (struct enic *)data; in enic_notify_timer() local
1437 enic_notify_check(enic); in enic_notify_timer()
1439 mod_timer(&enic->notify_timer, in enic_notify_timer()
1443 static void enic_free_intr(struct enic *enic) in enic_free_intr() argument
1445 struct net_device *netdev = enic->netdev; in enic_free_intr()
1448 enic_free_rx_cpu_rmap(enic); in enic_free_intr()
1449 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_free_intr()
1451 free_irq(enic->pdev->irq, netdev); in enic_free_intr()
1454 free_irq(enic->pdev->irq, enic); in enic_free_intr()
1457 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_free_intr()
1458 if (enic->msix[i].requested) in enic_free_intr()
1459 free_irq(enic->msix_entry[i].vector, in enic_free_intr()
1460 enic->msix[i].devid); in enic_free_intr()
1467 static int enic_request_intr(struct enic *enic) in enic_request_intr() argument
1469 struct net_device *netdev = enic->netdev; in enic_request_intr()
1473 enic_set_rx_cpu_rmap(enic); in enic_request_intr()
1474 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_request_intr()
1478 err = request_irq(enic->pdev->irq, enic_isr_legacy, in enic_request_intr()
1484 err = request_irq(enic->pdev->irq, enic_isr_msi, in enic_request_intr()
1485 0, netdev->name, enic); in enic_request_intr()
1490 for (i = 0; i < enic->rq_count; i++) { in enic_request_intr()
1491 intr = enic_msix_rq_intr(enic, i); in enic_request_intr()
1492 snprintf(enic->msix[intr].devname, in enic_request_intr()
1493 sizeof(enic->msix[intr].devname), in enic_request_intr()
1495 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1496 enic->msix[intr].devid = &enic->napi[i]; in enic_request_intr()
1499 for (i = 0; i < enic->wq_count; i++) { in enic_request_intr()
1500 int wq = enic_cq_wq(enic, i); in enic_request_intr()
1502 intr = enic_msix_wq_intr(enic, i); in enic_request_intr()
1503 snprintf(enic->msix[intr].devname, in enic_request_intr()
1504 sizeof(enic->msix[intr].devname), in enic_request_intr()
1506 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1507 enic->msix[intr].devid = &enic->napi[wq]; in enic_request_intr()
1510 intr = enic_msix_err_intr(enic); in enic_request_intr()
1511 snprintf(enic->msix[intr].devname, in enic_request_intr()
1512 sizeof(enic->msix[intr].devname), in enic_request_intr()
1514 enic->msix[intr].isr = enic_isr_msix_err; in enic_request_intr()
1515 enic->msix[intr].devid = enic; in enic_request_intr()
1517 intr = enic_msix_notify_intr(enic); in enic_request_intr()
1518 snprintf(enic->msix[intr].devname, in enic_request_intr()
1519 sizeof(enic->msix[intr].devname), in enic_request_intr()
1521 enic->msix[intr].isr = enic_isr_msix_notify; in enic_request_intr()
1522 enic->msix[intr].devid = enic; in enic_request_intr()
1524 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_request_intr()
1525 enic->msix[i].requested = 0; in enic_request_intr()
1527 for (i = 0; i < enic->intr_count; i++) { in enic_request_intr()
1528 err = request_irq(enic->msix_entry[i].vector, in enic_request_intr()
1529 enic->msix[i].isr, 0, in enic_request_intr()
1530 enic->msix[i].devname, in enic_request_intr()
1531 enic->msix[i].devid); in enic_request_intr()
1533 enic_free_intr(enic); in enic_request_intr()
1536 enic->msix[i].requested = 1; in enic_request_intr()
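
The registration loop above relies on a fixed MSI-X vector layout: one vector per RQ, then one per WQ, then the error vector, then the notify vector, matching the n + m + 2 sizing used in enic_set_intr_mode(). Index helpers modeling that layout (a simplification of the driver's per-cq interrupt_offset indirection):

#include <stdio.h>

static int msix_rq_intr(int rq)                 { return rq; }
static int msix_wq_intr(int n_rq, int wq)       { return n_rq + wq; }
static int msix_err_intr(int n_rq, int n_wq)    { return n_rq + n_wq; }
static int msix_notify_intr(int n_rq, int n_wq) { return n_rq + n_wq + 1; }

int main(void)
{
        int n = 4, m = 4;       /* example RQ/WQ counts */

        printf("rq0=%d wq0=%d err=%d notify=%d total=%d\n",
               msix_rq_intr(0), msix_wq_intr(n, 0),
               msix_err_intr(n, m), msix_notify_intr(n, m),
               n + m + 2);
        return 0;
}
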
1548 static void enic_synchronize_irqs(struct enic *enic) in enic_synchronize_irqs() argument
1552 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_synchronize_irqs()
1555 synchronize_irq(enic->pdev->irq); in enic_synchronize_irqs()
1558 for (i = 0; i < enic->intr_count; i++) in enic_synchronize_irqs()
1559 synchronize_irq(enic->msix_entry[i].vector); in enic_synchronize_irqs()
1566 static void enic_set_rx_coal_setting(struct enic *enic) in enic_set_rx_coal_setting() argument
1570 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_set_rx_coal_setting()
1573 if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { in enic_set_rx_coal_setting()
1574 netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); in enic_set_rx_coal_setting()
1582 speed = vnic_dev_port_speed(enic->vdev); in enic_set_rx_coal_setting()
1595 for (index = 0; index < enic->rq_count; index++) in enic_set_rx_coal_setting()
1596 enic->cq[index].cur_rx_coal_timeval = in enic_set_rx_coal_setting()
1597 enic->config.intr_timer_usec; in enic_set_rx_coal_setting()
1602 static int enic_dev_notify_set(struct enic *enic) in enic_dev_notify_set() argument
1606 spin_lock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1607 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_notify_set()
1609 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1613 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1614 enic_msix_notify_intr(enic)); in enic_dev_notify_set()
1617 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); in enic_dev_notify_set()
1620 spin_unlock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1625 static void enic_notify_timer_start(struct enic *enic) in enic_notify_timer_start() argument
1627 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_notify_timer_start()
1629 mod_timer(&enic->notify_timer, jiffies); in enic_notify_timer_start()
1640 struct enic *enic = netdev_priv(netdev); in enic_open() local
1644 err = enic_request_intr(enic); in enic_open()
1650 err = enic_dev_notify_set(enic); in enic_open()
1657 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1658 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); in enic_open()
1660 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { in enic_open()
1667 for (i = 0; i < enic->wq_count; i++) in enic_open()
1668 vnic_wq_enable(&enic->wq[i]); in enic_open()
1669 for (i = 0; i < enic->rq_count; i++) in enic_open()
1670 vnic_rq_enable(&enic->rq[i]); in enic_open()
1672 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_open()
1673 enic_dev_add_station_addr(enic); in enic_open()
1679 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1680 enic_busy_poll_init_lock(&enic->rq[i]); in enic_open()
1681 napi_enable(&enic->napi[i]); in enic_open()
1683 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_open()
1684 for (i = 0; i < enic->wq_count; i++) in enic_open()
1685 napi_enable(&enic->napi[enic_cq_wq(enic, i)]); in enic_open()
1686 enic_dev_enable(enic); in enic_open()
1688 for (i = 0; i < enic->intr_count; i++) in enic_open()
1689 vnic_intr_unmask(&enic->intr[i]); in enic_open()
1691 enic_notify_timer_start(enic); in enic_open()
1692 enic_rfs_flw_tbl_init(enic); in enic_open()
1697 for (i = 0; i < enic->rq_count; i++) in enic_open()
1698 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_open()
1699 enic_dev_notify_unset(enic); in enic_open()
1701 enic_free_intr(enic); in enic_open()
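
The tail of enic_open() (file lines 1697-1701 above) shows the kernel's unwind-with-goto error idiom: each failure label undoes only the steps that had already succeeded, in reverse order. A self-contained skeleton of the pattern, with stand-in step functions:

#include <stdio.h>

static int request_intr(void)  { return 0; }
static int notify_set(void)    { return -1; }   /* simulate a failure */
static void free_intr(void)    { puts("undo: free_intr"); }

static int open_dev(void)
{
        int err;

        err = request_intr();
        if (err)
                goto err_out;
        err = notify_set();
        if (err)
                goto err_out_free_intr; /* undo only what succeeded */
        return 0;

err_out_free_intr:
        free_intr();
err_out:
        return err;
}

int main(void)
{
        printf("open_dev=%d\n", open_dev());
        return 0;
}
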
1709 struct enic *enic = netdev_priv(netdev); in enic_stop() local
1713 for (i = 0; i < enic->intr_count; i++) { in enic_stop()
1714 vnic_intr_mask(&enic->intr[i]); in enic_stop()
1715 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ in enic_stop()
1718 enic_synchronize_irqs(enic); in enic_stop()
1720 del_timer_sync(&enic->notify_timer); in enic_stop()
1721 enic_rfs_flw_tbl_free(enic); in enic_stop()
1723 enic_dev_disable(enic); in enic_stop()
1725 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1726 napi_disable(&enic->napi[i]); in enic_stop()
1728 while (!enic_poll_lock_napi(&enic->rq[i])) in enic_stop()
1735 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_stop()
1736 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1737 napi_disable(&enic->napi[enic_cq_wq(enic, i)]); in enic_stop()
1739 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_stop()
1740 enic_dev_del_station_addr(enic); in enic_stop()
1742 for (i = 0; i < enic->wq_count; i++) { in enic_stop()
1743 err = vnic_wq_disable(&enic->wq[i]); in enic_stop()
1747 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1748 err = vnic_rq_disable(&enic->rq[i]); in enic_stop()
1753 enic_dev_notify_unset(enic); in enic_stop()
1754 enic_free_intr(enic); in enic_stop()
1756 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1757 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); in enic_stop()
1758 for (i = 0; i < enic->rq_count; i++) in enic_stop()
1759 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_stop()
1760 for (i = 0; i < enic->cq_count; i++) in enic_stop()
1761 vnic_cq_clean(&enic->cq[i]); in enic_stop()
1762 for (i = 0; i < enic->intr_count; i++) in enic_stop()
1763 vnic_intr_clean(&enic->intr[i]); in enic_stop()
1770 struct enic *enic = netdev_priv(netdev); in enic_change_mtu() local
1776 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_change_mtu()
1784 if (netdev->mtu > enic->port_mtu) in enic_change_mtu()
1787 netdev->mtu, enic->port_mtu); in enic_change_mtu()
1797 struct enic *enic = container_of(work, struct enic, change_mtu_work); in enic_change_mtu_work() local
1798 struct net_device *netdev = enic->netdev; in enic_change_mtu_work()
1799 int new_mtu = vnic_dev_mtu(enic->vdev); in enic_change_mtu_work()
1808 del_timer_sync(&enic->notify_timer); in enic_change_mtu_work()
1810 for (i = 0; i < enic->rq_count; i++) in enic_change_mtu_work()
1811 napi_disable(&enic->napi[i]); in enic_change_mtu_work()
1813 vnic_intr_mask(&enic->intr[0]); in enic_change_mtu_work()
1814 enic_synchronize_irqs(enic); in enic_change_mtu_work()
1815 err = vnic_rq_disable(&enic->rq[0]); in enic_change_mtu_work()
1821 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); in enic_change_mtu_work()
1822 vnic_cq_clean(&enic->cq[0]); in enic_change_mtu_work()
1823 vnic_intr_clean(&enic->intr[0]); in enic_change_mtu_work()
1827 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_change_mtu_work()
1829 if (vnic_rq_desc_used(&enic->rq[0]) == 0) { in enic_change_mtu_work()
1836 vnic_rq_enable(&enic->rq[0]); in enic_change_mtu_work()
1837 napi_enable(&enic->napi[0]); in enic_change_mtu_work()
1838 vnic_intr_unmask(&enic->intr[0]); in enic_change_mtu_work()
1839 enic_notify_timer_start(enic); in enic_change_mtu_work()
1849 struct enic *enic = netdev_priv(netdev); in enic_poll_controller() local
1850 struct vnic_dev *vdev = enic->vdev; in enic_poll_controller()
1855 for (i = 0; i < enic->rq_count; i++) { in enic_poll_controller()
1856 intr = enic_msix_rq_intr(enic, i); in enic_poll_controller()
1857 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
1858 &enic->napi[i]); in enic_poll_controller()
1861 for (i = 0; i < enic->wq_count; i++) { in enic_poll_controller()
1862 intr = enic_msix_wq_intr(enic, i); in enic_poll_controller()
1863 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
1864 &enic->napi[enic_cq_wq(enic, i)]); in enic_poll_controller()
1869 enic_isr_msi(enic->pdev->irq, enic); in enic_poll_controller()
1872 enic_isr_legacy(enic->pdev->irq, netdev); in enic_poll_controller()
1915 static int enic_dev_open(struct enic *enic) in enic_dev_open() argument
1919 err = enic_dev_wait(enic->vdev, vnic_dev_open, in enic_dev_open()
1922 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", in enic_dev_open()
1928 static int enic_dev_hang_reset(struct enic *enic) in enic_dev_hang_reset() argument
1932 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, in enic_dev_hang_reset()
1935 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", in enic_dev_hang_reset()
1941 int __enic_set_rsskey(struct enic *enic) in __enic_set_rsskey() argument
1947 rss_key_buf_va = pci_zalloc_consistent(enic->pdev, in __enic_set_rsskey()
1956 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; in __enic_set_rsskey()
1958 spin_lock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
1959 err = enic_set_rss_key(enic, in __enic_set_rsskey()
1962 spin_unlock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
1964 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), in __enic_set_rsskey()
1970 static int enic_set_rsskey(struct enic *enic) in enic_set_rsskey() argument
1972 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); in enic_set_rsskey()
1974 return __enic_set_rsskey(enic); in enic_set_rsskey()
1977 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) in enic_set_rsscpu() argument
1984 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, in enic_set_rsscpu()
1990 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; in enic_set_rsscpu()
1992 spin_lock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
1993 err = enic_set_rss_cpu(enic, in enic_set_rsscpu()
1996 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
1998 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), in enic_set_rsscpu()
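
enic_set_rsscpu() programs the RSS indirection table round-robin across the receive queues, which is what the i % enic->rq_count expression above writes into the packed cpu[i/4].b[i%4] layout. The same fill, simplified to a flat table of illustrative size:

#include <stdio.h>

#define RSS_TABLE_LEN 64        /* stand-in for the real table size */

int main(void)
{
        unsigned char table[RSS_TABLE_LEN];
        int i, rq_count = 4;

        for (i = 0; i < RSS_TABLE_LEN; i++)
                table[i] = i % rq_count;        /* hash bucket -> RQ */
        printf("bucket 0 -> RQ%d, 5 -> RQ%d, 63 -> RQ%d\n",
               table[0], table[5], table[63]);
        return 0;
}
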
2004 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, in enic_set_niccfg() argument
2014 spin_lock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2015 err = enic_set_nic_cfg(enic, in enic_set_niccfg()
2020 spin_unlock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2025 static int enic_set_rss_nic_cfg(struct enic *enic) in enic_set_rss_nic_cfg() argument
2027 struct device *dev = enic_get_dev(enic); in enic_set_rss_nic_cfg()
2035 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); in enic_set_rss_nic_cfg()
2038 if (!enic_set_rsskey(enic)) { in enic_set_rss_nic_cfg()
2039 if (enic_set_rsscpu(enic, rss_hash_bits)) { in enic_set_rss_nic_cfg()
2050 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, in enic_set_rss_nic_cfg()
2056 struct enic *enic = container_of(work, struct enic, reset); in enic_reset() local
2058 if (!netif_running(enic->netdev)) in enic_reset()
2063 spin_lock(&enic->enic_api_lock); in enic_reset()
2064 enic_dev_hang_notify(enic); in enic_reset()
2065 enic_stop(enic->netdev); in enic_reset()
2066 enic_dev_hang_reset(enic); in enic_reset()
2067 enic_reset_addr_lists(enic); in enic_reset()
2068 enic_init_vnic_resources(enic); in enic_reset()
2069 enic_set_rss_nic_cfg(enic); in enic_reset()
2070 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_reset()
2071 enic_open(enic->netdev); in enic_reset()
2072 spin_unlock(&enic->enic_api_lock); in enic_reset()
2073 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_reset()
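
The reset worker serializes a fixed recovery sequence under enic_api_lock: notify firmware of the hang, stop the interface, hard-reset the vNIC, rebuild address lists, resources, RSS and VLAN-rewrite configuration, then reopen. That ordering, sketched as a step table with stand-in functions:

#include <stdio.h>

static void hang_notify(void) { puts("1. tell firmware a hang was seen"); }
static void stop_dev(void)    { puts("2. enic_stop analogue"); }
static void hang_reset(void)  { puts("3. hard-reset the vNIC"); }
static void reinit(void)      { puts("4. addr lists, resources, RSS, VLAN"); }
static void reopen(void)      { puts("5. enic_open analogue"); }

int main(void)
{
        void (*steps[])(void) = { hang_notify, stop_dev, hang_reset,
                                  reinit, reopen };
        unsigned int i;

        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
                steps[i]();     /* strictly ordered; lock held in driver */
        return 0;
}
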
2078 static int enic_set_intr_mode(struct enic *enic) in enic_set_intr_mode() argument
2080 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); in enic_set_intr_mode()
2081 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); in enic_set_intr_mode()
2094 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); in enic_set_intr_mode()
2096 enic->msix_entry[i].entry = i; in enic_set_intr_mode()
2101 if (ENIC_SETTING(enic, RSS) && in enic_set_intr_mode()
2102 enic->config.intr_mode < 1 && in enic_set_intr_mode()
2103 enic->rq_count >= n && in enic_set_intr_mode()
2104 enic->wq_count >= m && in enic_set_intr_mode()
2105 enic->cq_count >= n + m && in enic_set_intr_mode()
2106 enic->intr_count >= n + m + 2) { in enic_set_intr_mode()
2108 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2111 enic->rq_count = n; in enic_set_intr_mode()
2112 enic->wq_count = m; in enic_set_intr_mode()
2113 enic->cq_count = n + m; in enic_set_intr_mode()
2114 enic->intr_count = n + m + 2; in enic_set_intr_mode()
2116 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2123 if (enic->config.intr_mode < 1 && in enic_set_intr_mode()
2124 enic->rq_count >= 1 && in enic_set_intr_mode()
2125 enic->wq_count >= m && in enic_set_intr_mode()
2126 enic->cq_count >= 1 + m && in enic_set_intr_mode()
2127 enic->intr_count >= 1 + m + 2) { in enic_set_intr_mode()
2128 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2131 enic->rq_count = 1; in enic_set_intr_mode()
2132 enic->wq_count = m; in enic_set_intr_mode()
2133 enic->cq_count = 1 + m; in enic_set_intr_mode()
2134 enic->intr_count = 1 + m + 2; in enic_set_intr_mode()
2136 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2148 if (enic->config.intr_mode < 2 && in enic_set_intr_mode()
2149 enic->rq_count >= 1 && in enic_set_intr_mode()
2150 enic->wq_count >= 1 && in enic_set_intr_mode()
2151 enic->cq_count >= 2 && in enic_set_intr_mode()
2152 enic->intr_count >= 1 && in enic_set_intr_mode()
2153 !pci_enable_msi(enic->pdev)) { in enic_set_intr_mode()
2155 enic->rq_count = 1; in enic_set_intr_mode()
2156 enic->wq_count = 1; in enic_set_intr_mode()
2157 enic->cq_count = 2; in enic_set_intr_mode()
2158 enic->intr_count = 1; in enic_set_intr_mode()
2160 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); in enic_set_intr_mode()
2173 if (enic->config.intr_mode < 3 && in enic_set_intr_mode()
2174 enic->rq_count >= 1 && in enic_set_intr_mode()
2175 enic->wq_count >= 1 && in enic_set_intr_mode()
2176 enic->cq_count >= 2 && in enic_set_intr_mode()
2177 enic->intr_count >= 3) { in enic_set_intr_mode()
2179 enic->rq_count = 1; in enic_set_intr_mode()
2180 enic->wq_count = 1; in enic_set_intr_mode()
2181 enic->cq_count = 2; in enic_set_intr_mode()
2182 enic->intr_count = 3; in enic_set_intr_mode()
2184 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); in enic_set_intr_mode()
2189 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_set_intr_mode()
2194 static void enic_clear_intr_mode(struct enic *enic) in enic_clear_intr_mode() argument
2196 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_clear_intr_mode()
2198 pci_disable_msix(enic->pdev); in enic_clear_intr_mode()
2201 pci_disable_msi(enic->pdev); in enic_clear_intr_mode()
2207 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_clear_intr_mode()
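
enic_set_intr_mode() walks a strict fallback ladder, shrinking the queue layout to fit each mode: MSI-X with per-queue vectors (n + m + 2), then a reduced MSI-X layout (elided below), then single-vector MSI, then INTx, which still needs three interrupt resources (I/O, error, notify). A condensed model of the decision:

#include <stdio.h>

static const char *pick_mode(int msix_vecs, int msi_ok, int intx_res,
                             int n, int m)
{
        if (msix_vecs >= n + m + 2)
                return "MSI-X"; /* per-queue vectors + err + notify */
        if (msi_ok)
                return "MSI";   /* one vector; 1 RQ, 1 WQ, 2 CQs */
        if (intx_res >= 3)
                return "INTx";  /* io, error, notify resources */
        return "unknown";
}

int main(void)
{
        printf("%s %s %s\n",
               pick_mode(10, 1, 3, 4, 4),       /* enough vectors */
               pick_mode(4, 1, 3, 4, 4),        /* MSI-X falls short */
               pick_mode(4, 0, 3, 4, 4));       /* no MSI either */
        return 0;
}
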
2262 static void enic_dev_deinit(struct enic *enic) in enic_dev_deinit() argument
2266 for (i = 0; i < enic->rq_count; i++) { in enic_dev_deinit()
2267 napi_hash_del(&enic->napi[i]); in enic_dev_deinit()
2268 netif_napi_del(&enic->napi[i]); in enic_dev_deinit()
2270 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_dev_deinit()
2271 for (i = 0; i < enic->wq_count; i++) in enic_dev_deinit()
2272 netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); in enic_dev_deinit()
2274 enic_free_vnic_resources(enic); in enic_dev_deinit()
2275 enic_clear_intr_mode(enic); in enic_dev_deinit()
2278 static void enic_kdump_kernel_config(struct enic *enic) in enic_kdump_kernel_config() argument
2281 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); in enic_kdump_kernel_config()
2282 enic->rq_count = 1; in enic_kdump_kernel_config()
2283 enic->wq_count = 1; in enic_kdump_kernel_config()
2284 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; in enic_kdump_kernel_config()
2285 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; in enic_kdump_kernel_config()
2286 enic->config.mtu = min_t(u16, 1500, enic->config.mtu); in enic_kdump_kernel_config()
2290 static int enic_dev_init(struct enic *enic) in enic_dev_init() argument
2292 struct device *dev = enic_get_dev(enic); in enic_dev_init()
2293 struct net_device *netdev = enic->netdev; in enic_dev_init()
2298 err = enic_dev_intr_coal_timer_info(enic); in enic_dev_init()
2302 vnic_dev_intr_coal_timer_info_default(enic->vdev); in enic_dev_init()
2308 err = enic_get_vnic_config(enic); in enic_dev_init()
2317 enic_get_res_counts(enic); in enic_dev_init()
2321 enic_kdump_kernel_config(enic); in enic_dev_init()
2327 err = enic_set_intr_mode(enic); in enic_dev_init()
2337 err = enic_alloc_vnic_resources(enic); in enic_dev_init()
2343 enic_init_vnic_resources(enic); in enic_dev_init()
2345 err = enic_set_rss_nic_cfg(enic); in enic_dev_init()
2351 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_init()
2353 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); in enic_dev_init()
2354 napi_hash_add(&enic->napi[0]); in enic_dev_init()
2357 for (i = 0; i < enic->rq_count; i++) { in enic_dev_init()
2358 netif_napi_add(netdev, &enic->napi[i], in enic_dev_init()
2360 napi_hash_add(&enic->napi[i]); in enic_dev_init()
2362 for (i = 0; i < enic->wq_count; i++) in enic_dev_init()
2363 netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], in enic_dev_init()
2371 enic_clear_intr_mode(enic); in enic_dev_init()
2372 enic_free_vnic_resources(enic); in enic_dev_init()
2377 static void enic_iounmap(struct enic *enic) in enic_iounmap() argument
2381 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) in enic_iounmap()
2382 if (enic->bar[i].vaddr) in enic_iounmap()
2383 iounmap(enic->bar[i].vaddr); in enic_iounmap()
2390 struct enic *enic; in enic_probe() local
2403 netdev = alloc_etherdev_mqs(sizeof(struct enic), in enic_probe()
2412 enic = netdev_priv(netdev); in enic_probe()
2413 enic->netdev = netdev; in enic_probe()
2414 enic->pdev = pdev; in enic_probe()
2464 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { in enic_probe()
2467 enic->bar[i].len = pci_resource_len(pdev, i); in enic_probe()
2468 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); in enic_probe()
2469 if (!enic->bar[i].vaddr) { in enic_probe()
2474 enic->bar[i].bus_addr = pci_resource_start(pdev, i); in enic_probe()
2480 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, in enic_probe()
2481 ARRAY_SIZE(enic->bar)); in enic_probe()
2482 if (!enic->vdev) { in enic_probe()
2493 &enic->num_vfs); in enic_probe()
2494 if (enic->num_vfs) { in enic_probe()
2495 err = pci_enable_sriov(pdev, enic->num_vfs); in enic_probe()
2502 enic->priv_flags |= ENIC_SRIOV_ENABLED; in enic_probe()
2503 num_pps = enic->num_vfs; in enic_probe()
2509 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); in enic_probe()
2510 if (!enic->pp) { in enic_probe()
2518 err = enic_dev_open(enic); in enic_probe()
2527 spin_lock_init(&enic->devcmd_lock); in enic_probe()
2528 spin_lock_init(&enic->enic_api_lock); in enic_probe()
2534 err = enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_probe()
2556 if (!enic_is_dynamic(enic)) { in enic_probe()
2557 err = vnic_dev_init(enic->vdev, 0); in enic_probe()
2564 err = enic_dev_init(enic); in enic_probe()
2570 netif_set_real_num_tx_queues(netdev, enic->wq_count); in enic_probe()
2571 netif_set_real_num_rx_queues(netdev, enic->rq_count); in enic_probe()
2576 init_timer(&enic->notify_timer); in enic_probe()
2577 enic->notify_timer.function = enic_notify_timer; in enic_probe()
2578 enic->notify_timer.data = (unsigned long)enic; in enic_probe()
2580 enic_set_rx_coal_setting(enic); in enic_probe()
2581 INIT_WORK(&enic->reset, enic_reset); in enic_probe()
2582 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); in enic_probe()
2584 for (i = 0; i < enic->wq_count; i++) in enic_probe()
2585 spin_lock_init(&enic->wq_lock[i]); in enic_probe()
2590 enic->port_mtu = enic->config.mtu; in enic_probe()
2591 (void)enic_change_mtu(netdev, enic->port_mtu); in enic_probe()
2593 err = enic_set_mac_addr(netdev, enic->mac_addr); in enic_probe()
2599 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; in enic_probe()
2603 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; in enic_probe()
2605 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_probe()
2614 if (ENIC_SETTING(enic, LOOP)) { in enic_probe()
2616 enic->loop_enable = 1; in enic_probe()
2617 enic->loop_tag = enic->config.loop_tag; in enic_probe()
2618 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); in enic_probe()
2620 if (ENIC_SETTING(enic, TXCSUM)) in enic_probe()
2622 if (ENIC_SETTING(enic, TSO)) in enic_probe()
2625 if (ENIC_SETTING(enic, RSS)) in enic_probe()
2627 if (ENIC_SETTING(enic, RXCSUM)) in enic_probe()
2646 enic->rx_copybreak = RX_COPYBREAK_DEFAULT; in enic_probe()
2651 enic_dev_deinit(enic); in enic_probe()
2653 vnic_dev_close(enic->vdev); in enic_probe()
2655 kfree(enic->pp); in enic_probe()
2658 if (enic_sriov_enabled(enic)) { in enic_probe()
2660 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_probe()
2664 vnic_dev_unregister(enic->vdev); in enic_probe()
2666 enic_iounmap(enic); in enic_probe()
2682 struct enic *enic = netdev_priv(netdev); in enic_remove() local
2684 cancel_work_sync(&enic->reset); in enic_remove()
2685 cancel_work_sync(&enic->change_mtu_work); in enic_remove()
2687 enic_dev_deinit(enic); in enic_remove()
2688 vnic_dev_close(enic->vdev); in enic_remove()
2690 if (enic_sriov_enabled(enic)) { in enic_remove()
2692 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_remove()
2695 kfree(enic->pp); in enic_remove()
2696 vnic_dev_unregister(enic->vdev); in enic_remove()
2697 enic_iounmap(enic); in enic_remove()