Lines Matching refs:bnad

79 bnad_add_to_list(struct bnad *bnad)  in bnad_add_to_list()  argument
82 list_add_tail(&bnad->list_entry, &bnad_list); in bnad_add_to_list()
83 bnad->id = bna_id++; in bnad_add_to_list()
88 bnad_remove_from_list(struct bnad *bnad) in bnad_remove_from_list() argument
91 list_del(&bnad->list_entry); in bnad_remove_from_list()
99 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) in bnad_cq_cleanup() argument
115 bnad_tx_buff_unmap(struct bnad *bnad, in bnad_tx_buff_unmap() argument
129 dma_unmap_single(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
144 dma_unmap_page(&bnad->pcidev->dev, in bnad_tx_buff_unmap()
163 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txq_cleanup() argument
173 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); in bnad_txq_cleanup()
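
The bnad_tx_buff_unmap() fragments above pair dma_unmap_single() for the skb head with dma_unmap_page() for the page fragments before the skb is freed. A minimal sketch of that unmap pattern follows; struct tx_unmap_sketch and its fields are assumptions made for illustration, only the kernel DMA/skb calls are taken from the listing.

/* Illustrative sketch: unmap one transmitted skb (head + fragments). */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct tx_unmap_sketch {                /* hypothetical unmap-queue entry */
        struct sk_buff *skb;
        dma_addr_t head_dma;
        dma_addr_t frag_dma[MAX_SKB_FRAGS];
};

static void tx_buff_unmap_sketch(struct device *dev,
                                 struct tx_unmap_sketch *unmap)
{
        struct sk_buff *skb = unmap->skb;
        unsigned int i;

        /* the head was mapped with dma_map_single() */
        dma_unmap_single(dev, unmap->head_dma, skb_headlen(skb),
                         DMA_TO_DEVICE);

        /* each page fragment was mapped with skb_frag_dma_map() */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                dma_unmap_page(dev, unmap->frag_dma[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);

        dev_kfree_skb_any(skb);
        unmap->skb = NULL;
}
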
185 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) in bnad_txcmpl_process() argument
215 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); in bnad_txcmpl_process()
229 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) in bnad_tx_complete() argument
231 struct net_device *netdev = bnad->netdev; in bnad_tx_complete()
237 sent = bnad_txcmpl_process(bnad, tcb); in bnad_tx_complete()
245 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_tx_complete()
264 struct bnad *bnad = tcb->bnad; in bnad_msix_tx() local
266 bnad_tx_complete(bnad, tcb); in bnad_msix_tx()
272 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_uninit() argument
284 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_alloc_init() argument
289 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_alloc_init()
317 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) in bnad_rxq_cleanup_page() argument
322 dma_unmap_page(&bnad->pcidev->dev, in bnad_rxq_cleanup_page()
332 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) in bnad_rxq_cleanup_skb() argument
337 dma_unmap_single(&bnad->pcidev->dev, in bnad_rxq_cleanup_skb()
347 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_cleanup() argument
356 bnad_rxq_cleanup_skb(bnad, unmap); in bnad_rxq_cleanup()
358 bnad_rxq_cleanup_page(bnad, unmap); in bnad_rxq_cleanup()
360 bnad_rxq_alloc_uninit(bnad, rcb); in bnad_rxq_cleanup()
364 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_page() argument
395 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); in bnad_rxq_refill_page()
400 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, in bnad_rxq_refill_page()
432 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) in bnad_rxq_refill_skb() argument
449 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); in bnad_rxq_refill_skb()
452 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); in bnad_rxq_refill_skb()
456 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_rxq_refill_skb()
481 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) in bnad_rxq_post() argument
491 bnad_rxq_refill_skb(bnad, rcb, to_alloc); in bnad_rxq_post()
493 bnad_rxq_refill_page(bnad, rcb, to_alloc); in bnad_rxq_post()
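
The RX refill fragments above allocate a buffer with netdev_alloc_skb_ip_align(), map it with dma_map_single(), and count failures via the rxbuf_alloc_failed statistic. A hedged sketch of a single refill step; everything other than the kernel APIs is an illustrative name:

/* Illustrative sketch: allocate and DMA-map one small RX buffer. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int rxq_refill_one_sketch(struct net_device *netdev,
                                 struct device *dma_dev, unsigned int buff_sz,
                                 struct sk_buff **skb_out, dma_addr_t *dma_out)
{
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb_ip_align(netdev, buff_sz);
        if (!skb)
                return -ENOMEM;         /* caller bumps its alloc-failed counter */

        dma_addr = dma_map_single(dma_dev, skb->data, buff_sz,
                                  DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, dma_addr)) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        *skb_out = skb;
        *dma_out = dma_addr;
        return 0;
}
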
511 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, in bnad_cq_drop_packet() argument
524 bnad_rxq_cleanup_skb(bnad, unmap); in bnad_cq_drop_packet()
526 bnad_rxq_cleanup_page(bnad, unmap); in bnad_cq_drop_packet()
534 struct bnad *bnad; in bnad_cq_setup_skb_frags() local
540 bnad = rcb->bnad; in bnad_cq_setup_skb_frags()
550 dma_unmap_page(&bnad->pcidev->dev, in bnad_cq_setup_skb_frags()
571 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, in bnad_cq_setup_skb() argument
576 dma_unmap_single(&bnad->pcidev->dev, in bnad_cq_setup_skb()
581 skb->protocol = eth_type_trans(skb, bnad->netdev); in bnad_cq_setup_skb()
588 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) in bnad_cq_process() argument
601 prefetch(bnad->netdev); in bnad_cq_process()
684 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); in bnad_cq_process()
691 bnad_cq_setup_skb(bnad, skb, unmap, len); in bnad_cq_process()
702 ((bnad->netdev->features & NETIF_F_RXCSUM) && in bnad_cq_process()
712 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) in bnad_cq_process()
734 bnad_rxq_post(bnad, ccb->rcb[0]); in bnad_cq_process()
736 bnad_rxq_post(bnad, ccb->rcb[1]); in bnad_cq_process()
742 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) in bnad_netif_rx_schedule_poll() argument
761 bnad_netif_rx_schedule_poll(ccb->bnad, ccb); in bnad_msix_rx()
775 struct bnad *bnad = (struct bnad *)data; in bnad_msix_mbox_handler() local
777 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
778 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_msix_mbox_handler()
779 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
783 bna_intr_status_get(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
785 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_msix_mbox_handler()
786 bna_mbox_handler(&bnad->bna, intr_status); in bnad_msix_mbox_handler()
788 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_msix_mbox_handler()
799 struct bnad *bnad = (struct bnad *)data; in bnad_isr() local
804 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_isr()
805 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { in bnad_isr()
806 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
810 bna_intr_status_get(&bnad->bna, intr_status); in bnad_isr()
813 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
817 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) in bnad_isr()
818 bna_mbox_handler(&bnad->bna, intr_status); in bnad_isr()
820 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_isr()
827 for (i = 0; i < bnad->num_tx; i++) { in bnad_isr()
828 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_isr()
829 tcb = bnad->tx_info[i].tcb[j]; in bnad_isr()
831 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); in bnad_isr()
835 for (i = 0; i < bnad->num_rx; i++) { in bnad_isr()
836 rx_info = &bnad->rx_info[i]; in bnad_isr()
839 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_isr()
842 bnad_netif_rx_schedule_poll(bnad, in bnad_isr()
854 bnad_enable_mbox_irq(struct bnad *bnad) in bnad_enable_mbox_irq() argument
856 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_enable_mbox_irq()
858 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); in bnad_enable_mbox_irq()
866 bnad_disable_mbox_irq(struct bnad *bnad) in bnad_disable_mbox_irq() argument
868 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_disable_mbox_irq()
870 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); in bnad_disable_mbox_irq()
874 bnad_set_netdev_perm_addr(struct bnad *bnad) in bnad_set_netdev_perm_addr() argument
876 struct net_device *netdev = bnad->netdev; in bnad_set_netdev_perm_addr()
878 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); in bnad_set_netdev_perm_addr()
880 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); in bnad_set_netdev_perm_addr()
887 bnad_cb_mbox_intr_enable(struct bnad *bnad) in bnad_cb_mbox_intr_enable() argument
889 bnad_enable_mbox_irq(bnad); in bnad_cb_mbox_intr_enable()
893 bnad_cb_mbox_intr_disable(struct bnad *bnad) in bnad_cb_mbox_intr_disable() argument
895 bnad_disable_mbox_irq(bnad); in bnad_cb_mbox_intr_disable()
899 bnad_cb_ioceth_ready(struct bnad *bnad) in bnad_cb_ioceth_ready() argument
901 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_ready()
902 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_ready()
906 bnad_cb_ioceth_failed(struct bnad *bnad) in bnad_cb_ioceth_failed() argument
908 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; in bnad_cb_ioceth_failed()
909 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_failed()
913 bnad_cb_ioceth_disabled(struct bnad *bnad) in bnad_cb_ioceth_disabled() argument
915 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; in bnad_cb_ioceth_disabled()
916 complete(&bnad->bnad_completions.ioc_comp); in bnad_cb_ioceth_disabled()
922 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_enet_disabled() local
924 netif_carrier_off(bnad->netdev); in bnad_cb_enet_disabled()
925 complete(&bnad->bnad_completions.enet_comp); in bnad_cb_enet_disabled()
929 bnad_cb_ethport_link_status(struct bnad *bnad, in bnad_cb_ethport_link_status() argument
937 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
938 BNAD_UPDATE_CTR(bnad, cee_toggle); in bnad_cb_ethport_link_status()
939 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
941 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) in bnad_cb_ethport_link_status()
942 BNAD_UPDATE_CTR(bnad, cee_toggle); in bnad_cb_ethport_link_status()
943 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); in bnad_cb_ethport_link_status()
947 if (!netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
950 bnad->netdev->name); in bnad_cb_ethport_link_status()
951 netif_carrier_on(bnad->netdev); in bnad_cb_ethport_link_status()
952 BNAD_UPDATE_CTR(bnad, link_toggle); in bnad_cb_ethport_link_status()
953 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { in bnad_cb_ethport_link_status()
954 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; in bnad_cb_ethport_link_status()
957 bnad->tx_info[tx_id].tcb[tcb_id]; in bnad_cb_ethport_link_status()
971 bnad->netdev->name, in bnad_cb_ethport_link_status()
974 bnad->netdev, in bnad_cb_ethport_link_status()
976 BNAD_UPDATE_CTR(bnad, in bnad_cb_ethport_link_status()
980 bnad->netdev, in bnad_cb_ethport_link_status()
982 BNAD_UPDATE_CTR(bnad, in bnad_cb_ethport_link_status()
989 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_ethport_link_status()
991 bnad->netdev->name); in bnad_cb_ethport_link_status()
992 netif_carrier_off(bnad->netdev); in bnad_cb_ethport_link_status()
993 BNAD_UPDATE_CTR(bnad, link_toggle); in bnad_cb_ethport_link_status()
1001 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_tx_disabled() local
1003 complete(&bnad->bnad_completions.tx_comp); in bnad_cb_tx_disabled()
1007 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_setup() argument
1017 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) in bnad_cb_tcb_destroy() argument
1027 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) in bnad_cb_ccb_setup() argument
1037 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) in bnad_cb_ccb_destroy() argument
1046 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_stall() argument
1060 netif_stop_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_stall()
1062 bnad->netdev->name, txq_id); in bnad_cb_tx_stall()
1067 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_resume() argument
1084 if (netif_carrier_ok(bnad->netdev)) { in bnad_cb_tx_resume()
1086 bnad->netdev->name, txq_id); in bnad_cb_tx_resume()
1087 netif_wake_subqueue(bnad->netdev, txq_id); in bnad_cb_tx_resume()
1088 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_cb_tx_resume()
1097 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { in bnad_cb_tx_resume()
1098 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr); in bnad_cb_tx_resume()
1099 bnad_set_netdev_perm_addr(bnad); in bnad_cb_tx_resume()
1111 struct bnad *bnad = NULL; in bnad_tx_cleanup() local
1121 bnad = tcb->bnad; in bnad_tx_cleanup()
1128 bnad_txq_cleanup(bnad, tcb); in bnad_tx_cleanup()
1135 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, in bnad_tx_cleanup()
1140 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1142 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_tx_cleanup()
1146 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) in bnad_cb_tx_cleanup() argument
1158 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); in bnad_cb_tx_cleanup()
1162 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_stall() argument
1191 struct bnad *bnad = NULL; in bnad_rx_cleanup() local
1201 bnad = rx_ctrl->ccb->bnad; in bnad_rx_cleanup()
1209 bnad_cq_cleanup(bnad, rx_ctrl->ccb); in bnad_rx_cleanup()
1210 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); in bnad_rx_cleanup()
1212 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); in bnad_rx_cleanup()
1215 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1217 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_rx_cleanup()
1221 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_cleanup() argument
1240 queue_work(bnad->work_q, &rx_info->rx_cleanup_work); in bnad_cb_rx_cleanup()
1244 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_post() argument
1265 bnad_rxq_alloc_init(bnad, rcb); in bnad_cb_rx_post()
1268 bnad_rxq_post(bnad, rcb); in bnad_cb_rx_post()
1276 struct bnad *bnad = (struct bnad *)arg; in bnad_cb_rx_disabled() local
1278 complete(&bnad->bnad_completions.rx_comp); in bnad_cb_rx_disabled()
1282 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) in bnad_cb_rx_mcast_add() argument
1284 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; in bnad_cb_rx_mcast_add()
1285 complete(&bnad->bnad_completions.mcast_comp); in bnad_cb_rx_mcast_add()
1289 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, in bnad_cb_stats_get() argument
1293 BNAD_UPDATE_CTR(bnad, hw_stats_updates); in bnad_cb_stats_get()
1295 if (!netif_running(bnad->netdev) || in bnad_cb_stats_get()
1296 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_cb_stats_get()
1299 mod_timer(&bnad->stats_timer, in bnad_cb_stats_get()
1304 bnad_cb_enet_mtu_set(struct bnad *bnad) in bnad_cb_enet_mtu_set() argument
1306 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; in bnad_cb_enet_mtu_set()
1307 complete(&bnad->bnad_completions.mtu_comp); in bnad_cb_enet_mtu_set()
1323 bnad_mem_free(struct bnad *bnad, in bnad_mem_free() argument
1337 dma_free_coherent(&bnad->pcidev->dev, in bnad_mem_free()
1349 bnad_mem_alloc(struct bnad *bnad, in bnad_mem_alloc() argument
1369 dma_alloc_coherent(&bnad->pcidev->dev, in bnad_mem_alloc()
1391 bnad_mem_free(bnad, mem_info); in bnad_mem_alloc()
1397 bnad_mbox_irq_free(struct bnad *bnad) in bnad_mbox_irq_free() argument
1402 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1403 bnad_disable_mbox_irq(bnad); in bnad_mbox_irq_free()
1404 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_free()
1406 irq = BNAD_GET_MBOX_IRQ(bnad); in bnad_mbox_irq_free()
1407 free_irq(irq, bnad); in bnad_mbox_irq_free()
1416 bnad_mbox_irq_alloc(struct bnad *bnad) in bnad_mbox_irq_alloc() argument
1423 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1424 if (bnad->cfg_flags & BNAD_CF_MSIX) { in bnad_mbox_irq_alloc()
1426 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_alloc()
1430 irq = bnad->pcidev->irq; in bnad_mbox_irq_alloc()
1434 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_alloc()
1435 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); in bnad_mbox_irq_alloc()
1441 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); in bnad_mbox_irq_alloc()
1443 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); in bnad_mbox_irq_alloc()
1446 bnad->mbox_irq_name, bnad); in bnad_mbox_irq_alloc()
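
bnad_mbox_irq_alloc() above selects either the dedicated MSI-X vector (msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) or the legacy pcidev->irq and then calls request_irq(). A minimal sketch of that selection; the handler, the flag choice, and the use of index 0 are assumptions:

/* Illustrative sketch: pick the mailbox IRQ source and register a handler. */
#include <linux/interrupt.h>
#include <linux/pci.h>

static int mbox_irq_alloc_sketch(struct pci_dev *pdev,
                                 struct msix_entry *msix_table, bool using_msix,
                                 irq_handler_t handler, const char *name,
                                 void *dev_id)
{
        unsigned int irq;
        unsigned long flags;

        if (using_msix) {
                irq = msix_table[0].vector;     /* dedicated mailbox vector */
                flags = 0;
        } else {
                irq = pdev->irq;                /* shared legacy interrupt */
                flags = IRQF_SHARED;
        }

        return request_irq(irq, handler, flags, name, dev_id);
}
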
1452 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) in bnad_txrx_irq_free() argument
1460 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, in bnad_txrx_irq_alloc() argument
1467 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1468 cfg_flags = bnad->cfg_flags; in bnad_txrx_irq_alloc()
1469 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_txrx_irq_alloc()
1486 (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_txrx_irq_alloc()
1522 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, in bnad_tx_msix_unregister() argument
1533 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); in bnad_tx_msix_unregister()
1541 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, in bnad_tx_msix_register() argument
1550 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, in bnad_tx_msix_register()
1552 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_tx_msix_register()
1564 bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); in bnad_tx_msix_register()
1572 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, in bnad_rx_msix_unregister() argument
1583 free_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_unregister()
1592 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, in bnad_rx_msix_register() argument
1602 bnad->netdev->name, in bnad_rx_msix_register()
1604 err = request_irq(bnad->msix_table[vector_num].vector, in bnad_rx_msix_register()
1616 bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); in bnad_rx_msix_register()
1622 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) in bnad_tx_res_free() argument
1628 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_tx_res_free()
1630 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); in bnad_tx_res_free()
1636 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_tx_res_alloc() argument
1643 err = bnad_mem_alloc(bnad, in bnad_tx_res_alloc()
1646 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, in bnad_tx_res_alloc()
1654 bnad_tx_res_free(bnad, res_info); in bnad_tx_res_alloc()
1660 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) in bnad_rx_res_free() argument
1666 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_rx_res_free()
1668 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); in bnad_rx_res_free()
1674 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_rx_res_alloc() argument
1682 err = bnad_mem_alloc(bnad, in bnad_rx_res_alloc()
1685 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, in bnad_rx_res_alloc()
1693 bnad_rx_res_free(bnad, res_info); in bnad_rx_res_alloc()
1702 struct bnad *bnad = (struct bnad *)data; in bnad_ioc_timeout() local
1705 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1706 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc); in bnad_ioc_timeout()
1707 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_timeout()
1713 struct bnad *bnad = (struct bnad *)data; in bnad_ioc_hb_check() local
1716 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1717 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc); in bnad_ioc_hb_check()
1718 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioc_hb_check()
1724 struct bnad *bnad = (struct bnad *)data; in bnad_iocpf_timeout() local
1727 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1728 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc); in bnad_iocpf_timeout()
1729 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_timeout()
1735 struct bnad *bnad = (struct bnad *)data; in bnad_iocpf_sem_timeout() local
1738 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1739 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc); in bnad_iocpf_sem_timeout()
1740 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_iocpf_sem_timeout()
1757 struct bnad *bnad = (struct bnad *)data; in bnad_dim_timeout() local
1763 if (!netif_carrier_ok(bnad->netdev)) in bnad_dim_timeout()
1766 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_dim_timeout()
1767 for (i = 0; i < bnad->num_rx; i++) { in bnad_dim_timeout()
1768 rx_info = &bnad->rx_info[i]; in bnad_dim_timeout()
1771 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_dim_timeout()
1780 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) in bnad_dim_timeout()
1781 mod_timer(&bnad->dim_timer, in bnad_dim_timeout()
1783 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_dim_timeout()
1790 struct bnad *bnad = (struct bnad *)data; in bnad_stats_timeout() local
1793 if (!netif_running(bnad->netdev) || in bnad_stats_timeout()
1794 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timeout()
1797 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timeout()
1798 bna_hw_stats_get(&bnad->bna); in bnad_stats_timeout()
1799 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timeout()
1807 bnad_dim_timer_start(struct bnad *bnad) in bnad_dim_timer_start() argument
1809 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_dim_timer_start()
1810 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_dim_timer_start()
1811 setup_timer(&bnad->dim_timer, bnad_dim_timeout, in bnad_dim_timer_start()
1812 (unsigned long)bnad); in bnad_dim_timer_start()
1813 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_dim_timer_start()
1814 mod_timer(&bnad->dim_timer, in bnad_dim_timer_start()
1824 bnad_stats_timer_start(struct bnad *bnad) in bnad_stats_timer_start() argument
1828 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1829 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { in bnad_stats_timer_start()
1830 setup_timer(&bnad->stats_timer, bnad_stats_timeout, in bnad_stats_timer_start()
1831 (unsigned long)bnad); in bnad_stats_timer_start()
1832 mod_timer(&bnad->stats_timer, in bnad_stats_timer_start()
1835 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_start()
1843 bnad_stats_timer_stop(struct bnad *bnad) in bnad_stats_timer_stop() argument
1848 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1849 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) in bnad_stats_timer_stop()
1851 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stats_timer_stop()
1853 del_timer_sync(&bnad->stats_timer); in bnad_stats_timer_stop()
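
The DIM and stats timer fragments above use the pre-4.15 timer idiom: setup_timer() with the bnad pointer cast to unsigned long, mod_timer() to arm, and del_timer_sync() once the corresponding run_flags bit is cleared. A sketch of that idiom under those assumptions (the one-second period and the names are illustrative):

/* Illustrative sketch of the old setup_timer()/mod_timer() idiom. */
#include <linux/jiffies.h>
#include <linux/timer.h>

static void stats_timeout_sketch(unsigned long data)
{
        void *priv = (void *)data;      /* the driver passes its private struct */

        (void)priv;                     /* collect stats for priv here */
}

static void stats_timer_start_sketch(struct timer_list *timer, void *priv)
{
        setup_timer(timer, stats_timeout_sketch, (unsigned long)priv);
        mod_timer(timer, jiffies + HZ); /* assumed 1 s period */
}

static void stats_timer_stop_sketch(struct timer_list *timer)
{
        del_timer_sync(timer);          /* wait for a running callback to finish */
}
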
1876 struct bnad *bnad = rx_ctrl->bnad; in bnad_napi_poll_rx() local
1881 if (!netif_carrier_ok(bnad->netdev)) in bnad_napi_poll_rx()
1884 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); in bnad_napi_poll_rx()
1901 bnad_napi_add(struct bnad *bnad, u32 rx_id) in bnad_napi_add() argument
1907 for (i = 0; i < bnad->num_rxp_per_rx; i++) { in bnad_napi_add()
1908 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; in bnad_napi_add()
1909 netif_napi_add(bnad->netdev, &rx_ctrl->napi, in bnad_napi_add()
1915 bnad_napi_delete(struct bnad *bnad, u32 rx_id) in bnad_napi_delete() argument
1920 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_napi_delete()
1921 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); in bnad_napi_delete()
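
bnad_napi_add()/bnad_napi_delete() above register one NAPI context per RX path with netif_napi_add() and tear them down with netif_napi_del(). A sketch of that pairing, assuming the four-argument netif_napi_add() of kernels from this era and a hypothetical per-path rx_ctrl structure:

/* Illustrative sketch: attach and detach one NAPI context per RX path. */
#include <linux/netdevice.h>

struct rx_ctrl_sketch {                 /* hypothetical per-path context */
        struct napi_struct napi;
};

static void napi_add_sketch(struct net_device *netdev,
                            struct rx_ctrl_sketch *rx_ctrl, int num_paths,
                            int (*poll)(struct napi_struct *, int))
{
        int i;

        for (i = 0; i < num_paths; i++)
                netif_napi_add(netdev, &rx_ctrl[i].napi, poll,
                               NAPI_POLL_WEIGHT);
}

static void napi_delete_sketch(struct rx_ctrl_sketch *rx_ctrl, int num_paths)
{
        int i;

        for (i = 0; i < num_paths; i++)
                netif_napi_del(&rx_ctrl[i].napi);
}
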
1926 bnad_destroy_tx(struct bnad *bnad, u32 tx_id) in bnad_destroy_tx() argument
1928 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_destroy_tx()
1929 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_destroy_tx()
1935 init_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1936 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1939 wait_for_completion(&bnad->bnad_completions.tx_comp); in bnad_destroy_tx()
1942 bnad_tx_msix_unregister(bnad, tx_info, in bnad_destroy_tx()
1943 bnad->num_txq_per_tx); in bnad_destroy_tx()
1945 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_tx()
1947 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_tx()
1952 bnad_tx_res_free(bnad, res_info); in bnad_destroy_tx()
1957 bnad_setup_tx(struct bnad *bnad, u32 tx_id) in bnad_setup_tx() argument
1960 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; in bnad_setup_tx()
1961 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; in bnad_setup_tx()
1964 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; in bnad_setup_tx()
1979 tx_config->num_txq = bnad->num_txq_per_tx; in bnad_setup_tx()
1980 tx_config->txq_depth = bnad->txq_depth; in bnad_setup_tx()
1982 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; in bnad_setup_tx()
1985 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
1986 bna_tx_res_req(bnad->num_txq_per_tx, in bnad_setup_tx()
1987 bnad->txq_depth, res_info); in bnad_setup_tx()
1988 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
1992 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * in bnad_setup_tx()
1993 bnad->txq_depth)); in bnad_setup_tx()
1996 err = bnad_tx_res_alloc(bnad, res_info, tx_id); in bnad_setup_tx()
2001 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2002 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, in bnad_setup_tx()
2004 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2016 err = bnad_tx_msix_register(bnad, tx_info, in bnad_setup_tx()
2017 tx_id, bnad->num_txq_per_tx); in bnad_setup_tx()
2022 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2024 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2029 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_tx()
2031 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_tx()
2035 bnad_tx_res_free(bnad, res_info); in bnad_setup_tx()
2042 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) in bnad_init_rx_config() argument
2046 rx_config->num_paths = bnad->num_rxp_per_rx; in bnad_init_rx_config()
2047 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; in bnad_init_rx_config()
2049 if (bnad->num_rxp_per_rx > 1) { in bnad_init_rx_config()
2057 bnad->num_rxp_per_rx - 1; in bnad_init_rx_config()
2066 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); in bnad_init_rx_config()
2076 if (BNAD_PCI_DEV_IS_CAT2(bnad) && in bnad_init_rx_config()
2085 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; in bnad_init_rx_config()
2090 rx_config->q0_depth = bnad->rxq_depth; in bnad_init_rx_config()
2095 rx_config->q1_depth = bnad->rxq_depth; in bnad_init_rx_config()
2100 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? in bnad_init_rx_config()
2105 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) in bnad_rx_ctrl_init() argument
2107 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_rx_ctrl_init()
2110 for (i = 0; i < bnad->num_rxp_per_rx; i++) in bnad_rx_ctrl_init()
2111 rx_info->rx_ctrl[i].bnad = bnad; in bnad_rx_ctrl_init()
2116 bnad_reinit_rx(struct bnad *bnad) in bnad_reinit_rx() argument
2118 struct net_device *netdev = bnad->netdev; in bnad_reinit_rx()
2124 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2125 if (!bnad->rx_info[rx_id].rx) in bnad_reinit_rx()
2127 bnad_destroy_rx(bnad, rx_id); in bnad_reinit_rx()
2130 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2131 bna_enet_mtu_set(&bnad->bna.enet, in bnad_reinit_rx()
2132 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_reinit_rx()
2133 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2135 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { in bnad_reinit_rx()
2137 current_err = bnad_setup_rx(bnad, rx_id); in bnad_reinit_rx()
2145 if (bnad->rx_info[0].rx && !err) { in bnad_reinit_rx()
2146 bnad_restore_vlans(bnad, 0); in bnad_reinit_rx()
2147 bnad_enable_default_bcast(bnad); in bnad_reinit_rx()
2148 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_reinit_rx()
2149 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_reinit_rx()
2150 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_reinit_rx()
2159 bnad_destroy_rx(struct bnad *bnad, u32 rx_id) in bnad_destroy_rx() argument
2161 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_destroy_rx()
2162 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_destroy_rx()
2163 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_destroy_rx()
2171 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2172 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_destroy_rx()
2173 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { in bnad_destroy_rx()
2174 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); in bnad_destroy_rx()
2177 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2179 del_timer_sync(&bnad->dim_timer); in bnad_destroy_rx()
2182 init_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2183 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2185 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2186 wait_for_completion(&bnad->bnad_completions.rx_comp); in bnad_destroy_rx()
2189 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); in bnad_destroy_rx()
2191 bnad_napi_delete(bnad, rx_id); in bnad_destroy_rx()
2193 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_destroy_rx()
2198 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_destroy_rx()
2200 bnad_rx_res_free(bnad, res_info); in bnad_destroy_rx()
2205 bnad_setup_rx(struct bnad *bnad, u32 rx_id) in bnad_setup_rx() argument
2208 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; in bnad_setup_rx()
2209 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; in bnad_setup_rx()
2212 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; in bnad_setup_rx()
2228 bnad_init_rx_config(bnad, rx_config); in bnad_setup_rx()
2231 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2233 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2250 err = bnad_rx_res_alloc(bnad, res_info, rx_id); in bnad_setup_rx()
2254 bnad_rx_ctrl_init(bnad, rx_id); in bnad_setup_rx()
2257 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2258 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, in bnad_setup_rx()
2262 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2266 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2275 bnad_napi_add(bnad, rx_id); in bnad_setup_rx()
2279 err = bnad_rx_msix_register(bnad, rx_info, rx_id, in bnad_setup_rx()
2285 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_setup_rx()
2288 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) in bnad_setup_rx()
2289 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); in bnad_setup_rx()
2295 bnad_dim_timer_start(bnad); in bnad_setup_rx()
2299 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_setup_rx()
2304 bnad_destroy_rx(bnad, rx_id); in bnad_setup_rx()
2310 bnad_tx_coalescing_timeo_set(struct bnad *bnad) in bnad_tx_coalescing_timeo_set() argument
2314 tx_info = &bnad->tx_info[0]; in bnad_tx_coalescing_timeo_set()
2318 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); in bnad_tx_coalescing_timeo_set()
2323 bnad_rx_coalescing_timeo_set(struct bnad *bnad) in bnad_rx_coalescing_timeo_set() argument
2328 for (i = 0; i < bnad->num_rx; i++) { in bnad_rx_coalescing_timeo_set()
2329 rx_info = &bnad->rx_info[i]; in bnad_rx_coalescing_timeo_set()
2333 bnad->rx_coalescing_timeo); in bnad_rx_coalescing_timeo_set()
2341 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) in bnad_mac_addr_set_locked() argument
2349 if (!bnad->rx_info[0].rx) in bnad_mac_addr_set_locked()
2352 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); in bnad_mac_addr_set_locked()
2361 bnad_enable_default_bcast(struct bnad *bnad) in bnad_enable_default_bcast() argument
2363 struct bnad_rx_info *rx_info = &bnad->rx_info[0]; in bnad_enable_default_bcast()
2367 init_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2369 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2372 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_default_bcast()
2375 wait_for_completion(&bnad->bnad_completions.mcast_comp); in bnad_enable_default_bcast()
2379 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) in bnad_enable_default_bcast()
2387 bnad_restore_vlans(struct bnad *bnad, u32 rx_id) in bnad_restore_vlans() argument
2392 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { in bnad_restore_vlans()
2393 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_restore_vlans()
2394 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); in bnad_restore_vlans()
2395 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_restore_vlans()
2401 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) in bnad_netdev_qstats_fill() argument
2405 for (i = 0; i < bnad->num_rx; i++) { in bnad_netdev_qstats_fill()
2406 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netdev_qstats_fill()
2407 if (bnad->rx_info[i].rx_ctrl[j].ccb) { in bnad_netdev_qstats_fill()
2408 stats->rx_packets += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2410 stats->rx_bytes += bnad->rx_info[i]. in bnad_netdev_qstats_fill()
2412 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_netdev_qstats_fill()
2413 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_netdev_qstats_fill()
2416 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2419 bnad->rx_info[i].rx_ctrl[j]. in bnad_netdev_qstats_fill()
2425 for (i = 0; i < bnad->num_tx; i++) { in bnad_netdev_qstats_fill()
2426 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_netdev_qstats_fill()
2427 if (bnad->tx_info[i].tcb[j]) { in bnad_netdev_qstats_fill()
2429 bnad->tx_info[i].tcb[j]->txq->tx_packets; in bnad_netdev_qstats_fill()
2431 bnad->tx_info[i].tcb[j]->txq->tx_bytes; in bnad_netdev_qstats_fill()
2441 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) in bnad_netdev_hwstats_fill() argument
2447 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; in bnad_netdev_hwstats_fill()
2466 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_netdev_hwstats_fill()
2470 bnad->stats.bna_stats-> in bnad_netdev_hwstats_fill()
2479 bnad_mbox_irq_sync(struct bnad *bnad) in bnad_mbox_irq_sync() argument
2484 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2485 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_mbox_irq_sync()
2486 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; in bnad_mbox_irq_sync()
2488 irq = bnad->pcidev->irq; in bnad_mbox_irq_sync()
2489 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mbox_irq_sync()
2496 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) in bnad_tso_prepare() argument
2502 BNAD_UPDATE_CTR(bnad, tso_err); in bnad_tso_prepare()
2520 BNAD_UPDATE_CTR(bnad, tso4); in bnad_tso_prepare()
2528 BNAD_UPDATE_CTR(bnad, tso6); in bnad_tso_prepare()
2540 bnad_q_num_init(struct bnad *bnad) in bnad_q_num_init() argument
2547 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) in bnad_q_num_init()
2550 bnad->num_rx = 1; in bnad_q_num_init()
2551 bnad->num_tx = 1; in bnad_q_num_init()
2552 bnad->num_rxp_per_rx = rxps; in bnad_q_num_init()
2553 bnad->num_txq_per_tx = BNAD_TXQ_NUM; in bnad_q_num_init()
2563 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) in bnad_q_num_adjust() argument
2565 bnad->num_txq_per_tx = 1; in bnad_q_num_adjust()
2566 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_q_num_adjust()
2568 (bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_q_num_adjust()
2569 bnad->num_rxp_per_rx = msix_vectors - in bnad_q_num_adjust()
2570 (bnad->num_tx * bnad->num_txq_per_tx) - in bnad_q_num_adjust()
2573 bnad->num_rxp_per_rx = 1; in bnad_q_num_adjust()
2578 bnad_ioceth_disable(struct bnad *bnad) in bnad_ioceth_disable() argument
2583 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2584 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_disable()
2585 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); in bnad_ioceth_disable()
2586 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_disable()
2588 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_disable()
2591 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_disable()
2596 bnad_ioceth_enable(struct bnad *bnad) in bnad_ioceth_enable() argument
2601 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2602 init_completion(&bnad->bnad_completions.ioc_comp); in bnad_ioceth_enable()
2603 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; in bnad_ioceth_enable()
2604 bna_ioceth_enable(&bnad->bna.ioceth); in bnad_ioceth_enable()
2605 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_ioceth_enable()
2607 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, in bnad_ioceth_enable()
2610 err = bnad->bnad_completions.ioc_comp_status; in bnad_ioceth_enable()
2617 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, in bnad_res_free() argument
2623 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); in bnad_res_free()
2628 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, in bnad_res_alloc() argument
2634 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); in bnad_res_alloc()
2641 bnad_res_free(bnad, res_info, res_val_max); in bnad_res_alloc()
2647 bnad_enable_msix(struct bnad *bnad) in bnad_enable_msix() argument
2652 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2653 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_enable_msix()
2654 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2657 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2659 if (bnad->msix_table) in bnad_enable_msix()
2662 bnad->msix_table = in bnad_enable_msix()
2663 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); in bnad_enable_msix()
2665 if (!bnad->msix_table) in bnad_enable_msix()
2668 for (i = 0; i < bnad->msix_num; i++) in bnad_enable_msix()
2669 bnad->msix_table[i].entry = i; in bnad_enable_msix()
2671 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, in bnad_enable_msix()
2672 1, bnad->msix_num); in bnad_enable_msix()
2675 } else if (ret < bnad->msix_num) { in bnad_enable_msix()
2677 ret, bnad->msix_num); in bnad_enable_msix()
2679 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2681 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, in bnad_enable_msix()
2683 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2685 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + in bnad_enable_msix()
2688 if (bnad->msix_num > ret) { in bnad_enable_msix()
2689 pci_disable_msix(bnad->pcidev); in bnad_enable_msix()
2694 pci_intx(bnad->pcidev, 0); in bnad_enable_msix()
2701 kfree(bnad->msix_table); in bnad_enable_msix()
2702 bnad->msix_table = NULL; in bnad_enable_msix()
2703 bnad->msix_num = 0; in bnad_enable_msix()
2704 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_enable_msix()
2705 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_enable_msix()
2706 bnad_q_num_init(bnad); in bnad_enable_msix()
2707 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_enable_msix()
2711 bnad_disable_msix(struct bnad *bnad) in bnad_disable_msix() argument
2716 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_disable_msix()
2717 cfg_flags = bnad->cfg_flags; in bnad_disable_msix()
2718 if (bnad->cfg_flags & BNAD_CF_MSIX) in bnad_disable_msix()
2719 bnad->cfg_flags &= ~BNAD_CF_MSIX; in bnad_disable_msix()
2720 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_disable_msix()
2723 pci_disable_msix(bnad->pcidev); in bnad_disable_msix()
2724 kfree(bnad->msix_table); in bnad_disable_msix()
2725 bnad->msix_table = NULL; in bnad_disable_msix()
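
bnad_enable_msix() above builds an msix_table with kcalloc(), numbers the entries, calls pci_enable_msix_range(), and on failure releases the table and drops back to INTx (clearing BNAD_CF_MSIX). A minimal sketch of that negotiation; only the PCI and slab APIs are taken from the listing:

/* Illustrative sketch: request a range of MSI-X vectors with INTx fallback. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int enable_msix_sketch(struct pci_dev *pdev,
                              struct msix_entry **table_out, int want)
{
        struct msix_entry *table;
        int i, got;

        table = kcalloc(want, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        for (i = 0; i < want; i++)
                table[i].entry = i;

        /* accept anything between 1 and 'want' vectors */
        got = pci_enable_msix_range(pdev, table, 1, want);
        if (got < 0) {
                kfree(table);           /* caller falls back to legacy INTx */
                *table_out = NULL;
                return got;
        }

        *table_out = table;
        return got;                     /* caller shrinks its queues if got < want */
}
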
2734 struct bnad *bnad = netdev_priv(netdev); in bnad_open() local
2738 mutex_lock(&bnad->conf_mutex); in bnad_open()
2741 err = bnad_setup_tx(bnad, 0); in bnad_open()
2746 err = bnad_setup_rx(bnad, 0); in bnad_open()
2754 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2755 bna_enet_mtu_set(&bnad->bna.enet, in bnad_open()
2756 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); in bnad_open()
2757 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); in bnad_open()
2758 bna_enet_enable(&bnad->bna.enet); in bnad_open()
2759 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2762 bnad_enable_default_bcast(bnad); in bnad_open()
2765 bnad_restore_vlans(bnad, 0); in bnad_open()
2768 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_open()
2769 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_open()
2770 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_open()
2773 bnad_stats_timer_start(bnad); in bnad_open()
2775 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2780 bnad_destroy_tx(bnad, 0); in bnad_open()
2783 mutex_unlock(&bnad->conf_mutex); in bnad_open()
2790 struct bnad *bnad = netdev_priv(netdev); in bnad_stop() local
2793 mutex_lock(&bnad->conf_mutex); in bnad_stop()
2796 bnad_stats_timer_stop(bnad); in bnad_stop()
2798 init_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2800 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_stop()
2801 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, in bnad_stop()
2803 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_stop()
2805 wait_for_completion(&bnad->bnad_completions.enet_comp); in bnad_stop()
2807 bnad_destroy_tx(bnad, 0); in bnad_stop()
2808 bnad_destroy_rx(bnad, 0); in bnad_stop()
2811 bnad_mbox_irq_sync(bnad); in bnad_stop()
2813 mutex_unlock(&bnad->conf_mutex); in bnad_stop()
2821 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, in bnad_txq_wi_prepare() argument
2832 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { in bnad_txq_wi_prepare()
2841 if (unlikely(gso_size > bnad->netdev->mtu)) { in bnad_txq_wi_prepare()
2842 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); in bnad_txq_wi_prepare()
2849 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); in bnad_txq_wi_prepare()
2855 if (bnad_tso_prepare(bnad, skb)) { in bnad_txq_wi_prepare()
2856 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); in bnad_txq_wi_prepare()
2868 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { in bnad_txq_wi_prepare()
2869 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); in bnad_txq_wi_prepare()
2891 BNAD_UPDATE_CTR(bnad, tcpcsum_offload); in bnad_txq_wi_prepare()
2896 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); in bnad_txq_wi_prepare()
2905 BNAD_UPDATE_CTR(bnad, udpcsum_offload); in bnad_txq_wi_prepare()
2909 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); in bnad_txq_wi_prepare()
2914 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); in bnad_txq_wi_prepare()
2934 struct bnad *bnad = netdev_priv(netdev); in bnad_start_xmit() local
2950 BNAD_UPDATE_CTR(bnad, tx_skb_too_short); in bnad_start_xmit()
2955 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); in bnad_start_xmit()
2960 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); in bnad_start_xmit()
2964 tcb = bnad->tx_info[0].tcb[txq_id]; in bnad_start_xmit()
2972 BNAD_UPDATE_CTR(bnad, tx_skb_stopping); in bnad_start_xmit()
2985 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); in bnad_start_xmit()
2994 sent = bnad_txcmpl_process(bnad, tcb); in bnad_start_xmit()
3001 BNAD_UPDATE_CTR(bnad, netif_queue_stop); in bnad_start_xmit()
3011 BNAD_UPDATE_CTR(bnad, netif_queue_stop); in bnad_start_xmit()
3015 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); in bnad_start_xmit()
3023 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { in bnad_start_xmit()
3035 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, in bnad_start_xmit()
3048 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, in bnad_start_xmit()
3051 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); in bnad_start_xmit()
3066 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, in bnad_start_xmit()
3078 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); in bnad_start_xmit()
3080 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); in bnad_start_xmit()
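
The bnad_start_xmit() fragments above map the skb head with dma_map_single() and each page fragment with skb_frag_dma_map(), unwinding with bnad_tx_buff_unmap() when a mapping or length check fails. A hedged sketch of the mapping side (the unmap side mirrors the sketch after the bnad_txq_cleanup() entries; error codes and names are assumptions):

/* Illustrative sketch: DMA-map an skb for transmit, head then fragments. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int tx_map_skb_sketch(struct device *dev, struct sk_buff *skb,
                             dma_addr_t *head_dma,
                             dma_addr_t frag_dma[MAX_SKB_FRAGS])
{
        unsigned int i;

        *head_dma = dma_map_single(dev, skb->data, skb_headlen(skb),
                                   DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *head_dma))
                return -EIO;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                frag_dma[i] = skb_frag_dma_map(dev, frag, 0,
                                               skb_frag_size(frag),
                                               DMA_TO_DEVICE);
                if (dma_mapping_error(dev, frag_dma[i]))
                        goto unwind;
        }
        return 0;

unwind:
        while (i--)
                dma_unmap_page(dev, frag_dma[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        dma_unmap_single(dev, *head_dma, skb_headlen(skb), DMA_TO_DEVICE);
        return -EIO;
}
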
3107 struct bnad *bnad = netdev_priv(netdev); in bnad_get_stats64() local
3110 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_stats64()
3112 bnad_netdev_qstats_fill(bnad, stats); in bnad_get_stats64()
3113 bnad_netdev_hwstats_fill(bnad, stats); in bnad_get_stats64()
3115 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_stats64()
3121 bnad_set_rx_ucast_fltr(struct bnad *bnad) in bnad_set_rx_ucast_fltr() argument
3123 struct net_device *netdev = bnad->netdev; in bnad_set_rx_ucast_fltr()
3130 if (netdev_uc_empty(bnad->netdev)) { in bnad_set_rx_ucast_fltr()
3131 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); in bnad_set_rx_ucast_fltr()
3135 if (uc_count > bna_attr(&bnad->bna)->num_ucmac) in bnad_set_rx_ucast_fltr()
3149 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, in bnad_set_rx_ucast_fltr()
3160 bnad->cfg_flags |= BNAD_CF_DEFAULT; in bnad_set_rx_ucast_fltr()
3161 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); in bnad_set_rx_ucast_fltr()
3165 bnad_set_rx_mcast_fltr(struct bnad *bnad) in bnad_set_rx_mcast_fltr() argument
3167 struct net_device *netdev = bnad->netdev; in bnad_set_rx_mcast_fltr()
3178 if (mc_count > bna_attr(&bnad->bna)->num_mcmac) in bnad_set_rx_mcast_fltr()
3190 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, in bnad_set_rx_mcast_fltr()
3200 bnad->cfg_flags |= BNAD_CF_ALLMULTI; in bnad_set_rx_mcast_fltr()
3201 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); in bnad_set_rx_mcast_fltr()
3207 struct bnad *bnad = netdev_priv(netdev); in bnad_set_rx_mode() local
3211 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3213 if (bnad->rx_info[0].rx == NULL) { in bnad_set_rx_mode()
3214 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3219 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | in bnad_set_rx_mode()
3225 bnad->cfg_flags |= BNAD_CF_PROMISC; in bnad_set_rx_mode()
3227 bnad_set_rx_mcast_fltr(bnad); in bnad_set_rx_mode()
3229 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) in bnad_set_rx_mode()
3232 bnad_set_rx_ucast_fltr(bnad); in bnad_set_rx_mode()
3234 if (bnad->cfg_flags & BNAD_CF_DEFAULT) in bnad_set_rx_mode()
3240 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); in bnad_set_rx_mode()
3242 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_rx_mode()
3254 struct bnad *bnad = netdev_priv(netdev); in bnad_set_mac_address() local
3258 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_mac_address()
3260 err = bnad_mac_addr_set_locked(bnad, sa->sa_data); in bnad_set_mac_address()
3265 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_mac_address()
3271 bnad_mtu_set(struct bnad *bnad, int frame_size) in bnad_mtu_set() argument
3275 init_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3277 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_mtu_set()
3278 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); in bnad_mtu_set()
3279 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_mtu_set()
3281 wait_for_completion(&bnad->bnad_completions.mtu_comp); in bnad_mtu_set()
3283 return bnad->bnad_completions.mtu_comp_status; in bnad_mtu_set()
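
bnad_mtu_set() above shows the driver's synchronous completion pattern: init_completion(), start the BNA operation with a callback, wait_for_completion(), then return the status the callback stored (bnad_cb_enet_mtu_set() earlier in the listing does the complete()). A generic sketch of that pattern with illustrative names:

/* Illustrative sketch of the completion-based "start, then wait" pattern. */
#include <linux/completion.h>

struct sync_ctx_sketch {                /* hypothetical completion context */
        struct completion comp;
        int status;
};

/* callback run by the lower layer when the operation finishes */
static void op_done_sketch(struct sync_ctx_sketch *ctx, int status)
{
        ctx->status = status;
        complete(&ctx->comp);
}

/* caller side: start the asynchronous operation, then block on it */
static int op_sync_sketch(struct sync_ctx_sketch *ctx,
                          void (*start_op)(struct sync_ctx_sketch *))
{
        init_completion(&ctx->comp);
        start_op(ctx);                  /* lower layer eventually calls op_done_sketch() */
        wait_for_completion(&ctx->comp);
        return ctx->status;
}
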
3290 struct bnad *bnad = netdev_priv(netdev); in bnad_change_mtu() local
3296 mutex_lock(&bnad->conf_mutex); in bnad_change_mtu()
3305 if (BNAD_PCI_DEV_IS_CAT2(bnad) && in bnad_change_mtu()
3306 netif_running(bnad->netdev)) { in bnad_change_mtu()
3310 rx_count = bnad_reinit_rx(bnad); in bnad_change_mtu()
3316 err = bnad_mtu_set(bnad, new_frame); in bnad_change_mtu()
3320 mutex_unlock(&bnad->conf_mutex); in bnad_change_mtu()
3327 struct bnad *bnad = netdev_priv(netdev); in bnad_vlan_rx_add_vid() local
3330 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_add_vid()
3333 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3335 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3336 bna_rx_vlan_add(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_add_vid()
3337 set_bit(vid, bnad->active_vlans); in bnad_vlan_rx_add_vid()
3338 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_add_vid()
3340 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_add_vid()
3348 struct bnad *bnad = netdev_priv(netdev); in bnad_vlan_rx_kill_vid() local
3351 if (!bnad->rx_info[0].rx) in bnad_vlan_rx_kill_vid()
3354 mutex_lock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
3356 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3357 clear_bit(vid, bnad->active_vlans); in bnad_vlan_rx_kill_vid()
3358 bna_rx_vlan_del(bnad->rx_info[0].rx, vid); in bnad_vlan_rx_kill_vid()
3359 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_vlan_rx_kill_vid()
3361 mutex_unlock(&bnad->conf_mutex); in bnad_vlan_rx_kill_vid()
3368 struct bnad *bnad = netdev_priv(dev); in bnad_set_features() local
3374 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_features()
3377 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); in bnad_set_features()
3379 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); in bnad_set_features()
3381 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_features()
3391 struct bnad *bnad = netdev_priv(netdev); in bnad_netpoll() local
3397 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { in bnad_netpoll()
3398 bna_intx_disable(&bnad->bna, curr_mask); in bnad_netpoll()
3399 bnad_isr(bnad->pcidev->irq, netdev); in bnad_netpoll()
3400 bna_intx_enable(&bnad->bna, curr_mask); in bnad_netpoll()
3408 for (i = 0; i < bnad->num_rx; i++) { in bnad_netpoll()
3409 rx_info = &bnad->rx_info[i]; in bnad_netpoll()
3412 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_netpoll()
3415 bnad_netif_rx_schedule_poll(bnad, in bnad_netpoll()
3441 bnad_netdev_init(struct bnad *bnad, bool using_dac) in bnad_netdev_init() argument
3443 struct net_device *netdev = bnad->netdev; in bnad_netdev_init()
3459 netdev->mem_start = bnad->mmio_start; in bnad_netdev_init()
3460 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; in bnad_netdev_init()
3473 bnad_init(struct bnad *bnad, in bnad_init() argument
3481 bnad->netdev = netdev; in bnad_init()
3482 bnad->pcidev = pdev; in bnad_init()
3483 bnad->mmio_start = pci_resource_start(pdev, 0); in bnad_init()
3484 bnad->mmio_len = pci_resource_len(pdev, 0); in bnad_init()
3485 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); in bnad_init()
3486 if (!bnad->bar0) { in bnad_init()
3490 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, in bnad_init()
3491 (unsigned long long) bnad->mmio_len); in bnad_init()
3493 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_init()
3495 bnad->cfg_flags = BNAD_CF_MSIX; in bnad_init()
3497 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; in bnad_init()
3499 bnad_q_num_init(bnad); in bnad_init()
3500 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_init()
3502 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + in bnad_init()
3503 (bnad->num_rx * bnad->num_rxp_per_rx) + in bnad_init()
3506 bnad->txq_depth = BNAD_TXQ_DEPTH; in bnad_init()
3507 bnad->rxq_depth = BNAD_RXQ_DEPTH; in bnad_init()
3509 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; in bnad_init()
3510 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; in bnad_init()
3512 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); in bnad_init()
3513 bnad->work_q = create_singlethread_workqueue(bnad->wq_name); in bnad_init()
3514 if (!bnad->work_q) { in bnad_init()
3515 iounmap(bnad->bar0); in bnad_init()
3528 bnad_uninit(struct bnad *bnad) in bnad_uninit() argument
3530 if (bnad->work_q) { in bnad_uninit()
3531 flush_workqueue(bnad->work_q); in bnad_uninit()
3532 destroy_workqueue(bnad->work_q); in bnad_uninit()
3533 bnad->work_q = NULL; in bnad_uninit()
3536 if (bnad->bar0) in bnad_uninit()
3537 iounmap(bnad->bar0); in bnad_uninit()
3547 bnad_lock_init(struct bnad *bnad) in bnad_lock_init() argument
3549 spin_lock_init(&bnad->bna_lock); in bnad_lock_init()
3550 mutex_init(&bnad->conf_mutex); in bnad_lock_init()
3555 bnad_lock_uninit(struct bnad *bnad) in bnad_lock_uninit() argument
3557 mutex_destroy(&bnad->conf_mutex); in bnad_lock_uninit()
3563 bnad_pci_init(struct bnad *bnad, in bnad_pci_init() argument
3606 struct bnad *bnad; in bnad_pci_probe() local
3627 netdev = alloc_etherdev(sizeof(struct bnad)); in bnad_pci_probe()
3632 bnad = netdev_priv(netdev); in bnad_pci_probe()
3633 bnad_lock_init(bnad); in bnad_pci_probe()
3634 bnad_add_to_list(bnad); in bnad_pci_probe()
3636 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3643 err = bnad_pci_init(bnad, pdev, &using_dac); in bnad_pci_probe()
3651 err = bnad_init(bnad, pdev, netdev); in bnad_pci_probe()
3656 bnad_netdev_init(bnad, using_dac); in bnad_pci_probe()
3663 bnad_debugfs_init(bnad); in bnad_pci_probe()
3666 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3667 bna_res_req(&bnad->res_info[0]); in bnad_pci_probe()
3668 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3671 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3675 bna = &bnad->bna; in bnad_pci_probe()
3678 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); in bnad_pci_probe()
3679 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); in bnad_pci_probe()
3680 pcidev_info.device_id = bnad->pcidev->device; in bnad_pci_probe()
3681 pcidev_info.pci_bar_kva = bnad->bar0; in bnad_pci_probe()
3683 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3684 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); in bnad_pci_probe()
3685 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3687 bnad->stats.bna_stats = &bna->stats; in bnad_pci_probe()
3689 bnad_enable_msix(bnad); in bnad_pci_probe()
3690 err = bnad_mbox_irq_alloc(bnad); in bnad_pci_probe()
3695 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, in bnad_pci_probe()
3696 ((unsigned long)bnad)); in bnad_pci_probe()
3697 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, in bnad_pci_probe()
3698 ((unsigned long)bnad)); in bnad_pci_probe()
3699 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, in bnad_pci_probe()
3700 ((unsigned long)bnad)); in bnad_pci_probe()
3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, in bnad_pci_probe()
3702 ((unsigned long)bnad)); in bnad_pci_probe()
3709 err = bnad_ioceth_enable(bnad); in bnad_pci_probe()
3716 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3719 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, in bnad_pci_probe()
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3729 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3730 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3731 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3733 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3739 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3740 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); in bnad_pci_probe()
3741 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3744 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3745 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr); in bnad_pci_probe()
3746 bnad_set_netdev_perm_addr(bnad); in bnad_pci_probe()
3747 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3749 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3757 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); in bnad_pci_probe()
3762 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3766 mutex_lock(&bnad->conf_mutex); in bnad_pci_probe()
3767 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_probe()
3769 bnad_ioceth_disable(bnad); in bnad_pci_probe()
3770 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_probe()
3771 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_probe()
3772 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_probe()
3773 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_probe()
3775 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_probe()
3776 bnad_mbox_irq_free(bnad); in bnad_pci_probe()
3777 bnad_disable_msix(bnad); in bnad_pci_probe()
3779 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_probe()
3782 kfree(bnad->regdata); in bnad_pci_probe()
3783 bnad_debugfs_uninit(bnad); in bnad_pci_probe()
3784 bnad_uninit(bnad); in bnad_pci_probe()
3788 mutex_unlock(&bnad->conf_mutex); in bnad_pci_probe()
3789 bnad_remove_from_list(bnad); in bnad_pci_probe()
3790 bnad_lock_uninit(bnad); in bnad_pci_probe()
3799 struct bnad *bnad; in bnad_pci_remove() local
3807 bnad = netdev_priv(netdev); in bnad_pci_remove()
3808 bna = &bnad->bna; in bnad_pci_remove()
3810 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) in bnad_pci_remove()
3813 mutex_lock(&bnad->conf_mutex); in bnad_pci_remove()
3814 bnad_ioceth_disable(bnad); in bnad_pci_remove()
3815 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); in bnad_pci_remove()
3816 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); in bnad_pci_remove()
3817 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); in bnad_pci_remove()
3818 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_pci_remove()
3820 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_pci_remove()
3822 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); in bnad_pci_remove()
3823 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); in bnad_pci_remove()
3824 bnad_mbox_irq_free(bnad); in bnad_pci_remove()
3825 bnad_disable_msix(bnad); in bnad_pci_remove()
3827 mutex_unlock(&bnad->conf_mutex); in bnad_pci_remove()
3828 bnad_remove_from_list(bnad); in bnad_pci_remove()
3829 bnad_lock_uninit(bnad); in bnad_pci_remove()
3831 kfree(bnad->regdata); in bnad_pci_remove()
3832 bnad_debugfs_uninit(bnad); in bnad_pci_remove()
3833 bnad_uninit(bnad); in bnad_pci_remove()