Lines matching refs:bnad (cross-reference hits for the bnad driver-private structure, apparently from the Brocade BNA ethtool code, bnad_ethtool.c). Each entry shows the source line number, the matched line, and the enclosing function; "local" and "argument" mark the line that declares the symbol.
297 struct bnad *bnad = netdev_priv(netdev); in bnad_get_drvinfo() local
306 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_drvinfo()
307 bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); in bnad_get_drvinfo()
308 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_drvinfo()
315 strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), in bnad_get_drvinfo()
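The get_drvinfo fragments show the pattern every query op in this file follows: recover the driver-private struct with netdev_priv(), take bna_lock (the irqsave variant, since the lock is also taken from interrupt context) around the bna-layer call, and fill the ethtool struct outside the lock. The strlcpy at line 315 dates the excerpt; current kernels spell it strscpy(). Below is a condensed sketch of the whole callback as these fragments imply it; the ioc_attr allocation and the firmware-version copy are not in the listing and are assumptions here.

    static void
    bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
    {
            struct bnad *bnad = netdev_priv(netdev);
            struct bfa_ioc_attr *ioc_attr;
            unsigned long flags;

            ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);  /* assumed */
            if (ioc_attr) {
                    /* Adapter attribute queries must run under bna_lock. */
                    spin_lock_irqsave(&bnad->bna_lock, flags);
                    bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
                    spin_unlock_irqrestore(&bnad->bna_lock, flags);
                    /* fw_version would be copied out of ioc_attr here (assumed) */
                    kfree(ioc_attr);
            }
            strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
                    sizeof(drvinfo->bus_info));
    }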
329 struct bnad *bnad = netdev_priv(netdev); in bnad_get_coalesce() local
333 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_coalesce()
335 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; in bnad_get_coalesce()
336 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_coalesce()
338 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * in bnad_get_coalesce()
340 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * in bnad_get_coalesce()
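The getter needs bna_lock only to sample cfg_flags; the stored timeouts are read without it. The field receiving the DIM state at line 335 is truncated in the listing and is presumably use_adaptive_rx_coalesce, and the multiplier truncated at lines 338/340 is presumably the coalescing-timer tick in microseconds (BFI_COALESCING_TIMER_UNIT in this driver). A sketch using the older two-argument ethtool signature:

    static int
    bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
    {
            struct bnad *bnad = netdev_priv(netdev);
            unsigned long flags;

            spin_lock_irqsave(&bnad->bna_lock, flags);
            coalesce->use_adaptive_rx_coalesce =
                    (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
            spin_unlock_irqrestore(&bnad->bna_lock, flags);

            /* Stored timeouts are in hardware ticks; report microseconds. */
            coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
                                          BFI_COALESCING_TIMER_UNIT;
            coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
                                          BFI_COALESCING_TIMER_UNIT;
            return 0;
    }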
350 struct bnad *bnad = netdev_priv(netdev); in bnad_set_coalesce() local
364 mutex_lock(&bnad->conf_mutex); in bnad_set_coalesce()
370 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_coalesce()
372 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { in bnad_set_coalesce()
373 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; in bnad_set_coalesce()
374 bnad_dim_timer_start(bnad); in bnad_set_coalesce()
377 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { in bnad_set_coalesce()
378 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; in bnad_set_coalesce()
379 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && in bnad_set_coalesce()
381 &bnad->run_flags)) { in bnad_set_coalesce()
383 &bnad->run_flags); in bnad_set_coalesce()
386 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_coalesce()
388 del_timer_sync(&bnad->dim_timer); in bnad_set_coalesce()
389 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_coalesce()
390 bnad_rx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
393 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / in bnad_set_coalesce()
395 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / in bnad_set_coalesce()
397 bnad_tx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
400 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / in bnad_set_coalesce()
402 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / in bnad_set_coalesce()
405 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) in bnad_set_coalesce()
406 bnad_rx_coalescing_timeo_set(bnad); in bnad_set_coalesce()
412 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_coalesce()
414 mutex_unlock(&bnad->conf_mutex); in bnad_set_coalesce()
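The setter serializes against other configuration paths with conf_mutex and against the data path with bna_lock. Enabling adaptive coalescing (lines 372-374) sets BNAD_CF_DIM_ENABLED and starts the DIM timer; disabling it stops the timer and reapplies the static timeouts. Two details stand out. First, as excerpted, line 379 re-tests BNAD_CF_DIM_ENABLED immediately after line 378 clears it, so the timer-stop branch looks unreachable; presumably only the BNAD_RF_DIM_TIMER_RUNNING bit was meant to be tested. Second, bna_lock is dropped around del_timer_sync() (lines 386-389) because the timer handler takes the same lock, so waiting for it under the lock could deadlock. A sketch of the disable path, substituting test_and_clear_bit() for the questionable double test:

    /* Disable-DIM path implied by lines 377-390 (sketch, not verbatim). */
    if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
            bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
            if (test_and_clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
                                   &bnad->run_flags))
                    to_del = 1;
            /* Drop the lock: dim_timer's handler also takes bna_lock. */
            spin_unlock_irqrestore(&bnad->bna_lock, flags);
            if (to_del)
                    del_timer_sync(&bnad->dim_timer);
            spin_lock_irqsave(&bnad->bna_lock, flags);
            bnad_rx_coalescing_timeo_set(bnad);
    }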
422 struct bnad *bnad = netdev_priv(netdev); in bnad_get_ringparam() local
427 ringparam->rx_pending = bnad->rxq_depth; in bnad_get_ringparam()
428 ringparam->tx_pending = bnad->txq_depth; in bnad_get_ringparam()
436 struct bnad *bnad = netdev_priv(netdev); in bnad_set_ringparam() local
439 mutex_lock(&bnad->conf_mutex); in bnad_set_ringparam()
440 if (ringparam->rx_pending == bnad->rxq_depth && in bnad_set_ringparam()
441 ringparam->tx_pending == bnad->txq_depth) { in bnad_set_ringparam()
442 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
449 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
455 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
459 if (ringparam->rx_pending != bnad->rxq_depth) { in bnad_set_ringparam()
460 bnad->rxq_depth = ringparam->rx_pending; in bnad_set_ringparam()
462 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
466 for (i = 0; i < bnad->num_rx; i++) { in bnad_set_ringparam()
467 if (!bnad->rx_info[i].rx) in bnad_set_ringparam()
469 bnad_destroy_rx(bnad, i); in bnad_set_ringparam()
470 current_err = bnad_setup_rx(bnad, i); in bnad_set_ringparam()
475 if (!err && bnad->rx_info[0].rx) { in bnad_set_ringparam()
477 bnad_restore_vlans(bnad, 0); in bnad_set_ringparam()
478 bnad_enable_default_bcast(bnad); in bnad_set_ringparam()
479 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_ringparam()
480 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); in bnad_set_ringparam()
481 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_ringparam()
482 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | in bnad_set_ringparam()
487 if (ringparam->tx_pending != bnad->txq_depth) { in bnad_set_ringparam()
488 bnad->txq_depth = ringparam->tx_pending; in bnad_set_ringparam()
490 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
494 for (i = 0; i < bnad->num_tx; i++) { in bnad_set_ringparam()
495 if (!bnad->tx_info[i].tx) in bnad_set_ringparam()
497 bnad_destroy_tx(bnad, i); in bnad_set_ringparam()
498 current_err = bnad_setup_tx(bnad, i); in bnad_set_ringparam()
504 mutex_unlock(&bnad->conf_mutex); in bnad_set_ringparam()
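The ringparam getter (lines 427-428) just reports the cached depths. The setter returns early when nothing changed (lines 440-444) and otherwise rebuilds every affected ring in place: each Rx/Tx path is destroyed and re-created at the new depth, the first setup error is remembered while the remaining rings are still attempted, and because tearing down Rx loses configuration, the VLANs, default broadcast entry, MAC address, and multicast flags are restored afterwards (lines 475-485). A sketch of the Rx half; the netif_running() early-out and the err/current_err locals are assumptions filled in around the truncated lines:

    if (ringparam->rx_pending != bnad->rxq_depth) {
            bnad->rxq_depth = ringparam->rx_pending;
            if (!netif_running(netdev)) {           /* assumed early-out */
                    mutex_unlock(&bnad->conf_mutex);
                    return 0;
            }
            for (i = 0; i < bnad->num_rx; i++) {
                    if (!bnad->rx_info[i].rx)
                            continue;
                    /* Rebuild the ring at the new depth. */
                    bnad_destroy_rx(bnad, i);
                    current_err = bnad_setup_rx(bnad, i);
                    if (current_err && !err)
                            err = current_err;      /* keep first failure */
            }
    }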
512 struct bnad *bnad = netdev_priv(netdev); in bnad_get_pauseparam() local
515 pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; in bnad_get_pauseparam()
516 pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; in bnad_get_pauseparam()
523 struct bnad *bnad = netdev_priv(netdev); in bnad_set_pauseparam() local
530 mutex_lock(&bnad->conf_mutex); in bnad_set_pauseparam()
531 if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || in bnad_set_pauseparam()
532 pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { in bnad_set_pauseparam()
535 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_pauseparam()
536 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); in bnad_set_pauseparam()
537 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_pauseparam()
539 mutex_unlock(&bnad->conf_mutex); in bnad_set_pauseparam()
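get_pauseparam reads the cached pause state straight out of bna.enet; set_pauseparam applies a new configuration only when it differs, again under conf_mutex for configuration serialization and bna_lock around the bna call. A sketch of the setter; the autoneg rejection and the pause_config local are assumptions, since the listing shows only the compare-and-apply core:

    static int
    bnad_set_pauseparam(struct net_device *netdev,
                        struct ethtool_pauseparam *pauseparam)
    {
            struct bnad *bnad = netdev_priv(netdev);
            struct bna_pause_config pause_config;
            unsigned long flags;

            if (pauseparam->autoneg == AUTONEG_ENABLE)
                    return -EINVAL;         /* assumed: no pause autoneg */

            mutex_lock(&bnad->conf_mutex);
            if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
                pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
                    pause_config.rx_pause = pauseparam->rx_pause;
                    pause_config.tx_pause = pauseparam->tx_pause;
                    spin_lock_irqsave(&bnad->bna_lock, flags);
                    bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
                    spin_unlock_irqrestore(&bnad->bna_lock, flags);
            }
            mutex_unlock(&bnad->conf_mutex);
            return 0;
    }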
546 struct bnad *bnad = netdev_priv(netdev); in bnad_get_strings() local
550 mutex_lock(&bnad->conf_mutex); in bnad_get_strings()
561 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_strings()
592 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_strings()
620 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_strings()
621 if (!bnad->rx_info[i].rx) in bnad_get_strings()
623 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_get_strings()
646 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_strings()
647 if (!bnad->rx_info[i].rx) in bnad_get_strings()
649 for (j = 0; j < bnad->num_rxp_per_rx; j++) { in bnad_get_strings()
664 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_get_strings()
665 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_get_strings()
667 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_get_strings()
691 for (i = 0; i < bnad->num_tx; i++) { in bnad_get_strings()
692 if (!bnad->tx_info[i].tx) in bnad_get_strings()
694 for (j = 0; j < bnad->num_txq_per_tx; j++) { in bnad_get_strings()
716 mutex_unlock(&bnad->conf_mutex); in bnad_get_strings()
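The string table is generated under conf_mutex. Per-function counter names are emitted only for resource IDs present in the bitmap returned by bna_tx_rid_mask()/bna_rx_rid_mask() (lines 561, 592), and per-queue names only for queues that actually exist, hence the ccb/rcb guards around lines 664-667. A sketch of the bit-walk; the counter name shown and the ETH_GSTRING_LEN cursor arithmetic are assumptions based on standard ethtool string tables:

    bmap = bna_tx_rid_mask(&bnad->bna);
    for (i = 0; bmap; i++) {
            if (bmap & 1) {
                    /* one block of per-TxF counter names per set bit */
                    sprintf(string, "txf%d_ucast_octets", i);  /* assumed name */
                    string += ETH_GSTRING_LEN;
                    /* remaining txf counters follow the same pattern */
            }
            bmap >>= 1;
    }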
722 struct bnad *bnad = netdev_priv(netdev); in bnad_get_stats_count_locked() local
726 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_stats_count_locked()
732 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_stats_count_locked()
742 for (i = 0; i < bnad->num_rx; i++) { in bnad_get_stats_count_locked()
743 if (!bnad->rx_info[i].rx) in bnad_get_stats_count_locked()
745 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; in bnad_get_stats_count_locked()
746 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; in bnad_get_stats_count_locked()
747 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_get_stats_count_locked()
748 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_get_stats_count_locked()
749 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_get_stats_count_locked()
750 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) in bnad_get_stats_count_locked()
754 for (i = 0; i < bnad->num_tx; i++) { in bnad_get_stats_count_locked()
755 if (!bnad->tx_info[i].tx) in bnad_get_stats_count_locked()
757 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; in bnad_get_stats_count_locked()
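The count must be computed with exactly the same conditionals as bnad_get_strings(), otherwise the name table and the value buffer drift out of sync. The rcb[1] guard (lines 748-750) accounts for the second (small-buffer) RxQ that only some configurations instantiate. A sketch of the Rx portion using the counter-group constants named in the listing:

    for (i = 0; i < bnad->num_rx; i++) {
            if (!bnad->rx_info[i].rx)
                    continue;
            count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
            count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
            /* a populated second rcb adds one more RXQ counter block */
            for (j = 0; j < bnad->num_rxp_per_rx; j++)
                    if (bnad->rx_info[i].rx_ctrl[j].ccb &&
                        bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
                        bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
                            count += BNAD_NUM_RXQ_COUNTERS;
    }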
763 bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) in bnad_per_q_stats_fill() argument
769 for (i = 0; i < bnad->num_rx; i++) { in bnad_per_q_stats_fill()
770 if (!bnad->rx_info[i].rx) in bnad_per_q_stats_fill()
772 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_per_q_stats_fill()
773 if (bnad->rx_info[i].rx_ctrl[j].ccb && in bnad_per_q_stats_fill()
774 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && in bnad_per_q_stats_fill()
775 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { in bnad_per_q_stats_fill()
776 buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
779 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
782 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
784 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
786 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
788 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
790 buf[bi++] = bnad->rx_info[i]. in bnad_per_q_stats_fill()
794 for (i = 0; i < bnad->num_rx; i++) { in bnad_per_q_stats_fill()
795 if (!bnad->rx_info[i].rx) in bnad_per_q_stats_fill()
797 for (j = 0; j < bnad->num_rxp_per_rx; j++) in bnad_per_q_stats_fill()
798 if (bnad->rx_info[i].rx_ctrl[j].ccb) { in bnad_per_q_stats_fill()
799 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && in bnad_per_q_stats_fill()
800 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_per_q_stats_fill()
802 rcb = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
813 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && in bnad_per_q_stats_fill()
814 bnad->rx_info[i].rx_ctrl[j].ccb-> in bnad_per_q_stats_fill()
816 rcb = bnad->rx_info[i].rx_ctrl[j]. in bnad_per_q_stats_fill()
830 for (i = 0; i < bnad->num_tx; i++) { in bnad_per_q_stats_fill()
831 if (!bnad->tx_info[i].tx) in bnad_per_q_stats_fill()
833 for (j = 0; j < bnad->num_txq_per_tx; j++) in bnad_per_q_stats_fill()
834 if (bnad->tx_info[i].tcb[j] && in bnad_per_q_stats_fill()
835 bnad->tx_info[i].tcb[j]->txq) { in bnad_per_q_stats_fill()
836 tcb = bnad->tx_info[i].tcb[j]; in bnad_per_q_stats_fill()
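bnad_per_q_stats_fill() walks the same ccb/rcb/tcb structures in the same order as the string and count functions, writing values at cursor bi and returning the advanced cursor. rcb[0] is the first (large-buffer) RxQ and rcb[1] the optional second one, mirroring the guards above. A sketch of the TxQ leg; the specific counters copied are assumptions, since the listing shows only the guard and the tcb assignment:

    for (i = 0; i < bnad->num_tx; i++) {
            if (!bnad->tx_info[i].tx)
                    continue;
            for (j = 0; j < bnad->num_txq_per_tx; j++)
                    if (bnad->tx_info[i].tcb[j] &&
                        bnad->tx_info[i].tcb[j]->txq) {
                            tcb = bnad->tx_info[i].tcb[j];
                            buf[bi++] = tcb->txq->tx_packets;   /* assumed */
                            buf[bi++] = tcb->txq->tx_bytes;     /* assumed */
                            buf[bi++] = tcb->producer_index;    /* assumed */
                            buf[bi++] = tcb->consumer_index;    /* assumed */
                    }
    }
    return bi;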
852 struct bnad *bnad = netdev_priv(netdev); in bnad_get_ethtool_stats() local
859 mutex_lock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
861 mutex_unlock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
869 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_ethtool_stats()
874 bnad_netdev_qstats_fill(bnad, net_stats64); in bnad_get_ethtool_stats()
875 bnad_netdev_hwstats_fill(bnad, net_stats64); in bnad_get_ethtool_stats()
880 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); in bnad_get_ethtool_stats()
883 stats64 = (u64 *)&bnad->stats.drv_stats; in bnad_get_ethtool_stats()
888 stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; in bnad_get_ethtool_stats()
896 bmap = bna_tx_rid_mask(&bnad->bna); in bnad_get_ethtool_stats()
899 stats64 = (u64 *)&bnad->stats.bna_stats-> in bnad_get_ethtool_stats()
909 bmap = bna_rx_rid_mask(&bnad->bna); in bnad_get_ethtool_stats()
912 stats64 = (u64 *)&bnad->stats.bna_stats-> in bnad_get_ethtool_stats()
922 bi = bnad_per_q_stats_fill(bnad, buf, bi); in bnad_get_ethtool_stats()
924 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_ethtool_stats()
926 mutex_unlock(&bnad->conf_mutex); in bnad_get_ethtool_stats()
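bnad_get_ethtool_stats() holds conf_mutex for the whole fill and bna_lock for the snapshot, first refreshing the aggregate netdev counters (lines 874-875), then copying the driver and hardware stats blocks, then the per-rid and per-queue values via bnad_per_q_stats_fill(). Lines 883-893 rely on the stats structures being laid out as flat arrays of u64, so they can be copied by index; the struct names in the sizeof expressions below are assumptions:

    stats64 = (u64 *)&bnad->stats.drv_stats;
    for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
            buf[bi++] = stats64[i];

    stats64 = (u64 *)&bnad->stats.bna_stats->hw_stats;
    for (i = 0; i < sizeof(struct bfi_enet_stats) / sizeof(u64); i++)
            buf[bi++] = stats64[i];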
941 bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, in bnad_get_flash_partition_by_offset() argument
953 fcomp.bnad = bnad; in bnad_get_flash_partition_by_offset()
957 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
958 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, in bnad_get_flash_partition_by_offset()
961 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
965 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_flash_partition_by_offset()
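The flash helpers share one completion idiom: seed a completion object carrying the bnad back-pointer (line 953), issue the asynchronous bfa flash call under bna_lock, drop the lock, and sleep until the firmware-side callback fires. A sketch of that idiom; the callback name, the comp/comp_status fields, and the error label are assumptions around the excerpted lines:

    fcomp.bnad = bnad;
    fcomp.comp_status = 0;                    /* assumed field */

    init_completion(&fcomp.comp);             /* assumed field */
    spin_lock_irqsave(&bnad->bna_lock, flags);
    ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
                                bnad_cb_completion, &fcomp);  /* assumed cb */
    if (ret != BFA_STATUS_OK) {
            spin_unlock_irqrestore(&bnad->bna_lock, flags);
            goto out_err;                     /* hypothetical label */
    }
    spin_unlock_irqrestore(&bnad->bna_lock, flags);
    wait_for_completion(&fcomp.comp);         /* wait outside the lock */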
995 struct bnad *bnad = netdev_priv(netdev); in bnad_get_eeprom() local
1002 eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16); in bnad_get_eeprom()
1005 flash_part = bnad_get_flash_partition_by_offset(bnad, in bnad_get_eeprom()
1010 fcomp.bnad = bnad; in bnad_get_eeprom()
1014 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_get_eeprom()
1015 ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, in bnad_get_eeprom()
1016 bnad->id, bytes, eeprom->len, in bnad_get_eeprom()
1020 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_eeprom()
1024 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_get_eeprom()
1035 struct bnad *bnad = netdev_priv(netdev); in bnad_set_eeprom() local
1042 if (eeprom->magic != (bnad->pcidev->vendor | in bnad_set_eeprom()
1043 (bnad->pcidev->device << 16))) in bnad_set_eeprom()
1047 flash_part = bnad_get_flash_partition_by_offset(bnad, in bnad_set_eeprom()
1052 fcomp.bnad = bnad; in bnad_set_eeprom()
1056 spin_lock_irqsave(&bnad->bna_lock, flags); in bnad_set_eeprom()
1057 ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, in bnad_set_eeprom()
1058 bnad->id, bytes, eeprom->len, in bnad_set_eeprom()
1062 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_eeprom()
1066 spin_unlock_irqrestore(&bnad->bna_lock, flags); in bnad_set_eeprom()
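get_eeprom and set_eeprom present the adapter's flash as an ethtool EEPROM: the magic word packs the PCI vendor and device IDs (lines 1002, 1042-1043) so an image saved from a different device is rejected, the linear EEPROM offset is translated to a flash partition plus base offset, and the read or update then runs through the same completion idiom. A sketch of the shared front half; the base_offset local and the -EFAULT return are assumptions:

    if (eeprom->magic != (bnad->pcidev->vendor |
                          (bnad->pcidev->device << 16)))
            return -EINVAL;

    /* Translate the linear eeprom offset into partition + base. */
    flash_part = bnad_get_flash_partition_by_offset(bnad,
                                        eeprom->offset, &base_offset);
    if (flash_part == 0)
            return -EFAULT;                   /* assumed error code */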
1076 struct bnad *bnad = netdev_priv(netdev); in bnad_flash_device() local
1081 ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev); in bnad_flash_device()
1087 fcomp.bnad = bnad; in bnad_flash_device()
1091 spin_lock_irq(&bnad->bna_lock); in bnad_flash_device()
1092 ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG, in bnad_flash_device()
1093 bnad->id, (u8 *)fw->data, fw->size, 0, in bnad_flash_device()
1098 spin_unlock_irq(&bnad->bna_lock); in bnad_flash_device()
1102 spin_unlock_irq(&bnad->bna_lock); in bnad_flash_device()
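bnad_flash_device() implements ethtool -f: load the image named in eflash->data with request_firmware(), then write it to the firmware partition through the same async-completion idiom. This path uses plain spin_lock_irq() rather than the irqsave variant, which is safe because the ethtool ioctl runs in process context with interrupts enabled. A sketch; the early return and the release_firmware() placement are assumptions:

    ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
    if (ret)
            return ret;                       /* assumed: bail if not found */

    fcomp.bnad = bnad;
    fcomp.comp_status = 0;

    init_completion(&fcomp.comp);
    spin_lock_irq(&bnad->bna_lock);
    ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
                                   bnad->id, (u8 *)fw->data, fw->size, 0,
                                   bnad_cb_completion, &fcomp); /* assumed cb */
    if (ret != BFA_STATUS_OK) {
            spin_unlock_irq(&bnad->bna_lock);
            goto out;                         /* hypothetical cleanup label */
    }
    spin_unlock_irq(&bnad->bna_lock);
    wait_for_completion(&fcomp.comp);
    release_firmware(fw);                     /* assumed placement */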