Searched refs:bp (Results 1 - 200 of 369) sorted by relevance


/linux-4.4.14/drivers/net/ethernet/broadcom/
bnx2.c
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) bnx2_tx_avail() argument
268 return bp->tx_ring_size - diff; bnx2_tx_avail()
272 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) bnx2_reg_rd_ind() argument
276 spin_lock_bh(&bp->indirect_lock); bnx2_reg_rd_ind()
277 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); bnx2_reg_rd_ind()
278 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); bnx2_reg_rd_ind()
279 spin_unlock_bh(&bp->indirect_lock); bnx2_reg_rd_ind()
284 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) bnx2_reg_wr_ind() argument
286 spin_lock_bh(&bp->indirect_lock); bnx2_reg_wr_ind()
287 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); bnx2_reg_wr_ind()
288 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); bnx2_reg_wr_ind()
289 spin_unlock_bh(&bp->indirect_lock); bnx2_reg_wr_ind()
293 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val) bnx2_shmem_wr() argument
295 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val); bnx2_shmem_wr()
299 bnx2_shmem_rd(struct bnx2 *bp, u32 offset) bnx2_shmem_rd() argument
301 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset); bnx2_shmem_rd()
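The hits above trace the driver's indirect-access idiom: every windowed access first latches the target offset into BNX2_PCICFG_REG_WINDOW_ADDRESS and then touches BNX2_PCICFG_REG_WINDOW, with bp->indirect_lock held so no other context can re-latch the address between the two steps. A self-contained user-space sketch of that pattern (the regs/backing arrays and the pthread mutex are stand-ins for the PCI window and the BH spinlock, not part of the driver):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

enum { REG_WINDOW_ADDRESS, REG_WINDOW };    /* mock BNX2_PCICFG_* pair */

static uint32_t regs[2];                    /* mock PCI config window */
static uint32_t backing[4096];              /* mock chip memory behind it */
static pthread_mutex_t indirect_lock = PTHREAD_MUTEX_INITIALIZER;

static void mmio_wr(int reg, uint32_t val)  /* stand-in for BNX2_WR() */
{
	regs[reg] = val;
	if (reg == REG_WINDOW)              /* data lands at the latched offset */
		backing[regs[REG_WINDOW_ADDRESS] % 4096] = val;
}

static uint32_t mmio_rd(int reg)            /* stand-in for BNX2_RD() */
{
	return reg == REG_WINDOW ? backing[regs[REG_WINDOW_ADDRESS] % 4096]
				 : regs[reg];
}

static uint32_t reg_rd_ind(uint32_t offset) /* shape of bnx2_reg_rd_ind() */
{
	uint32_t val;

	pthread_mutex_lock(&indirect_lock); /* driver uses spin_lock_bh() */
	mmio_wr(REG_WINDOW_ADDRESS, offset);
	val = mmio_rd(REG_WINDOW);
	pthread_mutex_unlock(&indirect_lock);
	return val;
}

static void reg_wr_ind(uint32_t offset, uint32_t val) /* bnx2_reg_wr_ind() */
{
	pthread_mutex_lock(&indirect_lock);
	mmio_wr(REG_WINDOW_ADDRESS, offset);
	mmio_wr(REG_WINDOW, val);
	pthread_mutex_unlock(&indirect_lock);
}

int main(void)
{
	reg_wr_ind(0x167c, 0x12345678);
	printf("0x%08x\n", (unsigned)reg_rd_ind(0x167c));
	return 0;
}

bnx2_shmem_rd()/bnx2_shmem_wr() above are the same two operations with bp->shmem_base folded into the offset, and bnx2_ctx_wr() below is the same latch-then-data shape against the CTX_DATA_ADR/CTX_DATA pair.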
305 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) bnx2_ctx_wr() argument
308 spin_lock_bh(&bp->indirect_lock); bnx2_ctx_wr()
309 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_ctx_wr()
312 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val); bnx2_ctx_wr()
313 BNX2_WR(bp, BNX2_CTX_CTX_CTRL, bnx2_ctx_wr()
316 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL); bnx2_ctx_wr()
322 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); bnx2_ctx_wr()
323 BNX2_WR(bp, BNX2_CTX_DATA, val); bnx2_ctx_wr()
325 spin_unlock_bh(&bp->indirect_lock); bnx2_ctx_wr()
332 struct bnx2 *bp = netdev_priv(dev); bnx2_drv_ctl() local
337 bnx2_reg_wr_ind(bp, io->offset, io->data); bnx2_drv_ctl()
340 io->data = bnx2_reg_rd_ind(bp, io->offset); bnx2_drv_ctl()
343 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); bnx2_drv_ctl()
351 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) bnx2_setup_cnic_irq_info() argument
353 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2_setup_cnic_irq_info()
354 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; bnx2_setup_cnic_irq_info()
357 if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_setup_cnic_irq_info()
360 sb_id = bp->irq_nvecs; bnx2_setup_cnic_irq_info()
370 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; bnx2_setup_cnic_irq_info()
381 struct bnx2 *bp = netdev_priv(dev); bnx2_register_cnic() local
382 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2_register_cnic()
390 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN)) bnx2_register_cnic()
393 bp->cnic_data = data; bnx2_register_cnic()
394 rcu_assign_pointer(bp->cnic_ops, ops); bnx2_register_cnic()
399 bnx2_setup_cnic_irq_info(bp); bnx2_register_cnic()
406 struct bnx2 *bp = netdev_priv(dev); bnx2_unregister_cnic() local
407 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; bnx2_unregister_cnic()
408 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2_unregister_cnic()
410 mutex_lock(&bp->cnic_lock); bnx2_unregister_cnic()
413 RCU_INIT_POINTER(bp->cnic_ops, NULL); bnx2_unregister_cnic()
414 mutex_unlock(&bp->cnic_lock); bnx2_unregister_cnic()
421 struct bnx2 *bp = netdev_priv(dev); bnx2_cnic_probe() local
422 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2_cnic_probe()
428 cp->chip_id = bp->chip_id; bnx2_cnic_probe()
429 cp->pdev = bp->pdev; bnx2_cnic_probe()
430 cp->io_base = bp->regview; bnx2_cnic_probe()
439 bnx2_cnic_stop(struct bnx2 *bp) bnx2_cnic_stop() argument
444 mutex_lock(&bp->cnic_lock); bnx2_cnic_stop()
445 c_ops = rcu_dereference_protected(bp->cnic_ops, bnx2_cnic_stop()
446 lockdep_is_held(&bp->cnic_lock)); bnx2_cnic_stop()
449 c_ops->cnic_ctl(bp->cnic_data, &info); bnx2_cnic_stop()
451 mutex_unlock(&bp->cnic_lock); bnx2_cnic_stop()
455 bnx2_cnic_start(struct bnx2 *bp) bnx2_cnic_start() argument
460 mutex_lock(&bp->cnic_lock); bnx2_cnic_start()
461 c_ops = rcu_dereference_protected(bp->cnic_ops, bnx2_cnic_start()
462 lockdep_is_held(&bp->cnic_lock)); bnx2_cnic_start()
464 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { bnx2_cnic_start()
465 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; bnx2_cnic_start()
470 c_ops->cnic_ctl(bp->cnic_data, &info); bnx2_cnic_start()
472 mutex_unlock(&bp->cnic_lock); bnx2_cnic_start()
478 bnx2_cnic_stop(struct bnx2 *bp) bnx2_cnic_stop() argument
483 bnx2_cnic_start(struct bnx2 *bp) bnx2_cnic_start() argument
490 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) bnx2_read_phy() argument
495 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { bnx2_read_phy()
496 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_read_phy()
499 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); bnx2_read_phy()
500 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_read_phy()
505 val1 = (bp->phy_addr << 21) | (reg << 16) | bnx2_read_phy()
508 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); bnx2_read_phy()
513 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); bnx2_read_phy()
517 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); bnx2_read_phy()
533 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { bnx2_read_phy()
534 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_read_phy()
537 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); bnx2_read_phy()
538 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_read_phy()
547 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) bnx2_write_phy() argument
552 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { bnx2_write_phy()
553 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_write_phy()
556 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); bnx2_write_phy()
557 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_write_phy()
562 val1 = (bp->phy_addr << 21) | (reg << 16) | val | bnx2_write_phy()
565 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); bnx2_write_phy()
570 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); bnx2_write_phy()
582 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { bnx2_write_phy()
583 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_write_phy()
586 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); bnx2_write_phy()
587 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); bnx2_write_phy()
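The paired bnx2_read_phy()/bnx2_write_phy() hits show the MDIO framing: the PHY address is packed at bit 21 and the register number at bit 16 of BNX2_EMAC_MDIO_COMM (the << 21 and << 16 shifts above), the write kicks the transaction, and the driver then polls the same register until the controller's start/busy bit drops. A compilable sketch of the framing and bounded poll (the busy-bit position, the mock register, and the instantly-completing "hardware" are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define MDIO_COMM_BUSY (1u << 29)           /* assumed busy-bit position */

static uint32_t mdio_comm;                  /* mock BNX2_EMAC_MDIO_COMM */

static uint32_t mdio_frame(uint32_t phy, uint32_t reg, uint32_t data)
{
	return (phy << 21) | (reg << 16) | (data & 0xffff); /* shifts match the hits */
}

static int mdio_write(uint32_t phy, uint32_t reg, uint16_t val)
{
	mdio_comm = mdio_frame(phy, reg, val) | MDIO_COMM_BUSY;
	mdio_comm &= ~MDIO_COMM_BUSY;       /* real hardware clears this when done */

	for (int i = 0; i < 50; i++)        /* bounded poll, like the driver's loop */
		if (!(mdio_comm & MDIO_COMM_BUSY))
			return 0;
	return -1;                          /* timeout */
}

int main(void)
{
	printf("%s\n", mdio_write(1, 0 /* MII_BMCR */, 0x8000) ? "timeout" : "ok");
	return 0;
}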
596 bnx2_disable_int(struct bnx2 *bp) bnx2_disable_int() argument
601 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_disable_int()
602 bnapi = &bp->bnx2_napi[i]; bnx2_disable_int()
603 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | bnx2_disable_int()
606 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); bnx2_disable_int()
610 bnx2_enable_int(struct bnx2 *bp) bnx2_enable_int() argument
615 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_enable_int()
616 bnapi = &bp->bnx2_napi[i]; bnx2_enable_int()
618 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | bnx2_enable_int()
623 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | bnx2_enable_int()
627 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); bnx2_enable_int()
631 bnx2_disable_int_sync(struct bnx2 *bp) bnx2_disable_int_sync() argument
635 atomic_inc(&bp->intr_sem); bnx2_disable_int_sync()
636 if (!netif_running(bp->dev)) bnx2_disable_int_sync()
639 bnx2_disable_int(bp); bnx2_disable_int_sync()
640 for (i = 0; i < bp->irq_nvecs; i++) bnx2_disable_int_sync()
641 synchronize_irq(bp->irq_tbl[i].vector); bnx2_disable_int_sync()
645 bnx2_napi_disable(struct bnx2 *bp) bnx2_napi_disable() argument
649 for (i = 0; i < bp->irq_nvecs; i++) bnx2_napi_disable()
650 napi_disable(&bp->bnx2_napi[i].napi); bnx2_napi_disable()
654 bnx2_napi_enable(struct bnx2 *bp) bnx2_napi_enable() argument
658 for (i = 0; i < bp->irq_nvecs; i++) bnx2_napi_enable()
659 napi_enable(&bp->bnx2_napi[i].napi); bnx2_napi_enable()
663 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) bnx2_netif_stop() argument
666 bnx2_cnic_stop(bp); bnx2_netif_stop()
667 if (netif_running(bp->dev)) { bnx2_netif_stop()
668 bnx2_napi_disable(bp); bnx2_netif_stop()
669 netif_tx_disable(bp->dev); bnx2_netif_stop()
671 bnx2_disable_int_sync(bp); bnx2_netif_stop()
672 netif_carrier_off(bp->dev); /* prevent tx timeout */ bnx2_netif_stop()
676 bnx2_netif_start(struct bnx2 *bp, bool start_cnic) bnx2_netif_start() argument
678 if (atomic_dec_and_test(&bp->intr_sem)) { bnx2_netif_start()
679 if (netif_running(bp->dev)) { bnx2_netif_start()
680 netif_tx_wake_all_queues(bp->dev); bnx2_netif_start()
681 spin_lock_bh(&bp->phy_lock); bnx2_netif_start()
682 if (bp->link_up) bnx2_netif_start()
683 netif_carrier_on(bp->dev); bnx2_netif_start()
684 spin_unlock_bh(&bp->phy_lock); bnx2_netif_start()
685 bnx2_napi_enable(bp); bnx2_netif_start()
686 bnx2_enable_int(bp); bnx2_netif_start()
688 bnx2_cnic_start(bp); bnx2_netif_start()
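bnx2_disable_int_sync() and bnx2_netif_start() pair through bp->intr_sem: every stop path bumps the counter, and only the start that drops it back to zero (the atomic_dec_and_test() at line 678) re-enables NAPI and interrupts, so nested stop/start sequences cannot re-arm the device early. The same discipline in portable C11 atomics (names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int intr_sem;

static void netif_stop(void)
{
	atomic_fetch_add(&intr_sem, 1);     /* like atomic_inc(&bp->intr_sem) */
	/* ...disable NAPI, sync IRQs... */
}

static bool netif_start(void)
{
	/* atomic_dec_and_test(): true only for the call that reaches zero */
	if (atomic_fetch_sub(&intr_sem, 1) == 1) {
		/* ...wake queues, enable NAPI and interrupts... */
		return true;
	}
	return false;
}

int main(void)
{
	netif_stop();
	netif_stop();                       /* nested stop */
	printf("%d\n", netif_start());      /* 0: still stopped once */
	printf("%d\n", netif_start());      /* 1: the last start re-enables */
	return 0;
}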
694 bnx2_free_tx_mem(struct bnx2 *bp) bnx2_free_tx_mem() argument
698 for (i = 0; i < bp->num_tx_rings; i++) { bnx2_free_tx_mem()
699 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_tx_mem()
703 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE, bnx2_free_tx_mem()
714 bnx2_free_rx_mem(struct bnx2 *bp) bnx2_free_rx_mem() argument
718 for (i = 0; i < bp->num_rx_rings; i++) { bnx2_free_rx_mem()
719 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_rx_mem()
723 for (j = 0; j < bp->rx_max_ring; j++) { bnx2_free_rx_mem()
725 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, bnx2_free_rx_mem()
733 for (j = 0; j < bp->rx_max_pg_ring; j++) { bnx2_free_rx_mem()
735 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, bnx2_free_rx_mem()
746 bnx2_alloc_tx_mem(struct bnx2 *bp) bnx2_alloc_tx_mem() argument
750 for (i = 0; i < bp->num_tx_rings; i++) { bnx2_alloc_tx_mem()
751 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_alloc_tx_mem()
759 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, bnx2_alloc_tx_mem()
768 bnx2_alloc_rx_mem(struct bnx2 *bp) bnx2_alloc_rx_mem() argument
772 for (i = 0; i < bp->num_rx_rings; i++) { bnx2_alloc_rx_mem()
773 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_alloc_rx_mem()
778 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); bnx2_alloc_rx_mem()
782 for (j = 0; j < bp->rx_max_ring; j++) { bnx2_alloc_rx_mem()
784 dma_alloc_coherent(&bp->pdev->dev, bnx2_alloc_rx_mem()
793 if (bp->rx_pg_ring_size) { bnx2_alloc_rx_mem()
795 bp->rx_max_pg_ring); bnx2_alloc_rx_mem()
801 for (j = 0; j < bp->rx_max_pg_ring; j++) { bnx2_alloc_rx_mem()
803 dma_alloc_coherent(&bp->pdev->dev, bnx2_alloc_rx_mem()
818 struct bnx2 *bp = netdev_priv(dev); bnx2_free_stats_blk() local
820 if (bp->status_blk) { bnx2_free_stats_blk()
821 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, bnx2_free_stats_blk()
822 bp->status_blk, bnx2_free_stats_blk()
823 bp->status_blk_mapping); bnx2_free_stats_blk()
824 bp->status_blk = NULL; bnx2_free_stats_blk()
825 bp->stats_blk = NULL; bnx2_free_stats_blk()
834 struct bnx2 *bp = netdev_priv(dev); bnx2_alloc_stats_blk() local
838 if (bp->flags & BNX2_FLAG_MSIX_CAP) bnx2_alloc_stats_blk()
841 bp->status_stats_size = status_blk_size + bnx2_alloc_stats_blk()
843 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, bnx2_alloc_stats_blk()
844 &bp->status_blk_mapping, GFP_KERNEL); bnx2_alloc_stats_blk()
848 bp->status_blk = status_blk; bnx2_alloc_stats_blk()
849 bp->stats_blk = status_blk + status_blk_size; bnx2_alloc_stats_blk()
850 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; bnx2_alloc_stats_blk()
856 bnx2_free_mem(struct bnx2 *bp) bnx2_free_mem() argument
859 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; bnx2_free_mem()
861 bnx2_free_tx_mem(bp); bnx2_free_mem()
862 bnx2_free_rx_mem(bp); bnx2_free_mem()
864 for (i = 0; i < bp->ctx_pages; i++) { bnx2_free_mem()
865 if (bp->ctx_blk[i]) { bnx2_free_mem()
866 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE, bnx2_free_mem()
867 bp->ctx_blk[i], bnx2_free_mem()
868 bp->ctx_blk_mapping[i]); bnx2_free_mem()
869 bp->ctx_blk[i] = NULL; bnx2_free_mem()
878 bnx2_alloc_mem(struct bnx2 *bp) bnx2_alloc_mem() argument
883 bnapi = &bp->bnx2_napi[0]; bnx2_alloc_mem()
884 bnapi->status_blk.msi = bp->status_blk; bnx2_alloc_mem()
889 if (bp->flags & BNX2_FLAG_MSIX_CAP) { bnx2_alloc_mem()
890 for (i = 1; i < bp->irq_nvecs; i++) { bnx2_alloc_mem()
893 bnapi = &bp->bnx2_napi[i]; bnx2_alloc_mem()
895 sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i); bnx2_alloc_mem()
905 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_alloc_mem()
906 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE; bnx2_alloc_mem()
907 if (bp->ctx_pages == 0) bnx2_alloc_mem()
908 bp->ctx_pages = 1; bnx2_alloc_mem()
909 for (i = 0; i < bp->ctx_pages; i++) { bnx2_alloc_mem()
910 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, bnx2_alloc_mem()
912 &bp->ctx_blk_mapping[i], bnx2_alloc_mem()
914 if (bp->ctx_blk[i] == NULL) bnx2_alloc_mem()
919 err = bnx2_alloc_rx_mem(bp); bnx2_alloc_mem()
923 err = bnx2_alloc_tx_mem(bp); bnx2_alloc_mem()
930 bnx2_free_mem(bp); bnx2_alloc_mem()
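bnx2_alloc_mem() relies on a single error label: any failed allocation jumps to one exit that calls bnx2_free_mem() (line 930), and the free path tolerates half-built state by checking and NULLing each pointer, as bnx2_free_mem() does with bp->ctx_blk[i]. Stripped to the idiom, with plain malloc standing in for dma_alloc_coherent():

#include <stdlib.h>

static void *ctx_blk[4];

static void free_mem(void)
{
	for (int i = 0; i < 4; i++) {
		free(ctx_blk[i]);           /* free(NULL) is a no-op */
		ctx_blk[i] = NULL;          /* safe to call free_mem() twice */
	}
}

static int alloc_mem(void)
{
	for (int i = 0; i < 4; i++) {
		ctx_blk[i] = malloc(4096);  /* dma_alloc_coherent() in the driver */
		if (!ctx_blk[i])
			goto alloc_mem_err;
	}
	return 0;

alloc_mem_err:
	free_mem();                         /* unwind whatever succeeded */
	return -1;                          /* -ENOMEM in the driver */
}

int main(void)
{
	int rc = alloc_mem();
	free_mem();
	return rc;
}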
935 bnx2_report_fw_link(struct bnx2 *bp) bnx2_report_fw_link() argument
939 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_report_fw_link()
942 if (bp->link_up) { bnx2_report_fw_link()
945 switch (bp->line_speed) { bnx2_report_fw_link()
947 if (bp->duplex == DUPLEX_HALF) bnx2_report_fw_link()
953 if (bp->duplex == DUPLEX_HALF) bnx2_report_fw_link()
959 if (bp->duplex == DUPLEX_HALF) bnx2_report_fw_link()
965 if (bp->duplex == DUPLEX_HALF) bnx2_report_fw_link()
974 if (bp->autoneg) { bnx2_report_fw_link()
977 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); bnx2_report_fw_link()
978 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); bnx2_report_fw_link()
981 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) bnx2_report_fw_link()
990 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status); bnx2_report_fw_link()
994 bnx2_xceiver_str(struct bnx2 *bp) bnx2_xceiver_str() argument
996 return (bp->phy_port == PORT_FIBRE) ? "SerDes" : bnx2_xceiver_str()
997 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" : bnx2_xceiver_str()
1002 bnx2_report_link(struct bnx2 *bp) bnx2_report_link() argument
1004 if (bp->link_up) { bnx2_report_link()
1005 netif_carrier_on(bp->dev); bnx2_report_link()
1006 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex", bnx2_report_link()
1007 bnx2_xceiver_str(bp), bnx2_report_link()
1008 bp->line_speed, bnx2_report_link()
1009 bp->duplex == DUPLEX_FULL ? "full" : "half"); bnx2_report_link()
1011 if (bp->flow_ctrl) { bnx2_report_link()
1012 if (bp->flow_ctrl & FLOW_CTRL_RX) { bnx2_report_link()
1014 if (bp->flow_ctrl & FLOW_CTRL_TX) bnx2_report_link()
1024 netif_carrier_off(bp->dev); bnx2_report_link()
1025 netdev_err(bp->dev, "NIC %s Link is Down\n", bnx2_report_link()
1026 bnx2_xceiver_str(bp)); bnx2_report_link()
1029 bnx2_report_fw_link(bp); bnx2_report_link()
1033 bnx2_resolve_flow_ctrl(struct bnx2 *bp) bnx2_resolve_flow_ctrl() argument
1037 bp->flow_ctrl = 0; bnx2_resolve_flow_ctrl()
1038 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != bnx2_resolve_flow_ctrl()
1041 if (bp->duplex == DUPLEX_FULL) { bnx2_resolve_flow_ctrl()
1042 bp->flow_ctrl = bp->req_flow_ctrl; bnx2_resolve_flow_ctrl()
1047 if (bp->duplex != DUPLEX_FULL) { bnx2_resolve_flow_ctrl()
1051 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_resolve_flow_ctrl()
1052 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { bnx2_resolve_flow_ctrl()
1055 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); bnx2_resolve_flow_ctrl()
1057 bp->flow_ctrl |= FLOW_CTRL_TX; bnx2_resolve_flow_ctrl()
1059 bp->flow_ctrl |= FLOW_CTRL_RX; bnx2_resolve_flow_ctrl()
1063 bnx2_read_phy(bp, bp->mii_adv, &local_adv); bnx2_resolve_flow_ctrl()
1064 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); bnx2_resolve_flow_ctrl()
1066 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_resolve_flow_ctrl()
1087 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; bnx2_resolve_flow_ctrl()
1090 bp->flow_ctrl = FLOW_CTRL_RX; bnx2_resolve_flow_ctrl()
1095 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; bnx2_resolve_flow_ctrl()
1103 bp->flow_ctrl = FLOW_CTRL_TX; bnx2_resolve_flow_ctrl()
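For the non-5708-SerDes case, bnx2_resolve_flow_ctrl() implements the standard 802.3 pause resolution from the local and link-partner advertisements: full duplex only, symmetric pause when both sides advertise PAUSE, and a one-sided result when the ASYM bits line up. The kernel's generic MII helper expresses the same table; a self-contained equivalent:

#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400         /* pause capable */
#define ADVERTISE_PAUSE_ASYM 0x0800         /* asymmetric pause */
#define FLOW_CTRL_TX 1
#define FLOW_CTRL_RX 2

/* same table as mii_resolve_flowctrl_fdx() in include/linux/mii.h */
static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
{
	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP)
		return FLOW_CTRL_TX | FLOW_CTRL_RX;
	if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			return FLOW_CTRL_RX;    /* we pause, partner doesn't */
		if (rmtadv & ADVERTISE_PAUSE_CAP)
			return FLOW_CTRL_TX;    /* partner pauses, we don't */
	}
	return 0;
}

int main(void)
{
	/* symmetric: both advertise PAUSE -> 3 (TX|RX) */
	printf("%d\n", resolve_flowctrl_fdx(ADVERTISE_PAUSE_CAP, ADVERTISE_PAUSE_CAP));
	/* local ASYM only, remote PAUSE|ASYM -> 1 (TX only) */
	printf("%d\n", resolve_flowctrl_fdx(ADVERTISE_PAUSE_ASYM,
					    ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM));
	return 0;
}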
1109 bnx2_5709s_linkup(struct bnx2 *bp) bnx2_5709s_linkup() argument
1113 bp->link_up = 1; bnx2_5709s_linkup()
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); bnx2_5709s_linkup()
1116 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); bnx2_5709s_linkup()
1117 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); bnx2_5709s_linkup()
1119 if ((bp->autoneg & AUTONEG_SPEED) == 0) { bnx2_5709s_linkup()
1120 bp->line_speed = bp->req_line_speed; bnx2_5709s_linkup()
1121 bp->duplex = bp->req_duplex; bnx2_5709s_linkup()
1127 bp->line_speed = SPEED_10; bnx2_5709s_linkup()
1130 bp->line_speed = SPEED_100; bnx2_5709s_linkup()
1134 bp->line_speed = SPEED_1000; bnx2_5709s_linkup()
1137 bp->line_speed = SPEED_2500; bnx2_5709s_linkup()
1141 bp->duplex = DUPLEX_FULL; bnx2_5709s_linkup()
1143 bp->duplex = DUPLEX_HALF; bnx2_5709s_linkup()
1148 bnx2_5708s_linkup(struct bnx2 *bp) bnx2_5708s_linkup() argument
1152 bp->link_up = 1; bnx2_5708s_linkup()
1153 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); bnx2_5708s_linkup()
1156 bp->line_speed = SPEED_10; bnx2_5708s_linkup()
1159 bp->line_speed = SPEED_100; bnx2_5708s_linkup()
1162 bp->line_speed = SPEED_1000; bnx2_5708s_linkup()
1165 bp->line_speed = SPEED_2500; bnx2_5708s_linkup()
1169 bp->duplex = DUPLEX_FULL; bnx2_5708s_linkup()
1171 bp->duplex = DUPLEX_HALF; bnx2_5708s_linkup()
1177 bnx2_5706s_linkup(struct bnx2 *bp) bnx2_5706s_linkup() argument
1181 bp->link_up = 1; bnx2_5706s_linkup()
1182 bp->line_speed = SPEED_1000; bnx2_5706s_linkup()
1184 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706s_linkup()
1186 bp->duplex = DUPLEX_FULL; bnx2_5706s_linkup()
1189 bp->duplex = DUPLEX_HALF; bnx2_5706s_linkup()
1196 bnx2_read_phy(bp, bp->mii_adv, &local_adv); bnx2_5706s_linkup()
1197 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); bnx2_5706s_linkup()
1203 bp->duplex = DUPLEX_FULL; bnx2_5706s_linkup()
1206 bp->duplex = DUPLEX_HALF; bnx2_5706s_linkup()
1214 bnx2_copper_linkup(struct bnx2 *bp) bnx2_copper_linkup() argument
1218 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX; bnx2_copper_linkup()
1220 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_copper_linkup()
1224 bnx2_read_phy(bp, MII_CTRL1000, &local_adv); bnx2_copper_linkup()
1225 bnx2_read_phy(bp, MII_STAT1000, &remote_adv); bnx2_copper_linkup()
1229 bp->line_speed = SPEED_1000; bnx2_copper_linkup()
1230 bp->duplex = DUPLEX_FULL; bnx2_copper_linkup()
1233 bp->line_speed = SPEED_1000; bnx2_copper_linkup()
1234 bp->duplex = DUPLEX_HALF; bnx2_copper_linkup()
1237 bnx2_read_phy(bp, bp->mii_adv, &local_adv); bnx2_copper_linkup()
1238 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); bnx2_copper_linkup()
1242 bp->line_speed = SPEED_100; bnx2_copper_linkup()
1243 bp->duplex = DUPLEX_FULL; bnx2_copper_linkup()
1246 bp->line_speed = SPEED_100; bnx2_copper_linkup()
1247 bp->duplex = DUPLEX_HALF; bnx2_copper_linkup()
1250 bp->line_speed = SPEED_10; bnx2_copper_linkup()
1251 bp->duplex = DUPLEX_FULL; bnx2_copper_linkup()
1254 bp->line_speed = SPEED_10; bnx2_copper_linkup()
1255 bp->duplex = DUPLEX_HALF; bnx2_copper_linkup()
1258 bp->line_speed = 0; bnx2_copper_linkup()
1259 bp->link_up = 0; bnx2_copper_linkup()
1265 bp->line_speed = SPEED_100; bnx2_copper_linkup()
1268 bp->line_speed = SPEED_10; bnx2_copper_linkup()
1271 bp->duplex = DUPLEX_FULL; bnx2_copper_linkup()
1274 bp->duplex = DUPLEX_HALF; bnx2_copper_linkup()
1278 if (bp->link_up) { bnx2_copper_linkup()
1281 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status); bnx2_copper_linkup()
1283 bp->phy_flags |= BNX2_PHY_FLAG_MDIX; bnx2_copper_linkup()
1290 bnx2_init_rx_context(struct bnx2 *bp, u32 cid) bnx2_init_rx_context() argument
1298 if (bp->flow_ctrl & FLOW_CTRL_TX) bnx2_init_rx_context()
1301 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val); bnx2_init_rx_context()
1305 bnx2_init_all_rx_contexts(struct bnx2 *bp) bnx2_init_all_rx_contexts() argument
1310 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) { bnx2_init_all_rx_contexts()
1313 bnx2_init_rx_context(bp, cid); bnx2_init_all_rx_contexts()
1318 bnx2_set_mac_link(struct bnx2 *bp) bnx2_set_mac_link() argument
1322 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); bnx2_set_mac_link()
1323 if (bp->link_up && (bp->line_speed == SPEED_1000) && bnx2_set_mac_link()
1324 (bp->duplex == DUPLEX_HALF)) { bnx2_set_mac_link()
1325 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); bnx2_set_mac_link()
1329 val = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_set_mac_link()
1335 if (bp->link_up) { bnx2_set_mac_link()
1336 switch (bp->line_speed) { bnx2_set_mac_link()
1338 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) { bnx2_set_mac_link()
1359 if (bp->duplex == DUPLEX_HALF) bnx2_set_mac_link()
1361 BNX2_WR(bp, BNX2_EMAC_MODE, val); bnx2_set_mac_link()
1364 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; bnx2_set_mac_link()
1366 if (bp->flow_ctrl & FLOW_CTRL_RX) bnx2_set_mac_link()
1367 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; bnx2_set_mac_link()
1368 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); bnx2_set_mac_link()
1371 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE); bnx2_set_mac_link()
1374 if (bp->flow_ctrl & FLOW_CTRL_TX) bnx2_set_mac_link()
1376 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val); bnx2_set_mac_link()
1379 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); bnx2_set_mac_link()
1381 bnx2_init_all_rx_contexts(bp); bnx2_set_mac_link()
1385 bnx2_enable_bmsr1(struct bnx2 *bp) bnx2_enable_bmsr1() argument
1387 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_enable_bmsr1()
1388 (BNX2_CHIP(bp) == BNX2_CHIP_5709)) bnx2_enable_bmsr1()
1389 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_enable_bmsr1()
1394 bnx2_disable_bmsr1(struct bnx2 *bp) bnx2_disable_bmsr1() argument
1396 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_disable_bmsr1()
1397 (BNX2_CHIP(bp) == BNX2_CHIP_5709)) bnx2_disable_bmsr1()
1398 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_disable_bmsr1()
1403 bnx2_test_and_enable_2g5(struct bnx2 *bp) bnx2_test_and_enable_2g5() argument
1408 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_test_and_enable_2g5()
1411 if (bp->autoneg & AUTONEG_SPEED) bnx2_test_and_enable_2g5()
1412 bp->advertising |= ADVERTISED_2500baseX_Full; bnx2_test_and_enable_2g5()
1414 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_and_enable_2g5()
1415 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); bnx2_test_and_enable_2g5()
1417 bnx2_read_phy(bp, bp->mii_up1, &up1); bnx2_test_and_enable_2g5()
1420 bnx2_write_phy(bp, bp->mii_up1, up1); bnx2_test_and_enable_2g5()
1424 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_and_enable_2g5()
1425 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_test_and_enable_2g5()
1432 bnx2_test_and_disable_2g5(struct bnx2 *bp) bnx2_test_and_disable_2g5() argument
1437 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_test_and_disable_2g5()
1440 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_and_disable_2g5()
1441 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); bnx2_test_and_disable_2g5()
1443 bnx2_read_phy(bp, bp->mii_up1, &up1); bnx2_test_and_disable_2g5()
1446 bnx2_write_phy(bp, bp->mii_up1, up1); bnx2_test_and_disable_2g5()
1450 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_and_disable_2g5()
1451 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_test_and_disable_2g5()
1458 bnx2_enable_forced_2g5(struct bnx2 *bp) bnx2_enable_forced_2g5() argument
1463 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_enable_forced_2g5()
1466 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_enable_forced_2g5()
1469 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_enable_forced_2g5()
1471 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { bnx2_enable_forced_2g5()
1475 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); bnx2_enable_forced_2g5()
1478 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_enable_forced_2g5()
1480 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_enable_forced_2g5()
1482 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { bnx2_enable_forced_2g5()
1483 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_enable_forced_2g5()
1493 if (bp->autoneg & AUTONEG_SPEED) { bnx2_enable_forced_2g5()
1495 if (bp->req_duplex == DUPLEX_FULL) bnx2_enable_forced_2g5()
1498 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_enable_forced_2g5()
1502 bnx2_disable_forced_2g5(struct bnx2 *bp) bnx2_disable_forced_2g5() argument
1507 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_disable_forced_2g5()
1510 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_disable_forced_2g5()
1513 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_disable_forced_2g5()
1515 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { bnx2_disable_forced_2g5()
1517 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); bnx2_disable_forced_2g5()
1520 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, bnx2_disable_forced_2g5()
1522 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_disable_forced_2g5()
1524 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { bnx2_disable_forced_2g5()
1525 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_disable_forced_2g5()
1535 if (bp->autoneg & AUTONEG_SPEED) bnx2_disable_forced_2g5()
1537 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_disable_forced_2g5()
1541 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start) bnx2_5706s_force_link_dn() argument
1545 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL); bnx2_5706s_force_link_dn()
1546 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); bnx2_5706s_force_link_dn()
1548 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f); bnx2_5706s_force_link_dn()
1550 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0); bnx2_5706s_force_link_dn()
1554 bnx2_set_link(struct bnx2 *bp) bnx2_set_link() argument
1559 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) { bnx2_set_link()
1560 bp->link_up = 1; bnx2_set_link()
1564 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_set_link()
1567 link_up = bp->link_up; bnx2_set_link()
1569 bnx2_enable_bmsr1(bp); bnx2_set_link()
1570 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_set_link()
1571 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_set_link()
1572 bnx2_disable_bmsr1(bp); bnx2_set_link()
1574 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_set_link()
1575 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) { bnx2_set_link()
1578 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { bnx2_set_link()
1579 bnx2_5706s_force_link_dn(bp, 0); bnx2_set_link()
1580 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; bnx2_set_link()
1582 val = BNX2_RD(bp, BNX2_EMAC_STATUS); bnx2_set_link()
1584 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_set_link()
1585 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_set_link()
1586 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_set_link()
1596 bp->link_up = 1; bnx2_set_link()
1598 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_set_link()
1599 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_set_link()
1600 bnx2_5706s_linkup(bp); bnx2_set_link()
1601 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_set_link()
1602 bnx2_5708s_linkup(bp); bnx2_set_link()
1603 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_set_link()
1604 bnx2_5709s_linkup(bp); bnx2_set_link()
1607 bnx2_copper_linkup(bp); bnx2_set_link()
1609 bnx2_resolve_flow_ctrl(bp); bnx2_set_link()
1612 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_set_link()
1613 (bp->autoneg & AUTONEG_SPEED)) bnx2_set_link()
1614 bnx2_disable_forced_2g5(bp); bnx2_set_link()
1616 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) { bnx2_set_link()
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_set_link()
1621 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_set_link()
1623 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_set_link()
1625 bp->link_up = 0; bnx2_set_link()
1628 if (bp->link_up != link_up) { bnx2_set_link()
1629 bnx2_report_link(bp); bnx2_set_link()
1632 bnx2_set_mac_link(bp); bnx2_set_link()
1638 bnx2_reset_phy(struct bnx2 *bp) bnx2_reset_phy() argument
1643 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET); bnx2_reset_phy()
1649 bnx2_read_phy(bp, bp->mii_bmcr, &reg); bnx2_reset_phy()
1662 bnx2_phy_get_pause_adv(struct bnx2 *bp) bnx2_phy_get_pause_adv() argument
1666 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) == bnx2_phy_get_pause_adv()
1669 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_phy_get_pause_adv()
1676 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) { bnx2_phy_get_pause_adv()
1677 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_phy_get_pause_adv()
1684 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) { bnx2_phy_get_pause_adv()
1685 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_phy_get_pause_adv()
1698 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1699 __releases(&bp->phy_lock)
1700 __acquires(&bp->phy_lock)
1704 pause_adv = bnx2_phy_get_pause_adv(bp);
1706 if (bp->autoneg & AUTONEG_SPEED) {
1708 if (bp->advertising & ADVERTISED_10baseT_Half)
1710 if (bp->advertising & ADVERTISED_10baseT_Full)
1712 if (bp->advertising & ADVERTISED_100baseT_Half)
1714 if (bp->advertising & ADVERTISED_100baseT_Full)
1716 if (bp->advertising & ADVERTISED_1000baseT_Full)
1718 if (bp->advertising & ADVERTISED_2500baseX_Full)
1721 if (bp->req_line_speed == SPEED_2500)
1723 else if (bp->req_line_speed == SPEED_1000)
1725 else if (bp->req_line_speed == SPEED_100) {
1726 if (bp->req_duplex == DUPLEX_FULL)
1730 } else if (bp->req_line_speed == SPEED_10) {
1731 if (bp->req_duplex == DUPLEX_FULL)
1747 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1749 spin_unlock_bh(&bp->phy_lock);
1750 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1751 spin_lock_bh(&bp->phy_lock);
1757 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1758 __releases(&bp->phy_lock)
1759 __acquires(&bp->phy_lock)
1764 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1765 return bnx2_setup_remote_phy(bp, port);
1767 if (!(bp->autoneg & AUTONEG_SPEED)) {
1771 if (bp->req_line_speed == SPEED_2500) {
1772 if (!bnx2_test_and_enable_2g5(bp))
1774 } else if (bp->req_line_speed == SPEED_1000) {
1775 if (bnx2_test_and_disable_2g5(bp))
1778 bnx2_read_phy(bp, bp->mii_adv, &adv);
1781 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1785 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1786 if (bp->req_line_speed == SPEED_2500)
1787 bnx2_enable_forced_2g5(bp);
1788 else if (bp->req_line_speed == SPEED_1000) {
1789 bnx2_disable_forced_2g5(bp);
1793 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1794 if (bp->req_line_speed == SPEED_2500)
1800 if (bp->req_duplex == DUPLEX_FULL) {
1810 if (bp->link_up) {
1811 bnx2_write_phy(bp, bp->mii_adv, adv &
1814 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1817 bp->link_up = 0;
1818 netif_carrier_off(bp->dev);
1819 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1820 bnx2_report_link(bp);
1822 bnx2_write_phy(bp, bp->mii_adv, adv);
1823 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1825 bnx2_resolve_flow_ctrl(bp);
1826 bnx2_set_mac_link(bp);
1831 bnx2_test_and_enable_2g5(bp);
1833 if (bp->advertising & ADVERTISED_1000baseT_Full)
1836 new_adv |= bnx2_phy_get_pause_adv(bp);
1838 bnx2_read_phy(bp, bp->mii_adv, &adv);
1839 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1841 bp->serdes_an_pending = 0;
1844 if (bp->link_up) {
1845 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1846 spin_unlock_bh(&bp->phy_lock);
1848 spin_lock_bh(&bp->phy_lock);
1851 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1852 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1862 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1863 bp->serdes_an_pending = 1;
1864 mod_timer(&bp->timer, jiffies + bp->current_interval);
1866 bnx2_resolve_flow_ctrl(bp);
1867 bnx2_set_mac_link(bp);
1874 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1889 bnx2_set_default_remote_link(struct bnx2 *bp) bnx2_set_default_remote_link() argument
1893 if (bp->phy_port == PORT_TP) bnx2_set_default_remote_link()
1894 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK); bnx2_set_default_remote_link()
1896 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK); bnx2_set_default_remote_link()
1899 bp->req_line_speed = 0; bnx2_set_default_remote_link()
1900 bp->autoneg |= AUTONEG_SPEED; bnx2_set_default_remote_link()
1901 bp->advertising = ADVERTISED_Autoneg; bnx2_set_default_remote_link()
1903 bp->advertising |= ADVERTISED_10baseT_Half; bnx2_set_default_remote_link()
1905 bp->advertising |= ADVERTISED_10baseT_Full; bnx2_set_default_remote_link()
1907 bp->advertising |= ADVERTISED_100baseT_Half; bnx2_set_default_remote_link()
1909 bp->advertising |= ADVERTISED_100baseT_Full; bnx2_set_default_remote_link()
1911 bp->advertising |= ADVERTISED_1000baseT_Full; bnx2_set_default_remote_link()
1913 bp->advertising |= ADVERTISED_2500baseX_Full; bnx2_set_default_remote_link()
1915 bp->autoneg = 0; bnx2_set_default_remote_link()
1916 bp->advertising = 0; bnx2_set_default_remote_link()
1917 bp->req_duplex = DUPLEX_FULL; bnx2_set_default_remote_link()
1919 bp->req_line_speed = SPEED_10; bnx2_set_default_remote_link()
1921 bp->req_duplex = DUPLEX_HALF; bnx2_set_default_remote_link()
1924 bp->req_line_speed = SPEED_100; bnx2_set_default_remote_link()
1926 bp->req_duplex = DUPLEX_HALF; bnx2_set_default_remote_link()
1929 bp->req_line_speed = SPEED_1000; bnx2_set_default_remote_link()
1931 bp->req_line_speed = SPEED_2500; bnx2_set_default_remote_link()
1936 bnx2_set_default_link(struct bnx2 *bp) bnx2_set_default_link() argument
1938 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_set_default_link()
1939 bnx2_set_default_remote_link(bp); bnx2_set_default_link()
1943 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; bnx2_set_default_link()
1944 bp->req_line_speed = 0; bnx2_set_default_link()
1945 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_set_default_link()
1948 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; bnx2_set_default_link()
1950 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG); bnx2_set_default_link()
1953 bp->autoneg = 0; bnx2_set_default_link()
1954 bp->req_line_speed = bp->line_speed = SPEED_1000; bnx2_set_default_link()
1955 bp->req_duplex = DUPLEX_FULL; bnx2_set_default_link()
1958 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; bnx2_set_default_link()
1962 bnx2_send_heart_beat(struct bnx2 *bp) bnx2_send_heart_beat() argument
1967 spin_lock(&bp->indirect_lock); bnx2_send_heart_beat()
1968 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); bnx2_send_heart_beat()
1969 addr = bp->shmem_base + BNX2_DRV_PULSE_MB; bnx2_send_heart_beat()
1970 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); bnx2_send_heart_beat()
1971 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); bnx2_send_heart_beat()
1972 spin_unlock(&bp->indirect_lock); bnx2_send_heart_beat()
1976 bnx2_remote_phy_event(struct bnx2 *bp) bnx2_remote_phy_event() argument
1979 u8 link_up = bp->link_up; bnx2_remote_phy_event()
1982 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); bnx2_remote_phy_event()
1985 bnx2_send_heart_beat(bp); bnx2_remote_phy_event()
1990 bp->link_up = 0; bnx2_remote_phy_event()
1994 bp->link_up = 1; bnx2_remote_phy_event()
1996 bp->duplex = DUPLEX_FULL; bnx2_remote_phy_event()
1999 bp->duplex = DUPLEX_HALF; bnx2_remote_phy_event()
2002 bp->line_speed = SPEED_10; bnx2_remote_phy_event()
2005 bp->duplex = DUPLEX_HALF; bnx2_remote_phy_event()
2009 bp->line_speed = SPEED_100; bnx2_remote_phy_event()
2012 bp->duplex = DUPLEX_HALF; bnx2_remote_phy_event()
2015 bp->line_speed = SPEED_1000; bnx2_remote_phy_event()
2018 bp->duplex = DUPLEX_HALF; bnx2_remote_phy_event()
2021 bp->line_speed = SPEED_2500; bnx2_remote_phy_event()
2024 bp->line_speed = 0; bnx2_remote_phy_event()
2028 bp->flow_ctrl = 0; bnx2_remote_phy_event()
2029 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != bnx2_remote_phy_event()
2031 if (bp->duplex == DUPLEX_FULL) bnx2_remote_phy_event()
2032 bp->flow_ctrl = bp->req_flow_ctrl; bnx2_remote_phy_event()
2035 bp->flow_ctrl |= FLOW_CTRL_TX; bnx2_remote_phy_event()
2037 bp->flow_ctrl |= FLOW_CTRL_RX; bnx2_remote_phy_event()
2040 old_port = bp->phy_port; bnx2_remote_phy_event()
2042 bp->phy_port = PORT_FIBRE; bnx2_remote_phy_event()
2044 bp->phy_port = PORT_TP; bnx2_remote_phy_event()
2046 if (old_port != bp->phy_port) bnx2_remote_phy_event()
2047 bnx2_set_default_link(bp); bnx2_remote_phy_event()
2050 if (bp->link_up != link_up) bnx2_remote_phy_event()
2051 bnx2_report_link(bp); bnx2_remote_phy_event()
2053 bnx2_set_mac_link(bp); bnx2_remote_phy_event()
2057 bnx2_set_remote_link(struct bnx2 *bp) bnx2_set_remote_link() argument
2061 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB); bnx2_set_remote_link()
2064 bnx2_remote_phy_event(bp); bnx2_set_remote_link()
2068 bnx2_send_heart_beat(bp); bnx2_set_remote_link()
2075 bnx2_setup_copper_phy(struct bnx2 *bp)
2076 __releases(&bp->phy_lock)
2077 __acquires(&bp->phy_lock)
2082 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2084 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2088 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2090 if (bp->autoneg & AUTONEG_SPEED) {
2094 new_adv |= bnx2_phy_get_pause_adv(bp);
2096 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2099 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2104 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2105 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2106 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2109 else if (bp->link_up) {
2113 bnx2_resolve_flow_ctrl(bp);
2114 bnx2_set_mac_link(bp);
2121 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2124 if (bp->req_line_speed == SPEED_100) {
2127 if (bp->req_duplex == DUPLEX_FULL) {
2133 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2134 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2138 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2139 spin_unlock_bh(&bp->phy_lock);
2141 spin_lock_bh(&bp->phy_lock);
2143 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2144 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2147 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2154 bp->line_speed = bp->req_line_speed;
2155 bp->duplex = bp->req_duplex;
2156 bnx2_resolve_flow_ctrl(bp);
2157 bnx2_set_mac_link(bp);
2160 bnx2_resolve_flow_ctrl(bp);
2161 bnx2_set_mac_link(bp);
2167 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2168 __releases(&bp->phy_lock)
2169 __acquires(&bp->phy_lock)
2171 if (bp->loopback == MAC_LOOPBACK)
2174 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2175 return bnx2_setup_serdes_phy(bp, port);
2178 return bnx2_setup_copper_phy(bp);
2183 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy) bnx2_init_5709s_phy() argument
2187 bp->mii_bmcr = MII_BMCR + 0x10; bnx2_init_5709s_phy()
2188 bp->mii_bmsr = MII_BMSR + 0x10; bnx2_init_5709s_phy()
2189 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; bnx2_init_5709s_phy()
2190 bp->mii_adv = MII_ADVERTISE + 0x10; bnx2_init_5709s_phy()
2191 bp->mii_lpa = MII_LPA + 0x10; bnx2_init_5709s_phy()
2192 bp->mii_up1 = MII_BNX2_OVER1G_UP1; bnx2_init_5709s_phy()
2194 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); bnx2_init_5709s_phy()
2195 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); bnx2_init_5709s_phy()
2197 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); bnx2_init_5709s_phy()
2199 bnx2_reset_phy(bp); bnx2_init_5709s_phy()
2201 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); bnx2_init_5709s_phy()
2203 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); bnx2_init_5709s_phy()
2206 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); bnx2_init_5709s_phy()
2208 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); bnx2_init_5709s_phy()
2209 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); bnx2_init_5709s_phy()
2210 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) bnx2_init_5709s_phy()
2214 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); bnx2_init_5709s_phy()
2216 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); bnx2_init_5709s_phy()
2217 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); bnx2_init_5709s_phy()
2219 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); bnx2_init_5709s_phy()
2221 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); bnx2_init_5709s_phy()
2225 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); bnx2_init_5709s_phy()
2227 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); bnx2_init_5709s_phy()
2233 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy) bnx2_init_5708s_phy() argument
2238 bnx2_reset_phy(bp); bnx2_init_5708s_phy()
2240 bp->mii_up1 = BCM5708S_UP1; bnx2_init_5708s_phy()
2242 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); bnx2_init_5708s_phy()
2243 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); bnx2_init_5708s_phy()
2244 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); bnx2_init_5708s_phy()
2246 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val); bnx2_init_5708s_phy()
2248 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); bnx2_init_5708s_phy()
2250 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); bnx2_init_5708s_phy()
2252 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); bnx2_init_5708s_phy()
2254 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) { bnx2_init_5708s_phy()
2255 bnx2_read_phy(bp, BCM5708S_UP1, &val); bnx2_init_5708s_phy()
2257 bnx2_write_phy(bp, BCM5708S_UP1, val); bnx2_init_5708s_phy()
2260 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || bnx2_init_5708s_phy()
2261 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || bnx2_init_5708s_phy()
2262 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) { bnx2_init_5708s_phy()
2264 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, bnx2_init_5708s_phy()
2266 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); bnx2_init_5708s_phy()
2268 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); bnx2_init_5708s_phy()
2269 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); bnx2_init_5708s_phy()
2272 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) & bnx2_init_5708s_phy()
2278 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); bnx2_init_5708s_phy()
2280 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, bnx2_init_5708s_phy()
2282 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); bnx2_init_5708s_phy()
2283 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, bnx2_init_5708s_phy()
2291 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) bnx2_init_5706s_phy() argument
2294 bnx2_reset_phy(bp); bnx2_init_5706s_phy()
2296 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_init_5706s_phy()
2298 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_init_5706s_phy()
2299 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); bnx2_init_5706s_phy()
2301 if (bp->dev->mtu > 1500) { bnx2_init_5706s_phy()
2305 bnx2_write_phy(bp, 0x18, 0x7); bnx2_init_5706s_phy()
2306 bnx2_read_phy(bp, 0x18, &val); bnx2_init_5706s_phy()
2307 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); bnx2_init_5706s_phy()
2309 bnx2_write_phy(bp, 0x1c, 0x6c00); bnx2_init_5706s_phy()
2310 bnx2_read_phy(bp, 0x1c, &val); bnx2_init_5706s_phy()
2311 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); bnx2_init_5706s_phy()
2316 bnx2_write_phy(bp, 0x18, 0x7); bnx2_init_5706s_phy()
2317 bnx2_read_phy(bp, 0x18, &val); bnx2_init_5706s_phy()
2318 bnx2_write_phy(bp, 0x18, val & ~0x4007); bnx2_init_5706s_phy()
2320 bnx2_write_phy(bp, 0x1c, 0x6c00); bnx2_init_5706s_phy()
2321 bnx2_read_phy(bp, 0x1c, &val); bnx2_init_5706s_phy()
2322 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); bnx2_init_5706s_phy()
2329 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) bnx2_init_copper_phy() argument
2334 bnx2_reset_phy(bp); bnx2_init_copper_phy()
2336 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) { bnx2_init_copper_phy()
2337 bnx2_write_phy(bp, 0x18, 0x0c00); bnx2_init_copper_phy()
2338 bnx2_write_phy(bp, 0x17, 0x000a); bnx2_init_copper_phy()
2339 bnx2_write_phy(bp, 0x15, 0x310b); bnx2_init_copper_phy()
2340 bnx2_write_phy(bp, 0x17, 0x201f); bnx2_init_copper_phy()
2341 bnx2_write_phy(bp, 0x15, 0x9506); bnx2_init_copper_phy()
2342 bnx2_write_phy(bp, 0x17, 0x401f); bnx2_init_copper_phy()
2343 bnx2_write_phy(bp, 0x15, 0x14e2); bnx2_init_copper_phy()
2344 bnx2_write_phy(bp, 0x18, 0x0400); bnx2_init_copper_phy()
2347 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) { bnx2_init_copper_phy()
2348 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, bnx2_init_copper_phy()
2350 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); bnx2_init_copper_phy()
2352 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); bnx2_init_copper_phy()
2355 if (bp->dev->mtu > 1500) { bnx2_init_copper_phy()
2357 bnx2_write_phy(bp, 0x18, 0x7); bnx2_init_copper_phy()
2358 bnx2_read_phy(bp, 0x18, &val); bnx2_init_copper_phy()
2359 bnx2_write_phy(bp, 0x18, val | 0x4000); bnx2_init_copper_phy()
2361 bnx2_read_phy(bp, 0x10, &val); bnx2_init_copper_phy()
2362 bnx2_write_phy(bp, 0x10, val | 0x1); bnx2_init_copper_phy()
2365 bnx2_write_phy(bp, 0x18, 0x7); bnx2_init_copper_phy()
2366 bnx2_read_phy(bp, 0x18, &val); bnx2_init_copper_phy()
2367 bnx2_write_phy(bp, 0x18, val & ~0x4007); bnx2_init_copper_phy()
2369 bnx2_read_phy(bp, 0x10, &val); bnx2_init_copper_phy()
2370 bnx2_write_phy(bp, 0x10, val & ~0x1); bnx2_init_copper_phy()
2374 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL); bnx2_init_copper_phy()
2375 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val); bnx2_init_copper_phy()
2379 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_copper_phy()
2382 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val); bnx2_init_copper_phy()
2388 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2389 __releases(&bp->phy_lock)
2390 __acquires(&bp->phy_lock)
2395 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2396 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2398 bp->mii_bmcr = MII_BMCR;
2399 bp->mii_bmsr = MII_BMSR;
2400 bp->mii_bmsr1 = MII_BMSR;
2401 bp->mii_adv = MII_ADVERTISE;
2402 bp->mii_lpa = MII_LPA;
2404 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2406 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2409 bnx2_read_phy(bp, MII_PHYSID1, &val);
2410 bp->phy_id = val << 16;
2411 bnx2_read_phy(bp, MII_PHYSID2, &val);
2412 bp->phy_id |= val & 0xffff;
2414 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2415 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2416 rc = bnx2_init_5706s_phy(bp, reset_phy);
2417 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2418 rc = bnx2_init_5708s_phy(bp, reset_phy);
2419 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2420 rc = bnx2_init_5709s_phy(bp, reset_phy);
2423 rc = bnx2_init_copper_phy(bp, reset_phy);
2428 rc = bnx2_setup_phy(bp, bp->phy_port);
2434 bnx2_set_mac_loopback(struct bnx2 *bp) bnx2_set_mac_loopback() argument
2438 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_set_mac_loopback()
2441 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); bnx2_set_mac_loopback()
2442 bp->link_up = 1; bnx2_set_mac_loopback()
2449 bnx2_set_phy_loopback(struct bnx2 *bp) bnx2_set_phy_loopback() argument
2454 spin_lock_bh(&bp->phy_lock); bnx2_set_phy_loopback()
2455 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | bnx2_set_phy_loopback()
2457 spin_unlock_bh(&bp->phy_lock); bnx2_set_phy_loopback()
2462 if (bnx2_test_link(bp) == 0) bnx2_set_phy_loopback()
2467 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_set_phy_loopback()
2473 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); bnx2_set_phy_loopback()
2474 bp->link_up = 1; bnx2_set_phy_loopback()
2479 bnx2_dump_mcp_state(struct bnx2 *bp) bnx2_dump_mcp_state() argument
2481 struct net_device *dev = bp->dev; bnx2_dump_mcp_state()
2485 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_dump_mcp_state()
2493 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1)); bnx2_dump_mcp_state()
2495 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE), bnx2_dump_mcp_state()
2496 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE), bnx2_dump_mcp_state()
2497 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK)); bnx2_dump_mcp_state()
2499 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), bnx2_dump_mcp_state()
2500 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), bnx2_dump_mcp_state()
2501 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION)); bnx2_dump_mcp_state()
2504 bnx2_shmem_rd(bp, BNX2_DRV_MB), bnx2_dump_mcp_state()
2505 bnx2_shmem_rd(bp, BNX2_FW_MB), bnx2_dump_mcp_state()
2506 bnx2_shmem_rd(bp, BNX2_LINK_STATUS)); bnx2_dump_mcp_state()
2507 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB)); bnx2_dump_mcp_state()
2509 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE), bnx2_dump_mcp_state()
2510 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); bnx2_dump_mcp_state()
2512 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); bnx2_dump_mcp_state()
2513 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE); bnx2_dump_mcp_state()
2514 DP_SHMEM_LINE(bp, 0x3cc); bnx2_dump_mcp_state()
2515 DP_SHMEM_LINE(bp, 0x3dc); bnx2_dump_mcp_state()
2516 DP_SHMEM_LINE(bp, 0x3ec); bnx2_dump_mcp_state()
2517 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc)); bnx2_dump_mcp_state()
2522 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) bnx2_fw_sync() argument
2527 bp->fw_wr_seq++; bnx2_fw_sync()
2528 msg_data |= bp->fw_wr_seq; bnx2_fw_sync()
2529 bp->fw_last_msg = msg_data; bnx2_fw_sync()
2531 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); bnx2_fw_sync()
2540 val = bnx2_shmem_rd(bp, BNX2_FW_MB); bnx2_fw_sync()
2553 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); bnx2_fw_sync()
2556 bnx2_dump_mcp_state(bp); bnx2_fw_sync()
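bnx2_fw_sync() is a sequence-stamped mailbox handshake: the driver ORs an incrementing sequence number into the message, writes it to the BNX2_DRV_MB shmem word, and then polls BNX2_FW_MB until the firmware echoes that sequence back, dumping MCP state on timeout (line 2556). The shape of the exchange, with a mock shared-memory pair (the sequence mask, retry count, and the synchronous firmware_step() stub are assumptions):

#include <stdint.h>
#include <stdio.h>

#define DRV_MSG_SEQ_MASK 0x0000ffff         /* assumed sequence field */

static uint32_t drv_mb, fw_mb;              /* mock BNX2_DRV_MB / BNX2_FW_MB */

static void firmware_step(void)             /* what the MCP would do */
{
	fw_mb = drv_mb & DRV_MSG_SEQ_MASK;  /* ack by echoing the sequence */
}

static int fw_sync(uint32_t msg_data, int ack)
{
	static uint32_t fw_wr_seq;

	fw_wr_seq = (fw_wr_seq + 1) & DRV_MSG_SEQ_MASK;
	drv_mb = msg_data | fw_wr_seq;      /* bnx2_shmem_wr(bp, BNX2_DRV_MB, ...) */
	if (!ack)
		return 0;

	for (int i = 0; i < 100; i++) {     /* bounded wait, like the driver */
		firmware_step();            /* real hardware acks asynchronously */
		if ((fw_mb & DRV_MSG_SEQ_MASK) == fw_wr_seq)
			return 0;
	}
	return -16;                         /* -EBUSY; driver dumps MCP state here */
}

int main(void)
{
	printf("%d\n", fw_sync(0x00010000 /* hypothetical command code */, 1));
	return 0;
}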
2569 bnx2_init_5709_context(struct bnx2 *bp) bnx2_init_5709_context() argument
2576 BNX2_WR(bp, BNX2_CTX_COMMAND, val); bnx2_init_5709_context()
2578 val = BNX2_RD(bp, BNX2_CTX_COMMAND); bnx2_init_5709_context()
2586 for (i = 0; i < bp->ctx_pages; i++) { bnx2_init_5709_context()
2589 if (bp->ctx_blk[i]) bnx2_init_5709_context()
2590 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE); bnx2_init_5709_context()
2594 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, bnx2_init_5709_context()
2595 (bp->ctx_blk_mapping[i] & 0xffffffff) | bnx2_init_5709_context()
2597 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, bnx2_init_5709_context()
2598 (u64) bp->ctx_blk_mapping[i] >> 32); bnx2_init_5709_context()
2599 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | bnx2_init_5709_context()
2603 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); bnx2_init_5709_context()
2617 bnx2_init_context(struct bnx2 *bp) bnx2_init_context() argument
2628 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_context()
2649 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); bnx2_init_context()
2650 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); bnx2_init_context()
2654 bnx2_ctx_wr(bp, vcid_addr, offset, 0); bnx2_init_context()
2660 bnx2_alloc_bad_rbuf(struct bnx2 *bp) bnx2_alloc_bad_rbuf() argument
2670 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, bnx2_alloc_bad_rbuf()
2676 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); bnx2_alloc_bad_rbuf()
2678 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND, bnx2_alloc_bad_rbuf()
2681 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC); bnx2_alloc_bad_rbuf()
2691 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); bnx2_alloc_bad_rbuf()
2702 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val); bnx2_alloc_bad_rbuf()
2709 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) bnx2_set_mac_addr() argument
2715 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); bnx2_set_mac_addr()
2720 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); bnx2_set_mac_addr()
2724 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) bnx2_alloc_rx_page() argument
2734 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, bnx2_alloc_rx_page()
2736 if (dma_mapping_error(&bp->pdev->dev, mapping)) { bnx2_alloc_rx_page()
2749 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) bnx2_free_rx_page() argument
2757 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), bnx2_free_rx_page()
2765 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) bnx2_alloc_rx_data() argument
2773 data = kmalloc(bp->rx_buf_size, gfp); bnx2_alloc_rx_data()
2777 mapping = dma_map_single(&bp->pdev->dev, bnx2_alloc_rx_data()
2779 bp->rx_buf_use_size, bnx2_alloc_rx_data()
2781 if (dma_mapping_error(&bp->pdev->dev, mapping)) { bnx2_alloc_rx_data()
2792 rxr->rx_prod_bseq += bp->rx_buf_use_size; bnx2_alloc_rx_data()
2798 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event) bnx2_phy_event_is_set() argument
2808 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); bnx2_phy_event_is_set()
2810 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); bnx2_phy_event_is_set()
2818 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) bnx2_phy_int() argument
2820 spin_lock(&bp->phy_lock); bnx2_phy_int()
2822 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) bnx2_phy_int()
2823 bnx2_set_link(bp); bnx2_phy_int()
2824 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) bnx2_phy_int()
2825 bnx2_set_remote_link(bp); bnx2_phy_int()
2827 spin_unlock(&bp->phy_lock); bnx2_phy_int()
2846 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) bnx2_tx_int() argument
2854 index = (bnapi - bp->bnx2_napi); bnx2_tx_int()
2855 txq = netdev_get_tx_queue(bp->dev, index); bnx2_tx_int()
2887 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), bnx2_tx_int()
2899 dma_unmap_page(&bp->pdev->dev, bnx2_tx_int()
2929 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { bnx2_tx_int()
2932 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) bnx2_tx_int()
2941 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, bnx2_reuse_rx_skb_pages() argument
2999 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, bnx2_reuse_rx_data() argument
3008 dma_sync_single_for_device(&bp->pdev->dev, bnx2_reuse_rx_data()
3012 rxr->rx_prod_bseq += bp->rx_buf_use_size; bnx2_reuse_rx_data()
3029 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, bnx2_rx_skb() argument
3037 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); bnx2_rx_skb()
3039 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod); bnx2_rx_skb()
3045 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); bnx2_rx_skb()
3050 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bnx2_rx_skb()
3080 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, bnx2_rx_skb()
3105 err = bnx2_alloc_rx_page(bp, rxr, bnx2_rx_skb()
3111 bnx2_reuse_rx_skb_pages(bp, rxr, skb, bnx2_rx_skb()
3116 dma_unmap_page(&bp->pdev->dev, mapping_old, bnx2_rx_skb()
3148 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) bnx2_rx_int() argument
3187 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bnx2_rx_int()
3202 } else if (len > bp->rx_jumbo_thresh) { bnx2_rx_int()
3203 hdr_len = bp->rx_jumbo_thresh; bnx2_rx_int()
3213 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, bnx2_rx_int()
3220 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); bnx2_rx_int()
3227 if (len <= bp->rx_copy_thresh) { bnx2_rx_int()
3228 skb = netdev_alloc_skb(bp->dev, len + 6); bnx2_rx_int()
3230 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, bnx2_rx_int()
3242 bnx2_reuse_rx_data(bp, rxr, data, bnx2_rx_int()
3246 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr, bnx2_rx_int()
3252 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) bnx2_rx_int()
3255 skb->protocol = eth_type_trans(skb, bp->dev); bnx2_rx_int()
3257 if (len > (bp->dev->mtu + ETH_HLEN) && bnx2_rx_int()
3267 if ((bp->dev->features & NETIF_F_RXCSUM) && bnx2_rx_int()
3275 if ((bp->dev->features & NETIF_F_RXHASH) && bnx2_rx_int()
3281 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); bnx2_rx_int()
3302 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); bnx2_rx_int()
3304 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod); bnx2_rx_int()
3306 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); bnx2_rx_int()
3321 struct bnx2 *bp = bnapi->bp; bnx2_msi() local
3324 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnx2_msi()
3329 if (unlikely(atomic_read(&bp->intr_sem) != 0)) bnx2_msi()
3341 struct bnx2 *bp = bnapi->bp; bnx2_msi_1shot() local
3346 if (unlikely(atomic_read(&bp->intr_sem) != 0)) bnx2_msi_1shot()
3358 struct bnx2 *bp = bnapi->bp; bnx2_interrupt() local
3368 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) & bnx2_interrupt()
3372 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnx2_interrupt()
3379 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); bnx2_interrupt()
3382 if (unlikely(atomic_read(&bp->intr_sem) != 0)) bnx2_interrupt()
3429 bnx2_chk_missed_msi(struct bnx2 *bp) bnx2_chk_missed_msi() argument
3431 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; bnx2_chk_missed_msi()
3435 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL); bnx2_chk_missed_msi()
3439 if (bnapi->last_status_idx == bp->idle_chk_status_idx) { bnx2_chk_missed_msi()
3440 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & bnx2_chk_missed_msi()
3442 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); bnx2_chk_missed_msi()
3443 bnx2_msi(bp->irq_tbl[0].vector, bnapi); bnx2_chk_missed_msi()
3447 bp->idle_chk_status_idx = bnapi->last_status_idx; bnx2_chk_missed_msi()
3451 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) bnx2_poll_cnic() argument
3459 c_ops = rcu_dereference(bp->cnic_ops); bnx2_poll_cnic()
3461 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, bnx2_poll_cnic()
3467 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) bnx2_poll_link() argument
3476 bnx2_phy_int(bp, bnapi); bnx2_poll_link()
3481 BNX2_WR(bp, BNX2_HC_COMMAND, bnx2_poll_link()
3482 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); bnx2_poll_link()
3483 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_poll_link()
3487 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi, bnx2_poll_work() argument
3494 bnx2_tx_int(bp, bnapi, 0); bnx2_poll_work()
3497 work_done += bnx2_rx_int(bp, bnapi, budget - work_done); bnx2_poll_work()
3505 struct bnx2 *bp = bnapi->bp; bnx2_poll_msix() local
3510 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); bnx2_poll_msix()
3520 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | bnx2_poll_msix()
3532 struct bnx2 *bp = bnapi->bp; bnx2_poll() local
3537 bnx2_poll_link(bp, bnapi); bnx2_poll()
3539 work_done = bnx2_poll_work(bp, bnapi, work_done, budget); bnx2_poll()
3542 bnx2_poll_cnic(bp, bnapi); bnx2_poll()
3557 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { bnx2_poll()
3558 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnx2_poll()
3563 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnx2_poll()
3568 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnx2_poll()
3584 struct bnx2 *bp = netdev_priv(dev); bnx2_set_rx_mode() local
3592 spin_lock_bh(&bp->phy_lock); bnx2_set_rx_mode()
3594 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | bnx2_set_rx_mode()
3598 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) bnx2_set_rx_mode()
3608 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), bnx2_set_rx_mode()
3631 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3646 bnx2_set_mac_addr(bp, ha->addr, netdev_for_each_uc_addr()
3655 if (rx_mode != bp->rx_mode) {
3656 bp->rx_mode = rx_mode;
3657 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3660 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3661 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3662 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3664 spin_unlock_bh(&bp->phy_lock);
3694 static void bnx2_release_firmware(struct bnx2 *bp) bnx2_release_firmware() argument
3696 if (bp->rv2p_firmware) { bnx2_release_firmware()
3697 release_firmware(bp->mips_firmware); bnx2_release_firmware()
3698 release_firmware(bp->rv2p_firmware); bnx2_release_firmware()
3699 bp->rv2p_firmware = NULL; bnx2_release_firmware()
3703 static int bnx2_request_uncached_firmware(struct bnx2 *bp) bnx2_request_uncached_firmware() argument
3710 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_request_uncached_firmware()
3712 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) || bnx2_request_uncached_firmware()
3713 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1)) bnx2_request_uncached_firmware()
3722 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev); bnx2_request_uncached_firmware()
3728 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev); bnx2_request_uncached_firmware()
3733 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; bnx2_request_uncached_firmware()
3734 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; bnx2_request_uncached_firmware()
3735 if (bp->mips_firmware->size < sizeof(*mips_fw) || bnx2_request_uncached_firmware()
3736 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || bnx2_request_uncached_firmware()
3737 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || bnx2_request_uncached_firmware()
3738 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || bnx2_request_uncached_firmware()
3739 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || bnx2_request_uncached_firmware()
3740 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { bnx2_request_uncached_firmware()
3745 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || bnx2_request_uncached_firmware()
3746 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || bnx2_request_uncached_firmware()
3747 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { bnx2_request_uncached_firmware()
3756 release_firmware(bp->rv2p_firmware); bnx2_request_uncached_firmware()
3757 bp->rv2p_firmware = NULL; bnx2_request_uncached_firmware()
3759 release_firmware(bp->mips_firmware); bnx2_request_uncached_firmware()
3763 static int bnx2_request_firmware(struct bnx2 *bp) bnx2_request_firmware() argument
3765 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp); bnx2_request_firmware()
3781 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, load_rv2p_fw() argument
3792 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); load_rv2p_fw()
3803 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); load_rv2p_fw()
3805 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); load_rv2p_fw()
3809 BNX2_WR(bp, addr, val); load_rv2p_fw()
3812 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); load_rv2p_fw()
3819 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code); load_rv2p_fw()
3822 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code); load_rv2p_fw()
3825 BNX2_WR(bp, addr, val); load_rv2p_fw()
3831 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); load_rv2p_fw()
3834 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); load_rv2p_fw()
3841 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, load_cpu_fw() argument
3850 val = bnx2_reg_rd_ind(bp, cpu_reg->mode); load_cpu_fw()
3852 bnx2_reg_wr_ind(bp, cpu_reg->mode, val); load_cpu_fw()
3853 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); load_cpu_fw()
3859 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw()
3866 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw()
3873 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw()
3880 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw()
3887 data = (__be32 *)(bp->mips_firmware->data + file_offset); load_cpu_fw()
3894 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); load_cpu_fw()
3898 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0); load_cpu_fw()
3901 bnx2_reg_wr_ind(bp, cpu_reg->pc, val); load_cpu_fw()
3904 val = bnx2_reg_rd_ind(bp, cpu_reg->mode); load_cpu_fw()
3906 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); load_cpu_fw()
3907 bnx2_reg_wr_ind(bp, cpu_reg->mode, val); load_cpu_fw()
3913 bnx2_init_cpus(struct bnx2 *bp) bnx2_init_cpus() argument
3916 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; bnx2_init_cpus()
3918 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; bnx2_init_cpus()
3922 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); bnx2_init_cpus()
3923 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); bnx2_init_cpus()
3926 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); bnx2_init_cpus()
3931 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); bnx2_init_cpus()
3936 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); bnx2_init_cpus()
3941 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); bnx2_init_cpus()
3946 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); bnx2_init_cpus()
3953 bnx2_setup_wol(struct bnx2 *bp) bnx2_setup_wol() argument
3958 if (bp->wol) { bnx2_setup_wol()
3962 autoneg = bp->autoneg; bnx2_setup_wol()
3963 advertising = bp->advertising; bnx2_setup_wol()
3965 if (bp->phy_port == PORT_TP) { bnx2_setup_wol()
3966 bp->autoneg = AUTONEG_SPEED; bnx2_setup_wol()
3967 bp->advertising = ADVERTISED_10baseT_Half | bnx2_setup_wol()
3974 spin_lock_bh(&bp->phy_lock); bnx2_setup_wol()
3975 bnx2_setup_phy(bp, bp->phy_port); bnx2_setup_wol()
3976 spin_unlock_bh(&bp->phy_lock); bnx2_setup_wol()
3978 bp->autoneg = autoneg; bnx2_setup_wol()
3979 bp->advertising = advertising; bnx2_setup_wol()
3981 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_setup_wol()
3983 val = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_setup_wol()
3990 if (bp->phy_port == PORT_TP) { bnx2_setup_wol()
3994 if (bp->line_speed == SPEED_2500) bnx2_setup_wol()
3998 BNX2_WR(bp, BNX2_EMAC_MODE, val); bnx2_setup_wol()
4002 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), bnx2_setup_wol()
4005 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); bnx2_setup_wol()
4008 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); bnx2_setup_wol()
4009 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); bnx2_setup_wol()
4010 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); bnx2_setup_wol()
4013 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, bnx2_setup_wol()
4018 val = BNX2_RD(bp, BNX2_RPM_CONFIG); bnx2_setup_wol()
4020 BNX2_WR(bp, BNX2_RPM_CONFIG, val); bnx2_setup_wol()
4027 if (!(bp->flags & BNX2_FLAG_NO_WOL)) { bnx2_setup_wol()
4031 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { bnx2_setup_wol()
4032 bnx2_fw_sync(bp, wol_msg, 1, 0); bnx2_setup_wol()
4038 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); bnx2_setup_wol()
4039 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, bnx2_setup_wol()
4041 bnx2_fw_sync(bp, wol_msg, 1, 0); bnx2_setup_wol()
4042 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); bnx2_setup_wol()
4048 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) bnx2_set_power_state() argument
4054 pci_enable_wake(bp->pdev, PCI_D0, false); bnx2_set_power_state()
4055 pci_set_power_state(bp->pdev, PCI_D0); bnx2_set_power_state()
4057 val = BNX2_RD(bp, BNX2_EMAC_MODE); bnx2_set_power_state()
4060 BNX2_WR(bp, BNX2_EMAC_MODE, val); bnx2_set_power_state()
4062 val = BNX2_RD(bp, BNX2_RPM_CONFIG); bnx2_set_power_state()
4064 BNX2_WR(bp, BNX2_RPM_CONFIG, val); bnx2_set_power_state()
4068 bnx2_setup_wol(bp); bnx2_set_power_state()
4069 pci_wake_from_d3(bp->pdev, bp->wol); bnx2_set_power_state()
4070 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_set_power_state()
4071 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { bnx2_set_power_state()
4073 if (bp->wol) bnx2_set_power_state()
4074 pci_set_power_state(bp->pdev, PCI_D3hot); bnx2_set_power_state()
4078 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_set_power_state()
4085 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_set_power_state()
4088 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); bnx2_set_power_state()
4090 pci_set_power_state(bp->pdev, PCI_D3hot); bnx2_set_power_state()
4104 bnx2_acquire_nvram_lock(struct bnx2 *bp) bnx2_acquire_nvram_lock() argument
4110 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); bnx2_acquire_nvram_lock()
4112 val = BNX2_RD(bp, BNX2_NVM_SW_ARB); bnx2_acquire_nvram_lock()
4126 bnx2_release_nvram_lock(struct bnx2 *bp) bnx2_release_nvram_lock() argument
4132 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); bnx2_release_nvram_lock()
4135 val = BNX2_RD(bp, BNX2_NVM_SW_ARB); bnx2_release_nvram_lock()
4150 bnx2_enable_nvram_write(struct bnx2 *bp) bnx2_enable_nvram_write() argument
4154 val = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_enable_nvram_write()
4155 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); bnx2_enable_nvram_write()
4157 if (bp->flash_info->flags & BNX2_NV_WREN) { bnx2_enable_nvram_write()
4160 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_enable_nvram_write()
4161 BNX2_WR(bp, BNX2_NVM_COMMAND, bnx2_enable_nvram_write()
4167 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_enable_nvram_write()
4179 bnx2_disable_nvram_write(struct bnx2 *bp) bnx2_disable_nvram_write() argument
4183 val = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_disable_nvram_write()
4184 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); bnx2_disable_nvram_write()
4189 bnx2_enable_nvram_access(struct bnx2 *bp) bnx2_enable_nvram_access() argument
4193 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); bnx2_enable_nvram_access()
4195 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, bnx2_enable_nvram_access()
4200 bnx2_disable_nvram_access(struct bnx2 *bp) bnx2_disable_nvram_access() argument
4204 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); bnx2_disable_nvram_access()
4206 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, bnx2_disable_nvram_access()
4212 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) bnx2_nvram_erase_page() argument
4217 if (bp->flash_info->flags & BNX2_NV_BUFFERED) bnx2_nvram_erase_page()
4226 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_erase_page()
4229 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_erase_page()
4232 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_erase_page()
4240 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_nvram_erase_page()
4252 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) bnx2_nvram_read_dword() argument
4261 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { bnx2_nvram_read_dword()
4262 offset = ((offset / bp->flash_info->page_size) << bnx2_nvram_read_dword()
4263 bp->flash_info->page_bits) + bnx2_nvram_read_dword()
4264 (offset % bp->flash_info->page_size); bnx2_nvram_read_dword()
4268 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_read_dword()
4271 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_read_dword()
4274 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_read_dword()
4282 val = BNX2_RD(bp, BNX2_NVM_COMMAND); bnx2_nvram_read_dword()
4284 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ)); bnx2_nvram_read_dword()
4297 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) bnx2_nvram_write_dword() argument
4307 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { bnx2_nvram_write_dword()
4308 offset = ((offset / bp->flash_info->page_size) << bnx2_nvram_write_dword()
4309 bp->flash_info->page_bits) + bnx2_nvram_write_dword()
4310 (offset % bp->flash_info->page_size); bnx2_nvram_write_dword()
4314 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); bnx2_nvram_write_dword()
4319 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); bnx2_nvram_write_dword()
4322 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); bnx2_nvram_write_dword()
4325 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); bnx2_nvram_write_dword()
4331 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) bnx2_nvram_write_dword()
4341 bnx2_init_nvram(struct bnx2 *bp) bnx2_init_nvram() argument
4347 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_nvram()
4348 bp->flash_info = &flash_5709; bnx2_init_nvram()
4353 val = BNX2_RD(bp, BNX2_NVM_CFG1); bnx2_init_nvram()
4364 bp->flash_info = flash; bnx2_init_nvram()
4382 bp->flash_info = flash; bnx2_init_nvram()
4385 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_init_nvram()
4389 bnx2_enable_nvram_access(bp); bnx2_init_nvram()
4392 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1); bnx2_init_nvram()
4393 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2); bnx2_init_nvram()
4394 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3); bnx2_init_nvram()
4395 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1); bnx2_init_nvram()
4398 bnx2_disable_nvram_access(bp); bnx2_init_nvram()
4399 bnx2_release_nvram_lock(bp); bnx2_init_nvram()
4407 bp->flash_info = NULL; bnx2_init_nvram()
4413 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2); bnx2_init_nvram()
4416 bp->flash_size = val; bnx2_init_nvram()
4418 bp->flash_size = bp->flash_info->total_size; bnx2_init_nvram()
4424 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, bnx2_nvram_read() argument
4434 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_nvram_read()
4438 bnx2_enable_nvram_access(bp); bnx2_nvram_read()
4462 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read()
4487 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read()
4500 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); bnx2_nvram_read()
4508 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); bnx2_nvram_read()
4520 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); bnx2_nvram_read()
4526 bnx2_disable_nvram_access(bp); bnx2_nvram_read()
4528 bnx2_release_nvram_lock(bp); bnx2_nvram_read()
4534 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, bnx2_nvram_write() argument
4552 if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) bnx2_nvram_write()
4559 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) bnx2_nvram_write()
4577 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write()
4593 page_start -= (page_start % bp->flash_info->page_size); bnx2_nvram_write()
4595 page_end = page_start + bp->flash_info->page_size; bnx2_nvram_write()
4603 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) bnx2_nvram_write()
4607 bnx2_enable_nvram_access(bp); bnx2_nvram_write()
4610 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write()
4615 for (j = 0; j < bp->flash_info->page_size; j += 4) { bnx2_nvram_write()
4616 if (j == (bp->flash_info->page_size - 4)) { bnx2_nvram_write()
4619 rc = bnx2_nvram_read_dword(bp, bnx2_nvram_write()
4632 if ((rc = bnx2_enable_nvram_write(bp)) != 0) bnx2_nvram_write()
4638 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write()
4640 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) bnx2_nvram_write()
4644 bnx2_enable_nvram_write(bp); bnx2_nvram_write()
4649 rc = bnx2_nvram_write_dword(bp, addr, bnx2_nvram_write()
4662 ((bp->flash_info->flags & BNX2_NV_BUFFERED) && bnx2_nvram_write()
4667 rc = bnx2_nvram_write_dword(bp, addr, buf, bnx2_nvram_write()
4679 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { bnx2_nvram_write()
4686 rc = bnx2_nvram_write_dword(bp, addr, bnx2_nvram_write()
4697 bnx2_disable_nvram_write(bp); bnx2_nvram_write()
4700 bnx2_disable_nvram_access(bp); bnx2_nvram_write()
4701 bnx2_release_nvram_lock(bp); bnx2_nvram_write()
4714 bnx2_init_fw_cap(struct bnx2 *bp) bnx2_init_fw_cap() argument
4718 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; bnx2_init_fw_cap()
4719 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap()
4721 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) bnx2_init_fw_cap()
4722 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap()
4724 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); bnx2_init_fw_cap()
4729 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; bnx2_init_fw_cap()
4733 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && bnx2_init_fw_cap()
4737 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; bnx2_init_fw_cap()
4739 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); bnx2_init_fw_cap()
4741 bp->phy_port = PORT_FIBRE; bnx2_init_fw_cap()
4743 bp->phy_port = PORT_TP; bnx2_init_fw_cap()
4749 if (netif_running(bp->dev) && sig) bnx2_init_fw_cap()
4750 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); bnx2_init_fw_cap()
4754 bnx2_setup_msix_tbl(struct bnx2 *bp) bnx2_setup_msix_tbl() argument
4756 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); bnx2_setup_msix_tbl()
4758 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); bnx2_setup_msix_tbl()
4759 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); bnx2_setup_msix_tbl()
4763 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) bnx2_reset_chip() argument
4771 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || bnx2_reset_chip()
4772 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { bnx2_reset_chip()
4773 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, bnx2_reset_chip()
4778 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); bnx2_reset_chip()
4781 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_reset_chip()
4783 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); bnx2_reset_chip()
4784 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_reset_chip()
4788 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); bnx2_reset_chip()
4795 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); bnx2_reset_chip()
4799 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE, bnx2_reset_chip()
4804 val = BNX2_RD(bp, BNX2_MISC_ID); bnx2_reset_chip()
4806 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_reset_chip()
4807 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); bnx2_reset_chip()
4808 BNX2_RD(bp, BNX2_MISC_COMMAND); bnx2_reset_chip()
4814 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); bnx2_reset_chip()
4822 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); bnx2_reset_chip()
4828 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_reset_chip()
4829 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) bnx2_reset_chip()
4834 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG); bnx2_reset_chip()
4849 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0); bnx2_reset_chip()
4856 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0); bnx2_reset_chip()
4860 spin_lock_bh(&bp->phy_lock); bnx2_reset_chip()
4861 old_port = bp->phy_port; bnx2_reset_chip()
4862 bnx2_init_fw_cap(bp); bnx2_reset_chip()
4863 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && bnx2_reset_chip()
4864 old_port != bp->phy_port) bnx2_reset_chip()
4865 bnx2_set_default_remote_link(bp); bnx2_reset_chip()
4866 spin_unlock_bh(&bp->phy_lock); bnx2_reset_chip()
4868 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_reset_chip()
4871 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); bnx2_reset_chip()
4874 rc = bnx2_alloc_bad_rbuf(bp); bnx2_reset_chip()
4877 if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_reset_chip()
4878 bnx2_setup_msix_tbl(bp); bnx2_reset_chip()
4880 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL, bnx2_reset_chip()
4888 bnx2_init_chip(struct bnx2 *bp) bnx2_init_chip() argument
4894 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); bnx2_init_chip()
4907 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) bnx2_init_chip()
4910 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) && bnx2_init_chip()
4911 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) && bnx2_init_chip()
4912 !(bp->flags & BNX2_FLAG_PCIX)) bnx2_init_chip()
4915 BNX2_WR(bp, BNX2_DMA_CONFIG, val); bnx2_init_chip()
4917 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_chip()
4918 val = BNX2_RD(bp, BNX2_TDMA_CONFIG); bnx2_init_chip()
4920 BNX2_WR(bp, BNX2_TDMA_CONFIG, val); bnx2_init_chip()
4923 if (bp->flags & BNX2_FLAG_PCIX) { bnx2_init_chip()
4926 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, bnx2_init_chip()
4928 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, bnx2_init_chip()
4932 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, bnx2_init_chip()
4939 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip()
4940 rc = bnx2_init_5709_context(bp); bnx2_init_chip()
4944 bnx2_init_context(bp); bnx2_init_chip()
4946 if ((rc = bnx2_init_cpus(bp)) != 0) bnx2_init_chip()
4949 bnx2_init_nvram(bp); bnx2_init_chip()
4951 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_init_chip()
4953 val = BNX2_RD(bp, BNX2_MQ_CONFIG); bnx2_init_chip()
4956 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip()
4958 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) bnx2_init_chip()
4962 BNX2_WR(bp, BNX2_MQ_CONFIG, val); bnx2_init_chip()
4965 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); bnx2_init_chip()
4966 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val); bnx2_init_chip()
4969 BNX2_WR(bp, BNX2_RV2P_CONFIG, val); bnx2_init_chip()
4972 val = BNX2_RD(bp, BNX2_TBDR_CONFIG); bnx2_init_chip()
4975 BNX2_WR(bp, BNX2_TBDR_CONFIG, val); bnx2_init_chip()
4977 val = bp->mac_addr[0] + bnx2_init_chip()
4978 (bp->mac_addr[1] << 8) + bnx2_init_chip()
4979 (bp->mac_addr[2] << 16) + bnx2_init_chip()
4980 bp->mac_addr[3] + bnx2_init_chip()
4981 (bp->mac_addr[4] << 8) + bnx2_init_chip()
4982 (bp->mac_addr[5] << 16); bnx2_init_chip()
4983 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); bnx2_init_chip()
4986 mtu = bp->dev->mtu; bnx2_init_chip()
4990 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); bnx2_init_chip()
4995 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); bnx2_init_chip()
4996 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); bnx2_init_chip()
4997 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu)); bnx2_init_chip()
4999 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size); bnx2_init_chip()
5001 bp->bnx2_napi[i].last_status_idx = 0; bnx2_init_chip()
5003 bp->idle_chk_status_idx = 0xffff; bnx2_init_chip()
5006 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); bnx2_init_chip()
5008 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L, bnx2_init_chip()
5009 (u64) bp->status_blk_mapping & 0xffffffff); bnx2_init_chip()
5010 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); bnx2_init_chip()
5012 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L, bnx2_init_chip()
5013 (u64) bp->stats_blk_mapping & 0xffffffff); bnx2_init_chip()
5014 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H, bnx2_init_chip()
5015 (u64) bp->stats_blk_mapping >> 32); bnx2_init_chip()
5017 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, bnx2_init_chip()
5018 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); bnx2_init_chip()
5020 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, bnx2_init_chip()
5021 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); bnx2_init_chip()
5023 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP, bnx2_init_chip()
5024 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); bnx2_init_chip()
5026 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); bnx2_init_chip()
5028 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); bnx2_init_chip()
5030 BNX2_WR(bp, BNX2_HC_COM_TICKS, bnx2_init_chip()
5031 (bp->com_ticks_int << 16) | bp->com_ticks); bnx2_init_chip()
5033 BNX2_WR(bp, BNX2_HC_CMD_TICKS, bnx2_init_chip()
5034 (bp->cmd_ticks_int << 16) | bp->cmd_ticks); bnx2_init_chip()
5036 if (bp->flags & BNX2_FLAG_BROKEN_STATS) bnx2_init_chip()
5037 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0); bnx2_init_chip()
5039 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); bnx2_init_chip()
5040 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ bnx2_init_chip()
5042 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) bnx2_init_chip()
5049 if (bp->flags & BNX2_FLAG_USING_MSIX) { bnx2_init_chip()
5050 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, bnx2_init_chip()
5056 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) bnx2_init_chip()
5059 BNX2_WR(bp, BNX2_HC_CONFIG, val); bnx2_init_chip()
5061 if (bp->rx_ticks < 25) bnx2_init_chip()
5062 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); bnx2_init_chip()
5064 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0); bnx2_init_chip()
5066 for (i = 1; i < bp->irq_nvecs; i++) { bnx2_init_chip()
5070 BNX2_WR(bp, base, bnx2_init_chip()
5075 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, bnx2_init_chip()
5076 (bp->tx_quick_cons_trip_int << 16) | bnx2_init_chip()
5077 bp->tx_quick_cons_trip); bnx2_init_chip()
5079 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF, bnx2_init_chip()
5080 (bp->tx_ticks_int << 16) | bp->tx_ticks); bnx2_init_chip()
5082 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, bnx2_init_chip()
5083 (bp->rx_quick_cons_trip_int << 16) | bnx2_init_chip()
5084 bp->rx_quick_cons_trip); bnx2_init_chip()
5086 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF, bnx2_init_chip()
5087 (bp->rx_ticks_int << 16) | bp->rx_ticks); bnx2_init_chip()
5091 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); bnx2_init_chip()
5093 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); bnx2_init_chip()
5096 bnx2_set_rx_mode(bp->dev); bnx2_init_chip()
5098 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_chip()
5099 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); bnx2_init_chip()
5101 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); bnx2_init_chip()
5103 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, bnx2_init_chip()
5106 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); bnx2_init_chip()
5107 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS); bnx2_init_chip()
5111 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_init_chip()
5117 bnx2_clear_ring_states(struct bnx2 *bp) bnx2_clear_ring_states() argument
5125 bnapi = &bp->bnx2_napi[i]; bnx2_clear_ring_states()
5140 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) bnx2_init_tx_context() argument
5145 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_tx_context()
5157 bnx2_ctx_wr(bp, cid_addr, offset0, val); bnx2_init_tx_context()
5160 bnx2_ctx_wr(bp, cid_addr, offset1, val); bnx2_init_tx_context()
5163 bnx2_ctx_wr(bp, cid_addr, offset2, val); bnx2_init_tx_context()
5166 bnx2_ctx_wr(bp, cid_addr, offset3, val); bnx2_init_tx_context()
5170 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) bnx2_init_tx_ring() argument
5177 bnapi = &bp->bnx2_napi[ring_num]; bnx2_init_tx_ring()
5185 bp->tx_wake_thresh = bp->tx_ring_size / 2; bnx2_init_tx_ring()
5198 bnx2_init_tx_context(bp, cid, txr); bnx2_init_tx_ring()
5226 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) bnx2_init_rx_ring() argument
5231 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num]; bnx2_init_rx_ring()
5242 bp->rx_buf_use_size, bp->rx_max_ring); bnx2_init_rx_ring()
5244 bnx2_init_rx_context(bp, cid); bnx2_init_rx_ring()
5246 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_rx_ring()
5247 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5); bnx2_init_rx_ring()
5248 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); bnx2_init_rx_ring()
5251 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); bnx2_init_rx_ring()
5252 if (bp->rx_pg_ring_size) { bnx2_init_rx_ring()
5255 PAGE_SIZE, bp->rx_max_pg_ring); bnx2_init_rx_ring()
5256 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; bnx2_init_rx_ring()
5257 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); bnx2_init_rx_ring()
5258 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, bnx2_init_rx_ring()
5262 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); bnx2_init_rx_ring()
5265 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); bnx2_init_rx_ring()
5267 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_rx_ring()
5268 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); bnx2_init_rx_ring()
5272 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); bnx2_init_rx_ring()
5275 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); bnx2_init_rx_ring()
5278 for (i = 0; i < bp->rx_pg_ring_size; i++) { bnx2_init_rx_ring()
5279 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { bnx2_init_rx_ring()
5280 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", bnx2_init_rx_ring()
5281 ring_num, i, bp->rx_pg_ring_size); bnx2_init_rx_ring()
5290 for (i = 0; i < bp->rx_ring_size; i++) { bnx2_init_rx_ring()
5291 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) { bnx2_init_rx_ring()
5292 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", bnx2_init_rx_ring()
5293 ring_num, i, bp->rx_ring_size); bnx2_init_rx_ring()
5305 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); bnx2_init_rx_ring()
5306 BNX2_WR16(bp, rxr->rx_bidx_addr, prod); bnx2_init_rx_ring()
5308 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); bnx2_init_rx_ring()
5312 bnx2_init_all_rings(struct bnx2 *bp) bnx2_init_all_rings() argument
5317 bnx2_clear_ring_states(bp); bnx2_init_all_rings()
5319 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0); bnx2_init_all_rings()
5320 for (i = 0; i < bp->num_tx_rings; i++) bnx2_init_all_rings()
5321 bnx2_init_tx_ring(bp, i); bnx2_init_all_rings()
5323 if (bp->num_tx_rings > 1) bnx2_init_all_rings()
5324 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | bnx2_init_all_rings()
5327 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); bnx2_init_all_rings()
5328 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); bnx2_init_all_rings()
5330 for (i = 0; i < bp->num_rx_rings; i++) bnx2_init_all_rings()
5331 bnx2_init_rx_ring(bp, i); bnx2_init_all_rings()
5333 if (bp->num_rx_rings > 1) { bnx2_init_all_rings()
5339 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; bnx2_init_all_rings()
5341 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); bnx2_init_all_rings()
5342 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | bnx2_init_all_rings()
5353 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val); bnx2_init_all_rings()
5378 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) bnx2_set_rx_ring_size() argument
5383 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; bnx2_set_rx_ring_size()
5388 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; bnx2_set_rx_ring_size()
5389 bp->rx_pg_ring_size = 0; bnx2_set_rx_ring_size()
5390 bp->rx_max_pg_ring = 0; bnx2_set_rx_ring_size()
5391 bp->rx_max_pg_ring_idx = 0; bnx2_set_rx_ring_size()
5392 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) { bnx2_set_rx_ring_size()
5393 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; bnx2_set_rx_ring_size()
5399 bp->rx_pg_ring_size = jumbo_size; bnx2_set_rx_ring_size()
5400 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, bnx2_set_rx_ring_size()
5402 bp->rx_max_pg_ring_idx = bnx2_set_rx_ring_size()
5403 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1; bnx2_set_rx_ring_size()
5405 bp->rx_copy_thresh = 0; bnx2_set_rx_ring_size()
5408 bp->rx_buf_use_size = rx_size; bnx2_set_rx_ring_size()
5410 bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + bnx2_set_rx_ring_size()
5412 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bnx2_set_rx_ring_size()
5413 bp->rx_ring_size = size; bnx2_set_rx_ring_size()
5414 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); bnx2_set_rx_ring_size()
5415 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1; bnx2_set_rx_ring_size()
5419 bnx2_free_tx_skbs(struct bnx2 *bp) bnx2_free_tx_skbs() argument
5423 for (i = 0; i < bp->num_tx_rings; i++) { bnx2_free_tx_skbs()
5424 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_tx_skbs()
5441 dma_unmap_single(&bp->pdev->dev, bnx2_free_tx_skbs()
5452 dma_unmap_page(&bp->pdev->dev, bnx2_free_tx_skbs()
5459 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); bnx2_free_tx_skbs()
5464 bnx2_free_rx_skbs(struct bnx2 *bp) bnx2_free_rx_skbs() argument
5468 for (i = 0; i < bp->num_rx_rings; i++) { bnx2_free_rx_skbs()
5469 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_free_rx_skbs()
5476 for (j = 0; j < bp->rx_max_ring_idx; j++) { bnx2_free_rx_skbs()
5483 dma_unmap_single(&bp->pdev->dev, bnx2_free_rx_skbs()
5485 bp->rx_buf_use_size, bnx2_free_rx_skbs()
5492 for (j = 0; j < bp->rx_max_pg_ring_idx; j++) bnx2_free_rx_skbs()
5493 bnx2_free_rx_page(bp, rxr, j); bnx2_free_rx_skbs()
5498 bnx2_free_skbs(struct bnx2 *bp) bnx2_free_skbs() argument
5500 bnx2_free_tx_skbs(bp); bnx2_free_skbs()
5501 bnx2_free_rx_skbs(bp); bnx2_free_skbs()
5505 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) bnx2_reset_nic() argument
5509 rc = bnx2_reset_chip(bp, reset_code); bnx2_reset_nic()
5510 bnx2_free_skbs(bp); bnx2_reset_nic()
5514 if ((rc = bnx2_init_chip(bp)) != 0) bnx2_reset_nic()
5517 bnx2_init_all_rings(bp); bnx2_reset_nic()
5522 bnx2_init_nic(struct bnx2 *bp, int reset_phy) bnx2_init_nic() argument
5526 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) bnx2_init_nic()
5529 spin_lock_bh(&bp->phy_lock); bnx2_init_nic()
5530 bnx2_init_phy(bp, reset_phy); bnx2_init_nic()
5531 bnx2_set_link(bp); bnx2_init_nic()
5532 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_init_nic()
5533 bnx2_remote_phy_event(bp); bnx2_init_nic()
5534 spin_unlock_bh(&bp->phy_lock); bnx2_init_nic()
5539 bnx2_shutdown_chip(struct bnx2 *bp) bnx2_shutdown_chip() argument
5543 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_shutdown_chip()
5545 else if (bp->wol) bnx2_shutdown_chip()
5550 return bnx2_reset_chip(bp, reset_code); bnx2_shutdown_chip()
5554 bnx2_test_registers(struct bnx2 *bp) bnx2_test_registers() argument
5675 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_registers()
5689 save_val = readl(bp->regview + offset); bnx2_test_registers()
5691 writel(0, bp->regview + offset); bnx2_test_registers()
5693 val = readl(bp->regview + offset); bnx2_test_registers()
5702 writel(0xffffffff, bp->regview + offset); bnx2_test_registers()
5704 val = readl(bp->regview + offset); bnx2_test_registers()
5713 writel(save_val, bp->regview + offset); bnx2_test_registers()
5717 writel(save_val, bp->regview + offset); bnx2_test_registers()
5725 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) bnx2_do_mem_test() argument
5736 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); bnx2_do_mem_test()
5738 if (bnx2_reg_rd_ind(bp, start + offset) != bnx2_do_mem_test()
5748 bnx2_test_memory(struct bnx2 *bp) bnx2_test_memory() argument
5774 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_test_memory()
5780 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, bnx2_test_memory()
5793 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) bnx2_run_loopback() argument
5805 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; bnx2_run_loopback()
5814 bp->loopback = MAC_LOOPBACK; bnx2_run_loopback()
5815 bnx2_set_mac_loopback(bp); bnx2_run_loopback()
5818 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_run_loopback()
5821 bp->loopback = PHY_LOOPBACK; bnx2_run_loopback()
5822 bnx2_set_phy_loopback(bp); bnx2_run_loopback()
5827 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4); bnx2_run_loopback()
5828 skb = netdev_alloc_skb(bp->dev, pkt_size); bnx2_run_loopback()
5832 memcpy(packet, bp->dev->dev_addr, ETH_ALEN); bnx2_run_loopback()
5837 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, bnx2_run_loopback()
5839 if (dma_mapping_error(&bp->pdev->dev, map)) { bnx2_run_loopback()
5844 BNX2_WR(bp, BNX2_HC_COMMAND, bnx2_run_loopback()
5845 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); bnx2_run_loopback()
5847 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_run_loopback()
5865 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); bnx2_run_loopback()
5866 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); bnx2_run_loopback()
5870 BNX2_WR(bp, BNX2_HC_COMMAND, bnx2_run_loopback()
5871 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); bnx2_run_loopback()
5873 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_run_loopback()
5877 dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); bnx2_run_loopback()
5894 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2_run_loopback()
5896 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); bnx2_run_loopback()
5921 bp->loopback = 0; bnx2_run_loopback()
5931 bnx2_test_loopback(struct bnx2 *bp) bnx2_test_loopback() argument
5935 if (!netif_running(bp->dev)) bnx2_test_loopback()
5938 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_test_loopback()
5939 spin_lock_bh(&bp->phy_lock); bnx2_test_loopback()
5940 bnx2_init_phy(bp, 1); bnx2_test_loopback()
5941 spin_unlock_bh(&bp->phy_lock); bnx2_test_loopback()
5942 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) bnx2_test_loopback()
5944 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) bnx2_test_loopback()
5953 bnx2_test_nvram(struct bnx2 *bp) bnx2_test_nvram() argument
5960 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) bnx2_test_nvram()
5969 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) bnx2_test_nvram()
5988 bnx2_test_link(struct bnx2 *bp) bnx2_test_link() argument
5992 if (!netif_running(bp->dev)) bnx2_test_link()
5995 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_test_link()
5996 if (bp->link_up) bnx2_test_link()
6000 spin_lock_bh(&bp->phy_lock); bnx2_test_link()
6001 bnx2_enable_bmsr1(bp); bnx2_test_link()
6002 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_test_link()
6003 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); bnx2_test_link()
6004 bnx2_disable_bmsr1(bp); bnx2_test_link()
6005 spin_unlock_bh(&bp->phy_lock); bnx2_test_link()
6014 bnx2_test_intr(struct bnx2 *bp) bnx2_test_intr() argument
6019 if (!netif_running(bp->dev)) bnx2_test_intr()
6022 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; bnx2_test_intr()
6025 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); bnx2_test_intr()
6026 BNX2_RD(bp, BNX2_HC_COMMAND); bnx2_test_intr()
6029 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != bnx2_test_intr()
6045 bnx2_5706_serdes_has_link(struct bnx2 *bp) bnx2_5706_serdes_has_link() argument
6049 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) bnx2_5706_serdes_has_link()
6052 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); bnx2_5706_serdes_has_link()
6053 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); bnx2_5706_serdes_has_link()
6058 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_5706_serdes_has_link()
6059 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_5706_serdes_has_link()
6060 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); bnx2_5706_serdes_has_link()
6065 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1); bnx2_5706_serdes_has_link()
6066 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); bnx2_5706_serdes_has_link()
6067 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); bnx2_5706_serdes_has_link()
6076 bnx2_5706_serdes_timer(struct bnx2 *bp) bnx2_5706_serdes_timer() argument
6080 spin_lock(&bp->phy_lock); bnx2_5706_serdes_timer()
6081 if (bp->serdes_an_pending) { bnx2_5706_serdes_timer()
6082 bp->serdes_an_pending--; bnx2_5706_serdes_timer()
6084 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { bnx2_5706_serdes_timer()
6087 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5706_serdes_timer()
6089 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706_serdes_timer()
6092 if (bnx2_5706_serdes_has_link(bp)) { bnx2_5706_serdes_timer()
6095 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_5706_serdes_timer()
6096 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_5706_serdes_timer()
6100 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && bnx2_5706_serdes_timer()
6101 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { bnx2_5706_serdes_timer()
6104 bnx2_write_phy(bp, 0x17, 0x0f01); bnx2_5706_serdes_timer()
6105 bnx2_read_phy(bp, 0x15, &phy2); bnx2_5706_serdes_timer()
6109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5706_serdes_timer()
6111 bnx2_write_phy(bp, bp->mii_bmcr, bmcr); bnx2_5706_serdes_timer()
6113 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; bnx2_5706_serdes_timer()
6116 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5706_serdes_timer()
6121 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); bnx2_5706_serdes_timer()
6122 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); bnx2_5706_serdes_timer()
6123 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); bnx2_5706_serdes_timer()
6125 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) { bnx2_5706_serdes_timer()
6126 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) { bnx2_5706_serdes_timer()
6127 bnx2_5706s_force_link_dn(bp, 1); bnx2_5706_serdes_timer()
6128 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; bnx2_5706_serdes_timer()
6130 bnx2_set_link(bp); bnx2_5706_serdes_timer()
6131 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC)) bnx2_5706_serdes_timer()
6132 bnx2_set_link(bp); bnx2_5706_serdes_timer()
6134 spin_unlock(&bp->phy_lock); bnx2_5706_serdes_timer()
6138 bnx2_5708_serdes_timer(struct bnx2 *bp) bnx2_5708_serdes_timer() argument
6140 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_5708_serdes_timer()
6143 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) { bnx2_5708_serdes_timer()
6144 bp->serdes_an_pending = 0; bnx2_5708_serdes_timer()
6148 spin_lock(&bp->phy_lock); bnx2_5708_serdes_timer()
6149 if (bp->serdes_an_pending) bnx2_5708_serdes_timer()
6150 bp->serdes_an_pending--; bnx2_5708_serdes_timer()
6151 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { bnx2_5708_serdes_timer()
6154 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_5708_serdes_timer()
6156 bnx2_enable_forced_2g5(bp); bnx2_5708_serdes_timer()
6157 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT; bnx2_5708_serdes_timer()
6159 bnx2_disable_forced_2g5(bp); bnx2_5708_serdes_timer()
6160 bp->serdes_an_pending = 2; bnx2_5708_serdes_timer()
6161 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5708_serdes_timer()
6165 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_5708_serdes_timer()
6167 spin_unlock(&bp->phy_lock); bnx2_5708_serdes_timer()
6173 struct bnx2 *bp = (struct bnx2 *) data; bnx2_timer() local
6175 if (!netif_running(bp->dev)) bnx2_timer()
6178 if (atomic_read(&bp->intr_sem) != 0) bnx2_timer()
6181 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == bnx2_timer()
6183 bnx2_chk_missed_msi(bp); bnx2_timer()
6185 bnx2_send_heart_beat(bp); bnx2_timer()
6187 bp->stats_blk->stat_FwRxDrop = bnx2_timer()
6188 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT); bnx2_timer()
6191 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) bnx2_timer()
6192 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | bnx2_timer()
6195 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_timer()
6196 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) bnx2_timer()
6197 bnx2_5706_serdes_timer(bp); bnx2_timer()
6199 bnx2_5708_serdes_timer(bp); bnx2_timer()
6203 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_timer()
6207 bnx2_request_irq(struct bnx2 *bp) bnx2_request_irq() argument
6213 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX) bnx2_request_irq()
6218 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_request_irq()
6219 irq = &bp->irq_tbl[i]; bnx2_request_irq()
6221 &bp->bnx2_napi[i]); bnx2_request_irq()
6230 __bnx2_free_irq(struct bnx2 *bp) __bnx2_free_irq() argument
6235 for (i = 0; i < bp->irq_nvecs; i++) { __bnx2_free_irq()
6236 irq = &bp->irq_tbl[i]; __bnx2_free_irq()
6238 free_irq(irq->vector, &bp->bnx2_napi[i]); __bnx2_free_irq()
6244 bnx2_free_irq(struct bnx2 *bp) bnx2_free_irq() argument
6247 __bnx2_free_irq(bp); bnx2_free_irq()
6248 if (bp->flags & BNX2_FLAG_USING_MSI) bnx2_free_irq()
6249 pci_disable_msi(bp->pdev); bnx2_free_irq()
6250 else if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_free_irq()
6251 pci_disable_msix(bp->pdev); bnx2_free_irq()
6253 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI); bnx2_free_irq()
6257 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) bnx2_enable_msix() argument
6261 struct net_device *dev = bp->dev; bnx2_enable_msix()
6262 const int len = sizeof(bp->irq_tbl[0].name); bnx2_enable_msix()
6264 bnx2_setup_msix_tbl(bp); bnx2_enable_msix()
6265 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); bnx2_enable_msix()
6266 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); bnx2_enable_msix()
6267 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); bnx2_enable_msix()
6271 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL); bnx2_enable_msix()
6282 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, bnx2_enable_msix()
6291 bp->irq_nvecs = msix_vecs; bnx2_enable_msix()
6292 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; bnx2_enable_msix()
6294 bp->irq_tbl[i].vector = msix_ent[i].vector; bnx2_enable_msix()
6295 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); bnx2_enable_msix()
6296 bp->irq_tbl[i].handler = bnx2_msi_1shot; bnx2_enable_msix()
6301 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) bnx2_setup_int_mode() argument
6306 if (!bp->num_req_rx_rings) bnx2_setup_int_mode()
6307 msix_vecs = max(cpus + 1, bp->num_req_tx_rings); bnx2_setup_int_mode()
6308 else if (!bp->num_req_tx_rings) bnx2_setup_int_mode()
6309 msix_vecs = max(cpus, bp->num_req_rx_rings); bnx2_setup_int_mode()
6311 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings); bnx2_setup_int_mode()
6315 bp->irq_tbl[0].handler = bnx2_interrupt; bnx2_setup_int_mode()
6316 strcpy(bp->irq_tbl[0].name, bp->dev->name); bnx2_setup_int_mode()
6317 bp->irq_nvecs = 1; bnx2_setup_int_mode()
6318 bp->irq_tbl[0].vector = bp->pdev->irq; bnx2_setup_int_mode()
6320 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) bnx2_setup_int_mode()
6321 bnx2_enable_msix(bp, msix_vecs); bnx2_setup_int_mode()
6323 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && bnx2_setup_int_mode()
6324 !(bp->flags & BNX2_FLAG_USING_MSIX)) { bnx2_setup_int_mode()
6325 if (pci_enable_msi(bp->pdev) == 0) { bnx2_setup_int_mode()
6326 bp->flags |= BNX2_FLAG_USING_MSI; bnx2_setup_int_mode()
6327 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_setup_int_mode()
6328 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; bnx2_setup_int_mode()
6329 bp->irq_tbl[0].handler = bnx2_msi_1shot; bnx2_setup_int_mode()
6331 bp->irq_tbl[0].handler = bnx2_msi; bnx2_setup_int_mode()
6333 bp->irq_tbl[0].vector = bp->pdev->irq; bnx2_setup_int_mode()
6337 if (!bp->num_req_tx_rings) bnx2_setup_int_mode()
6338 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); bnx2_setup_int_mode()
6340 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings); bnx2_setup_int_mode()
6342 if (!bp->num_req_rx_rings) bnx2_setup_int_mode()
6343 bp->num_rx_rings = bp->irq_nvecs; bnx2_setup_int_mode()
6345 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings); bnx2_setup_int_mode()
6347 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings); bnx2_setup_int_mode()
6349 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings); bnx2_setup_int_mode()
6356 struct bnx2 *bp = netdev_priv(dev); bnx2_open() local
6359 rc = bnx2_request_firmware(bp); bnx2_open()
6365 bnx2_disable_int(bp); bnx2_open()
6367 rc = bnx2_setup_int_mode(bp, disable_msi); bnx2_open()
6370 bnx2_init_napi(bp); bnx2_open()
6371 bnx2_napi_enable(bp); bnx2_open()
6372 rc = bnx2_alloc_mem(bp); bnx2_open()
6376 rc = bnx2_request_irq(bp); bnx2_open()
6380 rc = bnx2_init_nic(bp, 1); bnx2_open()
6384 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_open()
6386 atomic_set(&bp->intr_sem, 0); bnx2_open()
6388 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block)); bnx2_open()
6390 bnx2_enable_int(bp); bnx2_open()
6392 if (bp->flags & BNX2_FLAG_USING_MSI) { bnx2_open()
6396 if (bnx2_test_intr(bp) != 0) { bnx2_open()
6397 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n"); bnx2_open()
6399 bnx2_disable_int(bp); bnx2_open()
6400 bnx2_free_irq(bp); bnx2_open()
6402 bnx2_setup_int_mode(bp, 1); bnx2_open()
6404 rc = bnx2_init_nic(bp, 0); bnx2_open()
6407 rc = bnx2_request_irq(bp); bnx2_open()
6410 del_timer_sync(&bp->timer); bnx2_open()
6413 bnx2_enable_int(bp); bnx2_open()
6416 if (bp->flags & BNX2_FLAG_USING_MSI) bnx2_open()
6418 else if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_open()
6426 bnx2_napi_disable(bp); bnx2_open()
6427 bnx2_free_skbs(bp); bnx2_open()
6428 bnx2_free_irq(bp); bnx2_open()
6429 bnx2_free_mem(bp); bnx2_open()
6430 bnx2_del_napi(bp); bnx2_open()
6431 bnx2_release_firmware(bp); bnx2_open()
6438 struct bnx2 *bp = container_of(work, struct bnx2, reset_task); bnx2_reset_task() local
6443 if (!netif_running(bp->dev)) { bnx2_reset_task()
6448 bnx2_netif_stop(bp, true); bnx2_reset_task()
6450 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd); bnx2_reset_task()
6453 pci_restore_state(bp->pdev); bnx2_reset_task()
6454 pci_save_state(bp->pdev); bnx2_reset_task()
6456 rc = bnx2_init_nic(bp, 1); bnx2_reset_task()
6458 netdev_err(bp->dev, "failed to reset NIC, closing\n"); bnx2_reset_task()
6459 bnx2_napi_enable(bp); bnx2_reset_task()
6460 dev_close(bp->dev); bnx2_reset_task()
6465 atomic_set(&bp->intr_sem, 1); bnx2_reset_task()
6466 bnx2_netif_start(bp, true); bnx2_reset_task()
6473 bnx2_dump_ftq(struct bnx2 *bp) bnx2_dump_ftq() argument
6477 struct net_device *dev = bp->dev; bnx2_dump_ftq()
6501 bnx2_reg_rd_ind(bp, ftq_arr[i].off)); bnx2_dump_ftq()
6506 reg, bnx2_reg_rd_ind(bp, reg), bnx2_dump_ftq()
6507 bnx2_reg_rd_ind(bp, reg + 4), bnx2_dump_ftq()
6508 bnx2_reg_rd_ind(bp, reg + 8), bnx2_dump_ftq()
6509 bnx2_reg_rd_ind(bp, reg + 0x1c), bnx2_dump_ftq()
6510 bnx2_reg_rd_ind(bp, reg + 0x1c), bnx2_dump_ftq()
6511 bnx2_reg_rd_ind(bp, reg + 0x20)); bnx2_dump_ftq()
6516 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); bnx2_dump_ftq()
6521 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i); bnx2_dump_ftq()
6522 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE, bnx2_dump_ftq()
6524 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); bnx2_dump_ftq()
6525 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) & bnx2_dump_ftq()
6529 cid = BNX2_RD(bp, BNX2_TBDC_CID); bnx2_dump_ftq()
6530 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX); bnx2_dump_ftq()
6531 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE); bnx2_dump_ftq()
6540 bnx2_dump_state(struct bnx2 *bp) bnx2_dump_state() argument
6542 struct net_device *dev = bp->dev; bnx2_dump_state()
6545 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); bnx2_dump_state()
6547 atomic_read(&bp->intr_sem), val1); bnx2_dump_state()
6548 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); bnx2_dump_state()
6549 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); bnx2_dump_state()
6552 BNX2_RD(bp, BNX2_EMAC_TX_STATUS), bnx2_dump_state()
6553 BNX2_RD(bp, BNX2_EMAC_RX_STATUS)); bnx2_dump_state()
6555 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); bnx2_dump_state()
6557 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); bnx2_dump_state()
6558 if (bp->flags & BNX2_FLAG_USING_MSIX) bnx2_dump_state()
6560 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); bnx2_dump_state()
6566 struct bnx2 *bp = netdev_priv(dev); bnx2_tx_timeout() local
6568 bnx2_dump_ftq(bp); bnx2_tx_timeout()
6569 bnx2_dump_state(bp); bnx2_tx_timeout()
6570 bnx2_dump_mcp_state(bp); bnx2_tx_timeout()
6573 schedule_work(&bp->reset_task); bnx2_tx_timeout()
6583 struct bnx2 *bp = netdev_priv(dev); bnx2_start_xmit() local
6596 bnapi = &bp->bnx2_napi[i]; bnx2_start_xmit()
6600 if (unlikely(bnx2_tx_avail(bp, txr) < bnx2_start_xmit()
6655 mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); bnx2_start_xmit()
6656 if (dma_mapping_error(&bp->pdev->dev, mapping)) { bnx2_start_xmit()
6684 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len, bnx2_start_xmit()
6686 if (dma_mapping_error(&bp->pdev->dev, mapping)) bnx2_start_xmit()
6707 BNX2_WR16(bp, txr->tx_bidx_addr, prod); bnx2_start_xmit()
6708 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); bnx2_start_xmit()
6714 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { bnx2_start_xmit()
6723 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) bnx2_start_xmit()
6737 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), bnx2_start_xmit()
6745 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), bnx2_start_xmit()
6758 struct bnx2 *bp = netdev_priv(dev); bnx2_close() local
6760 bnx2_disable_int_sync(bp); bnx2_close()
6761 bnx2_napi_disable(bp); bnx2_close()
6763 del_timer_sync(&bp->timer); bnx2_close()
6764 bnx2_shutdown_chip(bp); bnx2_close()
6765 bnx2_free_irq(bp); bnx2_close()
6766 bnx2_free_skbs(bp); bnx2_close()
6767 bnx2_free_mem(bp); bnx2_close()
6768 bnx2_del_napi(bp); bnx2_close()
6769 bp->link_up = 0; bnx2_close()
6770 netif_carrier_off(bp->dev); bnx2_close()
6775 bnx2_save_stats(struct bnx2 *bp) bnx2_save_stats() argument
6777 u32 *hw_stats = (u32 *) bp->stats_blk; bnx2_save_stats()
6778 u32 *temp_stats = (u32 *) bp->temp_stats_blk; bnx2_save_stats()
6802 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6803 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6806 (unsigned long) (bp->stats_blk->ctr + \
6807 bp->temp_stats_blk->ctr)
6812 struct bnx2 *bp = netdev_priv(dev); bnx2_get_stats64() local
6814 if (bp->stats_blk == NULL) bnx2_get_stats64()
6861 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || bnx2_get_stats64()
6862 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) bnx2_get_stats64()
6887 struct bnx2 *bp = netdev_priv(dev); bnx2_get_settings() local
6891 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_get_settings()
6894 } else if (bp->phy_port == PORT_FIBRE) bnx2_get_settings()
6902 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) bnx2_get_settings()
6916 spin_lock_bh(&bp->phy_lock); bnx2_get_settings()
6917 cmd->port = bp->phy_port; bnx2_get_settings()
6918 cmd->advertising = bp->advertising; bnx2_get_settings()
6920 if (bp->autoneg & AUTONEG_SPEED) { bnx2_get_settings()
6927 ethtool_cmd_speed_set(cmd, bp->line_speed); bnx2_get_settings()
6928 cmd->duplex = bp->duplex; bnx2_get_settings()
6929 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) { bnx2_get_settings()
6930 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX) bnx2_get_settings()
6940 spin_unlock_bh(&bp->phy_lock); bnx2_get_settings()
6943 cmd->phy_address = bp->phy_addr; bnx2_get_settings()
6951 struct bnx2 *bp = netdev_priv(dev); bnx2_set_settings() local
6952 u8 autoneg = bp->autoneg; bnx2_set_settings()
6953 u8 req_duplex = bp->req_duplex; bnx2_set_settings()
6954 u16 req_line_speed = bp->req_line_speed; bnx2_set_settings()
6955 u32 advertising = bp->advertising; bnx2_set_settings()
6958 spin_lock_bh(&bp->phy_lock); bnx2_set_settings()
6963 if (cmd->port != bp->phy_port && bnx2_set_settings()
6964 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) bnx2_set_settings()
6970 if (!netif_running(dev) && cmd->port != bp->phy_port) bnx2_set_settings()
6997 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) bnx2_set_settings()
7008 bp->autoneg = autoneg; bnx2_set_settings()
7009 bp->advertising = advertising; bnx2_set_settings()
7010 bp->req_line_speed = req_line_speed; bnx2_set_settings()
7011 bp->req_duplex = req_duplex; bnx2_set_settings()
7018 err = bnx2_setup_phy(bp, cmd->port); bnx2_set_settings()
7021 spin_unlock_bh(&bp->phy_lock); bnx2_set_settings()
7029 struct bnx2 *bp = netdev_priv(dev); bnx2_get_drvinfo() local
7033 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); bnx2_get_drvinfo()
7034 strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); bnx2_get_drvinfo()
7050 struct bnx2 *bp = netdev_priv(dev); bnx2_get_regs() local
7080 if (!netif_running(bp->dev)) bnx2_get_regs()
7087 *p++ = BNX2_RD(bp, offset); bnx2_get_regs()
7100 struct bnx2 *bp = netdev_priv(dev); bnx2_get_wol() local
7102 if (bp->flags & BNX2_FLAG_NO_WOL) { bnx2_get_wol()
7108 if (bp->wol) bnx2_get_wol()
7119 struct bnx2 *bp = netdev_priv(dev); bnx2_set_wol() local
7125 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_set_wol()
7128 bp->wol = 1; bnx2_set_wol()
7131 bp->wol = 0; bnx2_set_wol()
7134 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); bnx2_set_wol()
7142 struct bnx2 *bp = netdev_priv(dev); bnx2_nway_reset() local
7148 if (!(bp->autoneg & AUTONEG_SPEED)) { bnx2_nway_reset()
7152 spin_lock_bh(&bp->phy_lock); bnx2_nway_reset()
7154 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { bnx2_nway_reset()
7157 rc = bnx2_setup_remote_phy(bp, bp->phy_port); bnx2_nway_reset()
7158 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset()
7163 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_nway_reset()
7164 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); bnx2_nway_reset()
7165 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset()
7169 spin_lock_bh(&bp->phy_lock); bnx2_nway_reset()
7171 bp->current_interval = BNX2_SERDES_AN_TIMEOUT; bnx2_nway_reset()
7172 bp->serdes_an_pending = 1; bnx2_nway_reset()
7173 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2_nway_reset()
7176 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); bnx2_nway_reset()
7178 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); bnx2_nway_reset()
7180 spin_unlock_bh(&bp->phy_lock); bnx2_nway_reset()
7188 struct bnx2 *bp = netdev_priv(dev); bnx2_get_link() local
7190 return bp->link_up; bnx2_get_link()
7196 struct bnx2 *bp = netdev_priv(dev); bnx2_get_eeprom_len() local
7198 if (bp->flash_info == NULL) bnx2_get_eeprom_len()
7201 return (int) bp->flash_size; bnx2_get_eeprom_len()
7208 struct bnx2 *bp = netdev_priv(dev); bnx2_get_eeprom() local
7213 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); bnx2_get_eeprom()
7222 struct bnx2 *bp = netdev_priv(dev); bnx2_set_eeprom() local
7227 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); bnx2_set_eeprom()
7235 struct bnx2 *bp = netdev_priv(dev); bnx2_get_coalesce() local
7239 coal->rx_coalesce_usecs = bp->rx_ticks; bnx2_get_coalesce()
7240 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; bnx2_get_coalesce()
7241 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; bnx2_get_coalesce()
7242 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; bnx2_get_coalesce()
7244 coal->tx_coalesce_usecs = bp->tx_ticks; bnx2_get_coalesce()
7245 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; bnx2_get_coalesce()
7246 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; bnx2_get_coalesce()
7247 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; bnx2_get_coalesce()
7249 coal->stats_block_coalesce_usecs = bp->stats_ticks; bnx2_get_coalesce()
7257 struct bnx2 *bp = netdev_priv(dev); bnx2_set_coalesce() local
7259 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; bnx2_set_coalesce()
7260 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; bnx2_set_coalesce()
7262 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; bnx2_set_coalesce()
7263 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; bnx2_set_coalesce()
7265 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; bnx2_set_coalesce()
7266 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; bnx2_set_coalesce()
7268 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; bnx2_set_coalesce()
7269 if (bp->rx_quick_cons_trip_int > 0xff) bnx2_set_coalesce()
7270 bp->rx_quick_cons_trip_int = 0xff; bnx2_set_coalesce()
7272 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; bnx2_set_coalesce()
7273 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; bnx2_set_coalesce()
7275 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; bnx2_set_coalesce()
7276 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; bnx2_set_coalesce()
7278 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; bnx2_set_coalesce()
7279 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; bnx2_set_coalesce()
7281 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; bnx2_set_coalesce()
7282 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = bnx2_set_coalesce()
7285 bp->stats_ticks = coal->stats_block_coalesce_usecs; bnx2_set_coalesce()
7286 if (bp->flags & BNX2_FLAG_BROKEN_STATS) { bnx2_set_coalesce()
7287 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) bnx2_set_coalesce()
7288 bp->stats_ticks = USEC_PER_SEC; bnx2_set_coalesce()
7290 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) bnx2_set_coalesce()
7291 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_set_coalesce()
7292 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_set_coalesce()
7294 if (netif_running(bp->dev)) { bnx2_set_coalesce()
7295 bnx2_netif_stop(bp, true); bnx2_set_coalesce()
7296 bnx2_init_nic(bp, 0); bnx2_set_coalesce()
7297 bnx2_netif_start(bp, true); bnx2_set_coalesce()
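
bnx2_set_coalesce() clamps each requested value to the width of the corresponding host-coalescing field (10 bits for tick counts, 8 bits for frame-count trips, per the 0x3ff/0xff limits above) before re-initializing a running NIC. The clamping step in isolation, as a runnable sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Clamp a requested coalescing parameter to a field-width limit,
     * mirroring lines like: if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; */
    static uint16_t clamp_field(uint32_t req, uint16_t max)
    {
        return req > max ? max : (uint16_t)req;
    }

    int main(void)
    {
        uint16_t rx_ticks = clamp_field(5000, 0x3ff);  /* 10-bit tick field */
        uint16_t rx_trip  = clamp_field(300, 0xff);    /* 8-bit frame trip  */

        printf("rx_ticks=%u rx_trip=%u\n", rx_ticks, rx_trip); /* 1023 255 */
        return 0;
    }
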
7306 struct bnx2 *bp = netdev_priv(dev); bnx2_get_ringparam() local
7311 ering->rx_pending = bp->rx_ring_size; bnx2_get_ringparam()
7312 ering->rx_jumbo_pending = bp->rx_pg_ring_size; bnx2_get_ringparam()
7315 ering->tx_pending = bp->tx_ring_size; bnx2_get_ringparam()
7319 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq) bnx2_change_ring_size() argument
7321 if (netif_running(bp->dev)) { bnx2_change_ring_size()
7323 bnx2_save_stats(bp); bnx2_change_ring_size()
7325 bnx2_netif_stop(bp, true); bnx2_change_ring_size()
7326 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_change_ring_size()
7328 bnx2_free_irq(bp); bnx2_change_ring_size()
7329 bnx2_del_napi(bp); bnx2_change_ring_size()
7331 __bnx2_free_irq(bp); bnx2_change_ring_size()
7333 bnx2_free_skbs(bp); bnx2_change_ring_size()
7334 bnx2_free_mem(bp); bnx2_change_ring_size()
7337 bnx2_set_rx_ring_size(bp, rx); bnx2_change_ring_size()
7338 bp->tx_ring_size = tx; bnx2_change_ring_size()
7340 if (netif_running(bp->dev)) { bnx2_change_ring_size()
7344 rc = bnx2_setup_int_mode(bp, disable_msi); bnx2_change_ring_size()
7345 bnx2_init_napi(bp); bnx2_change_ring_size()
7349 rc = bnx2_alloc_mem(bp); bnx2_change_ring_size()
7352 rc = bnx2_request_irq(bp); bnx2_change_ring_size()
7355 rc = bnx2_init_nic(bp, 0); bnx2_change_ring_size()
7358 bnx2_napi_enable(bp); bnx2_change_ring_size()
7359 dev_close(bp->dev); bnx2_change_ring_size()
7363 mutex_lock(&bp->cnic_lock); bnx2_change_ring_size()
7365 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) bnx2_change_ring_size()
7366 bnx2_setup_cnic_irq_info(bp); bnx2_change_ring_size()
7367 mutex_unlock(&bp->cnic_lock); bnx2_change_ring_size()
7369 bnx2_netif_start(bp, true); bnx2_change_ring_size()
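
The ordering inside bnx2_change_ring_size() is deliberate: stats are saved before the chip reset wipes them, traffic is stopped, IRQs and DMA memory are released, the new sizes are recorded, and only then is everything reallocated and restarted (falling back to dev_close() on failure). A stub-function sketch of that sequence, not the driver's actual helpers:

    #include <stdio.h>

    /* Stubs standing in for the driver helpers named in the listing. */
    static void save_stats(void)   { puts("save stats snapshot"); }
    static void stop_traffic(void) { puts("stop NAPI/queues, reset chip"); }
    static void free_res(void)     { puts("free IRQs, rings, DMA memory"); }
    static void set_sizes(void)    { puts("record new rx/tx ring sizes"); }
    static int  realloc_res(void)  { puts("realloc memory, request IRQs"); return 0; }
    static void restart(void)      { puts("re-init NIC, restart traffic"); }

    int main(void)
    {
        save_stats();          /* before reset: the reset clears counters */
        stop_traffic();
        free_res();
        set_sizes();
        if (realloc_res() == 0)
            restart();         /* on failure the driver closes the device */
        return 0;
    }
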
7377 struct bnx2 *bp = netdev_priv(dev); bnx2_set_ringparam() local
7386 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending, bnx2_set_ringparam()
7394 struct bnx2 *bp = netdev_priv(dev); bnx2_get_pauseparam() local
7396 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); bnx2_get_pauseparam()
7397 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); bnx2_get_pauseparam()
7398 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); bnx2_get_pauseparam()
7404 struct bnx2 *bp = netdev_priv(dev); bnx2_set_pauseparam() local
7406 bp->req_flow_ctrl = 0; bnx2_set_pauseparam()
7408 bp->req_flow_ctrl |= FLOW_CTRL_RX; bnx2_set_pauseparam()
7410 bp->req_flow_ctrl |= FLOW_CTRL_TX; bnx2_set_pauseparam()
7413 bp->autoneg |= AUTONEG_FLOW_CTRL; bnx2_set_pauseparam()
7416 bp->autoneg &= ~AUTONEG_FLOW_CTRL; bnx2_set_pauseparam()
7420 spin_lock_bh(&bp->phy_lock); bnx2_set_pauseparam()
7421 bnx2_setup_phy(bp, bp->phy_port); bnx2_set_pauseparam()
7422 spin_unlock_bh(&bp->phy_lock); bnx2_set_pauseparam()
7582 struct bnx2 *bp = netdev_priv(dev); bnx2_self_test() local
7588 bnx2_netif_stop(bp, true); bnx2_self_test()
7589 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); bnx2_self_test()
7590 bnx2_free_skbs(bp); bnx2_self_test()
7592 if (bnx2_test_registers(bp) != 0) { bnx2_self_test()
7596 if (bnx2_test_memory(bp) != 0) { bnx2_self_test()
7600 if ((buf[2] = bnx2_test_loopback(bp)) != 0) bnx2_self_test()
7603 if (!netif_running(bp->dev)) bnx2_self_test()
7604 bnx2_shutdown_chip(bp); bnx2_self_test()
7606 bnx2_init_nic(bp, 1); bnx2_self_test()
7607 bnx2_netif_start(bp, true); bnx2_self_test()
7612 if (bp->link_up) bnx2_self_test()
7618 if (bnx2_test_nvram(bp) != 0) { bnx2_self_test()
7622 if (bnx2_test_intr(bp) != 0) { bnx2_self_test()
7627 if (bnx2_test_link(bp) != 0) { bnx2_self_test()
7653 struct bnx2 *bp = netdev_priv(dev); bnx2_get_ethtool_stats() local
7655 u32 *hw_stats = (u32 *) bp->stats_blk; bnx2_get_ethtool_stats()
7656 u32 *temp_stats = (u32 *) bp->temp_stats_blk; bnx2_get_ethtool_stats()
7664 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || bnx2_get_ethtool_stats()
7665 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) || bnx2_get_ethtool_stats()
7666 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) || bnx2_get_ethtool_stats()
7667 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) bnx2_get_ethtool_stats()
7699 struct bnx2 *bp = netdev_priv(dev); bnx2_set_phys_id() local
7703 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); bnx2_set_phys_id()
7704 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); bnx2_set_phys_id()
7708 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | bnx2_set_phys_id()
7717 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); bnx2_set_phys_id()
7721 BNX2_WR(bp, BNX2_EMAC_LED, 0); bnx2_set_phys_id()
7722 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); bnx2_set_phys_id()
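
bnx2_set_phys_id() implements the standard ethtool identify handshake: ETHTOOL_ID_ACTIVE saves the LED configuration and returns 1 to ask the core to cycle the LED once per second, ID_ON/ID_OFF force the LED, and ID_INACTIVE restores the saved state. A user-space model of that state machine (state values as in include/uapi/linux/ethtool.h; the register accesses are stubbed):

    #include <stdio.h>

    /* Values as in include/uapi/linux/ethtool.h. */
    enum ethtool_phys_id_state {
        ETHTOOL_ID_ACTIVE,
        ETHTOOL_ID_ON,
        ETHTOOL_ID_OFF,
        ETHTOOL_ID_INACTIVE,
    };

    static unsigned int leds_save;          /* mimics bp->leds_save        */
    static unsigned int misc_cfg = 0x1234;  /* stand-in for BNX2_MISC_CFG  */

    static int set_phys_id(enum ethtool_phys_id_state state)
    {
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
            leds_save = misc_cfg;   /* save, switch LED to MAC mode */
            return 1;               /* ask the core to cycle once per second */
        case ETHTOOL_ID_ON:
            puts("LED forced on");
            break;
        case ETHTOOL_ID_OFF:
            puts("LED forced off");
            break;
        case ETHTOOL_ID_INACTIVE:
            misc_cfg = leds_save;   /* restore original LED mode */
            puts("LED restored");
            break;
        }
        return 0;
    }

    int main(void)
    {
        set_phys_id(ETHTOOL_ID_ACTIVE);
        set_phys_id(ETHTOOL_ID_ON);
        set_phys_id(ETHTOOL_ID_OFF);
        set_phys_id(ETHTOOL_ID_INACTIVE);
        return 0;
    }
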
7732 struct bnx2 *bp = netdev_priv(dev); bnx2_set_features() local
7741 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && bnx2_set_features()
7743 bnx2_netif_stop(bp, false); bnx2_set_features()
7746 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); bnx2_set_features()
7747 bnx2_netif_start(bp, false); bnx2_set_features()
7757 struct bnx2 *bp = netdev_priv(dev); bnx2_get_channels() local
7761 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { bnx2_get_channels()
7770 channels->rx_count = bp->num_rx_rings; bnx2_get_channels()
7771 channels->tx_count = bp->num_tx_rings; bnx2_get_channels()
7779 struct bnx2 *bp = netdev_priv(dev); bnx2_set_channels() local
7784 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { bnx2_set_channels()
7792 bp->num_req_rx_rings = channels->rx_count; bnx2_set_channels()
7793 bp->num_req_tx_rings = channels->tx_count; bnx2_set_channels()
7796 rc = bnx2_change_ring_size(bp, bp->rx_ring_size, bnx2_set_channels()
7797 bp->tx_ring_size, true); bnx2_set_channels()
7835 struct bnx2 *bp = netdev_priv(dev); bnx2_ioctl() local
7840 data->phy_id = bp->phy_addr; bnx2_ioctl()
7846 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_ioctl()
7852 spin_lock_bh(&bp->phy_lock); bnx2_ioctl()
7853 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); bnx2_ioctl()
7854 spin_unlock_bh(&bp->phy_lock); bnx2_ioctl()
7862 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) bnx2_ioctl()
7868 spin_lock_bh(&bp->phy_lock); bnx2_ioctl()
7869 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); bnx2_ioctl()
7870 spin_unlock_bh(&bp->phy_lock); bnx2_ioctl()
7886 struct bnx2 *bp = netdev_priv(dev); bnx2_change_mac_addr() local
7893 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); bnx2_change_mac_addr()
7902 struct bnx2 *bp = netdev_priv(dev); bnx2_change_mtu() local
7909 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, bnx2_change_mtu()
7917 struct bnx2 *bp = netdev_priv(dev); poll_bnx2() local
7920 for (i = 0; i < bp->irq_nvecs; i++) { poll_bnx2()
7921 struct bnx2_irq *irq = &bp->irq_tbl[i]; poll_bnx2()
7924 irq->handler(irq->vector, &bp->bnx2_napi[i]); poll_bnx2()
7931 bnx2_get_5709_media(struct bnx2 *bp) bnx2_get_5709_media() argument
7933 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); bnx2_get_5709_media()
7940 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media()
7949 if (bp->func == 0) { bnx2_get_5709_media()
7954 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media()
7962 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_get_5709_media()
7969 bnx2_get_pci_speed(struct bnx2 *bp) bnx2_get_pci_speed() argument
7973 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS); bnx2_get_pci_speed()
7977 bp->flags |= BNX2_FLAG_PCIX; bnx2_get_pci_speed()
7979 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); bnx2_get_pci_speed()
7984 bp->bus_speed_mhz = 133; bnx2_get_pci_speed()
7988 bp->bus_speed_mhz = 100; bnx2_get_pci_speed()
7993 bp->bus_speed_mhz = 66; bnx2_get_pci_speed()
7998 bp->bus_speed_mhz = 50; bnx2_get_pci_speed()
8004 bp->bus_speed_mhz = 33; bnx2_get_pci_speed()
8010 bp->bus_speed_mhz = 66; bnx2_get_pci_speed()
8012 bp->bus_speed_mhz = 33; bnx2_get_pci_speed()
8016 bp->flags |= BNX2_FLAG_PCI_32BIT; bnx2_get_pci_speed()
8021 bnx2_read_vpd_fw_ver(struct bnx2 *bp) bnx2_read_vpd_fw_ver() argument
8035 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, bnx2_read_vpd_fw_ver()
8081 memcpy(bp->fw_version, &data[j], len); bnx2_read_vpd_fw_ver()
8082 bp->fw_version[len] = ' '; bnx2_read_vpd_fw_ver()
8091 struct bnx2 *bp; bnx2_init_board() local
8098 bp = netdev_priv(dev); bnx2_init_board()
8100 bp->flags = 0; bnx2_init_board()
8101 bp->phy_flags = 0; bnx2_init_board()
8103 bp->temp_stats_blk = bnx2_init_board()
8106 if (bp->temp_stats_blk == NULL) { bnx2_init_board()
8133 bp->pm_cap = pdev->pm_cap; bnx2_init_board()
8134 if (bp->pm_cap == 0) { bnx2_init_board()
8141 bp->dev = dev; bnx2_init_board()
8142 bp->pdev = pdev; bnx2_init_board()
8144 spin_lock_init(&bp->phy_lock); bnx2_init_board()
8145 spin_lock_init(&bp->indirect_lock); bnx2_init_board()
8147 mutex_init(&bp->cnic_lock); bnx2_init_board()
8149 INIT_WORK(&bp->reset_task, bnx2_reset_task); bnx2_init_board()
8151 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID + bnx2_init_board()
8153 if (!bp->regview) { bnx2_init_board()
8163 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, bnx2_init_board()
8167 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID); bnx2_init_board()
8169 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bnx2_init_board()
8175 bp->flags |= BNX2_FLAG_PCIE; bnx2_init_board()
8176 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) bnx2_init_board()
8177 bp->flags |= BNX2_FLAG_JUMBO_BROKEN; bnx2_init_board()
8182 bp->flags |= BNX2_FLAG_AER_ENABLED; bnx2_init_board()
8185 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); bnx2_init_board()
8186 if (bp->pcix_cap == 0) { bnx2_init_board()
8192 bp->flags |= BNX2_FLAG_BROKEN_STATS; bnx2_init_board()
8195 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && bnx2_init_board()
8196 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { bnx2_init_board()
8198 bp->flags |= BNX2_FLAG_MSIX_CAP; bnx2_init_board()
8201 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && bnx2_init_board()
8202 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { bnx2_init_board()
8204 bp->flags |= BNX2_FLAG_MSI_CAP; bnx2_init_board()
8208 if (BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_init_board()
8227 if (!(bp->flags & BNX2_FLAG_PCIE)) bnx2_init_board()
8228 bnx2_get_pci_speed(bp); bnx2_init_board()
8231 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_board()
8232 reg = BNX2_RD(bp, PCI_COMMAND); bnx2_init_board()
8234 BNX2_WR(bp, PCI_COMMAND, reg); bnx2_init_board()
8235 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && bnx2_init_board()
8236 !(bp->flags & BNX2_FLAG_PCIX)) { bnx2_init_board()
8243 bnx2_init_nvram(bp); bnx2_init_board()
8245 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); bnx2_init_board()
8247 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID) bnx2_init_board()
8248 bp->func = 1; bnx2_init_board()
8252 u32 off = bp->func << 2; bnx2_init_board()
8254 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); bnx2_init_board()
8256 bp->shmem_base = HOST_VIEW_SHMEM_BASE; bnx2_init_board()
8261 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE); bnx2_init_board()
8270 bnx2_read_vpd_fw_ver(bp); bnx2_init_board()
8272 j = strlen(bp->fw_version); bnx2_init_board()
8273 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV); bnx2_init_board()
8278 bp->fw_version[j++] = 'b'; bnx2_init_board()
8279 bp->fw_version[j++] = 'c'; bnx2_init_board()
8280 bp->fw_version[j++] = ' '; bnx2_init_board()
8285 bp->fw_version[j++] = (num / k) + '0'; bnx2_init_board()
8290 bp->fw_version[j++] = '.'; bnx2_init_board()
8292 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); bnx2_init_board()
8294 bp->wol = 1; bnx2_init_board()
8297 bp->flags |= BNX2_FLAG_ASF_ENABLE; bnx2_init_board()
8300 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_init_board()
8306 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); bnx2_init_board()
8310 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR); bnx2_init_board()
8313 bp->fw_version[j++] = ' '; bnx2_init_board()
8315 reg = bnx2_reg_rd_ind(bp, addr + i * 4); bnx2_init_board()
8317 memcpy(&bp->fw_version[j], &reg, 4); bnx2_init_board()
8322 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER); bnx2_init_board()
8323 bp->mac_addr[0] = (u8) (reg >> 8); bnx2_init_board()
8324 bp->mac_addr[1] = (u8) reg; bnx2_init_board()
8326 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER); bnx2_init_board()
8327 bp->mac_addr[2] = (u8) (reg >> 24); bnx2_init_board()
8328 bp->mac_addr[3] = (u8) (reg >> 16); bnx2_init_board()
8329 bp->mac_addr[4] = (u8) (reg >> 8); bnx2_init_board()
8330 bp->mac_addr[5] = (u8) reg; bnx2_init_board()
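
Lines 8322-8330 unpack the station address from two shared-memory words: MAC_UPPER carries bytes 0-1 in its low half, MAC_LOWER carries bytes 2-5. The same byte extraction standalone, with made-up register contents:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t upper = 0x00001234;   /* hypothetical MAC_UPPER value */
        uint32_t lower = 0x56789abc;   /* hypothetical MAC_LOWER value */
        uint8_t mac[6];

        mac[0] = (uint8_t)(upper >> 8);
        mac[1] = (uint8_t)upper;
        mac[2] = (uint8_t)(lower >> 24);
        mac[3] = (uint8_t)(lower >> 16);
        mac[4] = (uint8_t)(lower >> 8);
        mac[5] = (uint8_t)lower;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;   /* prints 12:34:56:78:9a:bc */
    }
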
8332 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT; bnx2_init_board()
8333 bnx2_set_rx_ring_size(bp, 255); bnx2_init_board()
8335 bp->tx_quick_cons_trip_int = 2; bnx2_init_board()
8336 bp->tx_quick_cons_trip = 20; bnx2_init_board()
8337 bp->tx_ticks_int = 18; bnx2_init_board()
8338 bp->tx_ticks = 80; bnx2_init_board()
8340 bp->rx_quick_cons_trip_int = 2; bnx2_init_board()
8341 bp->rx_quick_cons_trip = 12; bnx2_init_board()
8342 bp->rx_ticks_int = 18; bnx2_init_board()
8343 bp->rx_ticks = 18; bnx2_init_board()
8345 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; bnx2_init_board()
8347 bp->current_interval = BNX2_TIMER_INTERVAL; bnx2_init_board()
8349 bp->phy_addr = 1; bnx2_init_board()
8357 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_board()
8358 bnx2_get_5709_media(bp); bnx2_init_board()
8359 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT) bnx2_init_board()
8360 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; bnx2_init_board()
8362 bp->phy_port = PORT_TP; bnx2_init_board()
8363 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { bnx2_init_board()
8364 bp->phy_port = PORT_FIBRE; bnx2_init_board()
8365 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); bnx2_init_board()
8367 bp->flags |= BNX2_FLAG_NO_WOL; bnx2_init_board()
8368 bp->wol = 0; bnx2_init_board()
8370 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) { bnx2_init_board()
8377 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL; bnx2_init_board()
8379 bp->phy_addr = 2; bnx2_init_board()
8381 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; bnx2_init_board()
8383 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 || bnx2_init_board()
8384 BNX2_CHIP(bp) == BNX2_CHIP_5708) bnx2_init_board()
8385 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; bnx2_init_board()
8386 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && bnx2_init_board()
8387 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax || bnx2_init_board()
8388 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx)) bnx2_init_board()
8389 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; bnx2_init_board()
8391 bnx2_init_fw_cap(bp); bnx2_init_board()
8393 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || bnx2_init_board()
8394 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || bnx2_init_board()
8395 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) || bnx2_init_board()
8396 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { bnx2_init_board()
8397 bp->flags |= BNX2_FLAG_NO_WOL; bnx2_init_board()
8398 bp->wol = 0; bnx2_init_board()
8401 if (bp->flags & BNX2_FLAG_NO_WOL) bnx2_init_board()
8402 device_set_wakeup_capable(&bp->pdev->dev, false); bnx2_init_board()
8404 device_set_wakeup_enable(&bp->pdev->dev, bp->wol); bnx2_init_board()
8406 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { bnx2_init_board()
8407 bp->tx_quick_cons_trip_int = bnx2_init_board()
8408 bp->tx_quick_cons_trip; bnx2_init_board()
8409 bp->tx_ticks_int = bp->tx_ticks; bnx2_init_board()
8410 bp->rx_quick_cons_trip_int = bnx2_init_board()
8411 bp->rx_quick_cons_trip; bnx2_init_board()
8412 bp->rx_ticks_int = bp->rx_ticks; bnx2_init_board()
8413 bp->comp_prod_trip_int = bp->comp_prod_trip; bnx2_init_board()
8414 bp->com_ticks_int = bp->com_ticks; bnx2_init_board()
8415 bp->cmd_ticks_int = bp->cmd_ticks; bnx2_init_board()
8428 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) { bnx2_init_board()
8444 bnx2_set_default_link(bp); bnx2_init_board()
8445 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; bnx2_init_board()
8447 init_timer(&bp->timer); bnx2_init_board()
8448 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); bnx2_init_board()
8449 bp->timer.data = (unsigned long) bp; bnx2_init_board()
8450 bp->timer.function = bnx2_timer; bnx2_init_board()
8453 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) bnx2_init_board()
8454 bp->cnic_eth_dev.max_iscsi_conn = bnx2_init_board()
8455 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & bnx2_init_board()
8457 bp->cnic_probe = bnx2_cnic_probe; bnx2_init_board()
8464 if (bp->flags & BNX2_FLAG_AER_ENABLED) { bnx2_init_board()
8466 bp->flags &= ~BNX2_FLAG_AER_ENABLED; bnx2_init_board()
8469 pci_iounmap(pdev, bp->regview); bnx2_init_board()
8470 bp->regview = NULL; bnx2_init_board()
8479 kfree(bp->temp_stats_blk); bnx2_init_board()
8485 bnx2_bus_string(struct bnx2 *bp, char *str) bnx2_bus_string() argument
8489 if (bp->flags & BNX2_FLAG_PCIE) { bnx2_bus_string()
8493 if (bp->flags & BNX2_FLAG_PCIX) bnx2_bus_string()
8495 if (bp->flags & BNX2_FLAG_PCI_32BIT) bnx2_bus_string()
8499 s += sprintf(s, " %dMHz", bp->bus_speed_mhz); bnx2_bus_string()
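
bnx2_bus_string() turns the probed flag bits and measured clock into a human-readable bus description. A standalone version; the flag values here are local stand-ins for the BNX2_FLAG_* bits:

    #include <stdio.h>

    /* Local stand-ins; the real driver tests BNX2_FLAG_* in bp->flags. */
    #define FLAG_PCIE      0x1
    #define FLAG_PCIX      0x2
    #define FLAG_PCI_32BIT 0x4

    static char *bus_string(unsigned flags, int mhz, char *str)
    {
        char *s = str;

        if (flags & FLAG_PCIE) {
            s += sprintf(s, "PCI Express");
        } else {
            s += sprintf(s, "PCI");
            if (flags & FLAG_PCIX)
                s += sprintf(s, "-X");
            if (flags & FLAG_PCI_32BIT)
                s += sprintf(s, " 32-bit");
            else
                s += sprintf(s, " 64-bit");
            s += sprintf(s, " %dMHz", mhz);
        }
        return str;
    }

    int main(void)
    {
        char buf[64];

        puts(bus_string(FLAG_PCIX, 133, buf));  /* "PCI-X 64-bit 133MHz" */
        puts(bus_string(FLAG_PCIE, 0, buf));    /* "PCI Express"         */
        return 0;
    }
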
8505 bnx2_del_napi(struct bnx2 *bp) bnx2_del_napi() argument
8509 for (i = 0; i < bp->irq_nvecs; i++) bnx2_del_napi()
8510 netif_napi_del(&bp->bnx2_napi[i].napi); bnx2_del_napi()
8514 bnx2_init_napi(struct bnx2 *bp) bnx2_init_napi() argument
8518 for (i = 0; i < bp->irq_nvecs; i++) { bnx2_init_napi()
8519 struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; bnx2_init_napi()
8527 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64); bnx2_init_napi()
8528 bnapi->bp = bp; bnx2_init_napi()
8554 struct bnx2 *bp; bnx2_init_one() local
8562 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); bnx2_init_one()
8574 bp = netdev_priv(dev); bnx2_init_one()
8578 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); bnx2_init_one()
8584 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) bnx2_init_one()
8592 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) bnx2_init_one()
8602 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A', bnx2_init_one()
8603 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4), bnx2_init_one()
8604 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), bnx2_init_one()
8610 pci_iounmap(pdev, bp->regview); bnx2_init_one()
8623 struct bnx2 *bp = netdev_priv(dev); bnx2_remove_one() local
8627 del_timer_sync(&bp->timer); bnx2_remove_one()
8628 cancel_work_sync(&bp->reset_task); bnx2_remove_one()
8630 pci_iounmap(bp->pdev, bp->regview); bnx2_remove_one()
8633 kfree(bp->temp_stats_blk); bnx2_remove_one()
8635 if (bp->flags & BNX2_FLAG_AER_ENABLED) { bnx2_remove_one()
8637 bp->flags &= ~BNX2_FLAG_AER_ENABLED; bnx2_remove_one()
8640 bnx2_release_firmware(bp); bnx2_remove_one()
8654 struct bnx2 *bp = netdev_priv(dev); bnx2_suspend() local
8657 cancel_work_sync(&bp->reset_task); bnx2_suspend()
8658 bnx2_netif_stop(bp, true); bnx2_suspend()
8660 del_timer_sync(&bp->timer); bnx2_suspend()
8661 bnx2_shutdown_chip(bp); bnx2_suspend()
8662 __bnx2_free_irq(bp); bnx2_suspend()
8663 bnx2_free_skbs(bp); bnx2_suspend()
8665 bnx2_setup_wol(bp); bnx2_suspend()
8674 struct bnx2 *bp = netdev_priv(dev); bnx2_resume() local
8679 bnx2_set_power_state(bp, PCI_D0); bnx2_resume()
8681 bnx2_request_irq(bp); bnx2_resume()
8682 bnx2_init_nic(bp, 1); bnx2_resume()
8683 bnx2_netif_start(bp, true); bnx2_resume()
8707 struct bnx2 *bp = netdev_priv(dev); bnx2_io_error_detected() local
8718 bnx2_netif_stop(bp, true); bnx2_io_error_detected()
8719 del_timer_sync(&bp->timer); bnx2_io_error_detected()
8720 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); bnx2_io_error_detected()
8739 struct bnx2 *bp = netdev_priv(dev); bnx2_io_slot_reset() local
8753 err = bnx2_init_nic(bp, 1); bnx2_io_slot_reset()
8760 bnx2_napi_enable(bp); bnx2_io_slot_reset()
8765 if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) bnx2_io_slot_reset()
8788 struct bnx2 *bp = netdev_priv(dev); bnx2_io_resume() local
8792 bnx2_netif_start(bp, true); bnx2_io_resume()
8801 struct bnx2 *bp; bnx2_shutdown() local
8806 bp = netdev_priv(dev); bnx2_shutdown()
8807 if (!bp) bnx2_shutdown()
8812 dev_close(bp->dev); bnx2_shutdown()
8815 bnx2_set_power_state(bp, PCI_D3hot); bnx2_shutdown()
H A D b44.c
166 static inline unsigned long br32(const struct b44 *bp, unsigned long reg) br32() argument
168 return ssb_read32(bp->sdev, reg); br32()
171 static inline void bw32(const struct b44 *bp, bw32() argument
174 ssb_write32(bp->sdev, reg, val); bw32()
177 static int b44_wait_bit(struct b44 *bp, unsigned long reg, b44_wait_bit() argument
183 u32 val = br32(bp, reg); b44_wait_bit()
193 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", b44_wait_bit()
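
b44_wait_bit() is a bounded poll: read the register up to 'timeout' times, succeed as soon as the bit reaches the wanted state (set or cleared, per the last argument), and log a timeout otherwise; the real loop also delays briefly between reads. The same pattern with a fake register:

    #include <stdio.h>

    static unsigned fake_reg;               /* stand-in for a device register */
    static unsigned read_reg(void) { fake_reg++; return fake_reg; }

    /* Poll until (reg & bit) matches the wanted state, or give up after
     * 'timeout' reads - the shape of b44_wait_bit(). */
    static int wait_bit(unsigned bit, int timeout, int want_clear)
    {
        int i;

        for (i = 0; i < timeout; i++) {
            unsigned val = read_reg();
            int is_set = (val & bit) != 0;

            if (want_clear ? !is_set : is_set)
                return 0;
        }
        fprintf(stderr, "timeout waiting for bit %#x\n", bit);
        return -1;
    }

    int main(void)
    {
        return wait_bit(0x8, 100, 0) ? 1 : 0; /* succeeds once fake_reg & 8 */
    }
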
201 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) __b44_cam_read() argument
205 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | __b44_cam_read()
208 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); __b44_cam_read()
210 val = br32(bp, B44_CAM_DATA_LO); __b44_cam_read()
217 val = br32(bp, B44_CAM_DATA_HI); __b44_cam_read()
223 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) __b44_cam_write() argument
231 bw32(bp, B44_CAM_DATA_LO, val); __b44_cam_write()
235 bw32(bp, B44_CAM_DATA_HI, val); __b44_cam_write()
236 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | __b44_cam_write()
238 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); __b44_cam_write()
241 static inline void __b44_disable_ints(struct b44 *bp) __b44_disable_ints() argument
243 bw32(bp, B44_IMASK, 0); __b44_disable_ints()
246 static void b44_disable_ints(struct b44 *bp) b44_disable_ints() argument
248 __b44_disable_ints(bp); b44_disable_ints()
251 br32(bp, B44_IMASK); b44_disable_ints()
254 static void b44_enable_ints(struct b44 *bp) b44_enable_ints() argument
256 bw32(bp, B44_IMASK, bp->imask); b44_enable_ints()
259 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) __b44_readphy() argument
263 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); __b44_readphy()
264 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | __b44_readphy()
269 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); __b44_readphy()
270 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; __b44_readphy()
275 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) __b44_writephy() argument
277 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); __b44_writephy()
278 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | __b44_writephy()
284 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); __b44_writephy()
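
__b44_readphy()/__b44_writephy() build an IEEE 802.3 clause-22 MDIO management frame in B44_MDIO_DATA (start bits, opcode, 5-bit PHY address, 5-bit register, turnaround, 16 data bits) and then wait on EMAC_INT_MII for completion. A sketch packing the standard clause-22 field layout; the shifts below are the usual IEEE positions, assumed for illustration rather than copied from the driver's macros:

    #include <stdio.h>
    #include <stdint.h>

    /* Standard clause-22 field positions within a 32-bit management
     * frame (assumed layout for illustration). */
    #define MDIO_ST    (0x1u << 30)  /* start: 01                   */
    #define MDIO_OP_RD (0x2u << 28)  /* opcode: read=10, write=01   */
    #define MDIO_OP_WR (0x1u << 28)
    #define MDIO_TA    (0x2u << 16)  /* turnaround: 10              */

    static uint32_t mdio_frame(int write, unsigned phy, unsigned reg,
                               uint16_t data)
    {
        return MDIO_ST |
               (write ? MDIO_OP_WR : MDIO_OP_RD) |
               ((phy & 0x1fu) << 23) |  /* 5-bit PHY address          */
               ((reg & 0x1fu) << 18) |  /* 5-bit register             */
               MDIO_TA |
               (write ? data : 0);      /* 16 data bits (write only)  */
    }

    int main(void)
    {
        printf("read  frame: %08x\n", (unsigned)mdio_frame(0, 1, 0, 0));
        printf("write frame: %08x\n", (unsigned)mdio_frame(1, 1, 0, 0x1200));
        return 0;
    }
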
287 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) b44_readphy() argument
289 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_readphy()
292 return __b44_readphy(bp, bp->phy_addr, reg, val); b44_readphy()
295 static inline int b44_writephy(struct b44 *bp, int reg, u32 val) b44_writephy() argument
297 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_writephy()
300 return __b44_writephy(bp, bp->phy_addr, reg, val); b44_writephy()
307 struct b44 *bp = netdev_priv(dev); b44_mdio_read_mii() local
308 int rc = __b44_readphy(bp, phy_id, location, &val); b44_mdio_read_mii()
317 struct b44 *bp = netdev_priv(dev); b44_mdio_write_mii() local
318 __b44_writephy(bp, phy_id, location, val); b44_mdio_write_mii()
324 struct b44 *bp = bus->priv; b44_mdio_read_phylib() local
325 int rc = __b44_readphy(bp, phy_id, location, &val); b44_mdio_read_phylib()
334 struct b44 *bp = bus->priv; b44_mdio_write_phylib() local
335 return __b44_writephy(bp, phy_id, location, val); b44_mdio_write_phylib()
338 static int b44_phy_reset(struct b44 *bp) b44_phy_reset() argument
343 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_phy_reset()
345 err = b44_writephy(bp, MII_BMCR, BMCR_RESET); b44_phy_reset()
349 err = b44_readphy(bp, MII_BMCR, &val); b44_phy_reset()
352 netdev_err(bp->dev, "PHY Reset would not complete\n"); b44_phy_reset()
360 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) __b44_set_flow_ctrl() argument
364 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); __b44_set_flow_ctrl()
365 bp->flags |= pause_flags; __b44_set_flow_ctrl()
367 val = br32(bp, B44_RXCONFIG); __b44_set_flow_ctrl()
372 bw32(bp, B44_RXCONFIG, val); __b44_set_flow_ctrl()
374 val = br32(bp, B44_MAC_FLOW); __b44_set_flow_ctrl()
380 bw32(bp, B44_MAC_FLOW, val); __b44_set_flow_ctrl()
383 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) b44_set_flow_ctrl() argument
399 __b44_set_flow_ctrl(bp, pause_enab); b44_set_flow_ctrl()
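
The body of b44_set_flow_ctrl() is elided in this listing, but it derives the pause flags from the local and remote advertisement words in the usual 802.3 way: symmetric pause when both ends advertise PAUSE_CAP, asymmetric RX or TX pause via the ASYM bit. A sketch of that textbook resolution, using the linux/mii.h advertisement bit values:

    #include <stdio.h>

    /* Values from linux/mii.h (same bit positions in local and LPA words). */
    #define ADVERTISE_PAUSE_CAP  0x0400
    #define ADVERTISE_PAUSE_ASYM 0x0800

    #define FLAG_TX_PAUSE 0x1
    #define FLAG_RX_PAUSE 0x2

    /* Standard 802.3 pause resolution; the driver's exact body is not
     * shown above, so this is the textbook form. */
    static unsigned resolve_pause(unsigned local, unsigned remote)
    {
        unsigned flags = 0;

        if (local & ADVERTISE_PAUSE_CAP) {
            if (remote & ADVERTISE_PAUSE_CAP)
                flags |= FLAG_TX_PAUSE | FLAG_RX_PAUSE;
            else if ((local & ADVERTISE_PAUSE_ASYM) &&
                     (remote & ADVERTISE_PAUSE_ASYM))
                flags |= FLAG_RX_PAUSE;
        } else if ((local & ADVERTISE_PAUSE_ASYM) &&
                   (remote & ADVERTISE_PAUSE_CAP) &&
                   (remote & ADVERTISE_PAUSE_ASYM)) {
            flags |= FLAG_TX_PAUSE;
        }
        return flags;
    }

    int main(void)
    {
        printf("%u\n", resolve_pause(ADVERTISE_PAUSE_CAP,
                                     ADVERTISE_PAUSE_CAP)); /* 3: TX+RX */
        return 0;
    }
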
404 static void b44_wap54g10_workaround(struct b44 *bp) b44_wap54g10_workaround() argument
418 err = __b44_readphy(bp, 0, MII_BMCR, &val); b44_wap54g10_workaround()
424 err = __b44_writephy(bp, 0, MII_BMCR, val); b44_wap54g10_workaround()
433 static inline void b44_wap54g10_workaround(struct b44 *bp) b44_wap54g10_workaround() argument
438 static int b44_setup_phy(struct b44 *bp) b44_setup_phy() argument
443 b44_wap54g10_workaround(bp); b44_setup_phy()
445 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_setup_phy()
447 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) b44_setup_phy()
449 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, b44_setup_phy()
452 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) b44_setup_phy()
454 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, b44_setup_phy()
458 if (!(bp->flags & B44_FLAG_FORCE_LINK)) { b44_setup_phy()
461 if (bp->flags & B44_FLAG_ADV_10HALF) b44_setup_phy()
463 if (bp->flags & B44_FLAG_ADV_10FULL) b44_setup_phy()
465 if (bp->flags & B44_FLAG_ADV_100HALF) b44_setup_phy()
467 if (bp->flags & B44_FLAG_ADV_100FULL) b44_setup_phy()
470 if (bp->flags & B44_FLAG_PAUSE_AUTO) b44_setup_phy()
473 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) b44_setup_phy()
475 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | b44_setup_phy()
481 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) b44_setup_phy()
484 if (bp->flags & B44_FLAG_100_BASE_T) b44_setup_phy()
486 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_setup_phy()
488 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0) b44_setup_phy()
495 b44_set_flow_ctrl(bp, 0, 0); b44_setup_phy()
502 static void b44_stats_update(struct b44 *bp) b44_stats_update() argument
507 val = &bp->hw_stats.tx_good_octets; b44_stats_update()
508 u64_stats_update_begin(&bp->hw_stats.syncp); b44_stats_update()
511 *val++ += br32(bp, reg); b44_stats_update()
518 *val++ += br32(bp, reg); b44_stats_update()
521 u64_stats_update_end(&bp->hw_stats.syncp); b44_stats_update()
524 static void b44_link_report(struct b44 *bp) b44_link_report() argument
526 if (!netif_carrier_ok(bp->dev)) { b44_link_report()
527 netdev_info(bp->dev, "Link is down\n"); b44_link_report()
529 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", b44_link_report()
530 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, b44_link_report()
531 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); b44_link_report()
533 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", b44_link_report()
534 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", b44_link_report()
535 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); b44_link_report()
539 static void b44_check_phy(struct b44 *bp) b44_check_phy() argument
543 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_check_phy()
544 bp->flags |= B44_FLAG_100_BASE_T; b44_check_phy()
545 if (!netif_carrier_ok(bp->dev)) { b44_check_phy()
546 u32 val = br32(bp, B44_TX_CTRL); b44_check_phy()
547 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_check_phy()
551 bw32(bp, B44_TX_CTRL, val); b44_check_phy()
552 netif_carrier_on(bp->dev); b44_check_phy()
553 b44_link_report(bp); b44_check_phy()
558 if (!b44_readphy(bp, MII_BMSR, &bmsr) && b44_check_phy()
559 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && b44_check_phy()
562 bp->flags |= B44_FLAG_100_BASE_T; b44_check_phy()
564 bp->flags &= ~B44_FLAG_100_BASE_T; b44_check_phy()
566 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_check_phy()
568 bp->flags &= ~B44_FLAG_FULL_DUPLEX; b44_check_phy()
570 if (!netif_carrier_ok(bp->dev) && b44_check_phy()
572 u32 val = br32(bp, B44_TX_CTRL); b44_check_phy()
575 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_check_phy()
579 bw32(bp, B44_TX_CTRL, val); b44_check_phy()
581 if (!(bp->flags & B44_FLAG_FORCE_LINK) && b44_check_phy()
582 !b44_readphy(bp, MII_ADVERTISE, &local_adv) && b44_check_phy()
583 !b44_readphy(bp, MII_LPA, &remote_adv)) b44_check_phy()
584 b44_set_flow_ctrl(bp, local_adv, remote_adv); b44_check_phy()
587 netif_carrier_on(bp->dev); b44_check_phy()
588 b44_link_report(bp); b44_check_phy()
589 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { b44_check_phy()
591 netif_carrier_off(bp->dev); b44_check_phy()
592 b44_link_report(bp); b44_check_phy()
596 netdev_warn(bp->dev, "Remote fault detected in PHY\n"); b44_check_phy()
598 netdev_warn(bp->dev, "Jabber detected in PHY\n"); b44_check_phy()
604 struct b44 *bp = (struct b44 *) __opaque; b44_timer() local
606 spin_lock_irq(&bp->lock); b44_timer()
608 b44_check_phy(bp); b44_timer()
610 b44_stats_update(bp); b44_timer()
612 spin_unlock_irq(&bp->lock); b44_timer()
614 mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); b44_timer()
617 static void b44_tx(struct b44 *bp) b44_tx() argument
622 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; b44_tx()
626 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { b44_tx()
627 struct ring_info *rp = &bp->tx_buffers[cons]; b44_tx()
632 dma_unmap_single(bp->sdev->dma_dev, b44_tx()
644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); b44_tx()
645 bp->tx_cons = cons; b44_tx()
646 if (netif_queue_stopped(bp->dev) && b44_tx()
647 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) b44_tx()
648 netif_wake_queue(bp->dev); b44_tx()
650 bw32(bp, B44_GPTIMER, 0); b44_tx()
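
b44_tx() reclaims completed transmit buffers by walking its software consumer index up to the hardware position read from B44_DMATX_STAT, unmapping and freeing each entry, then waking the queue once enough slots are free. The ring walk in isolation, assuming a power-of-two ring so NEXT() is a masked increment:

    #include <stdio.h>

    #define RING_SIZE 8                 /* power of two (assumption) */
    #define NEXT(i)   (((i) + 1) & (RING_SIZE - 1))

    static int buf[RING_SIZE];          /* stand-in for skb pointers */

    /* Walk cons up to the hardware's 'cur', releasing each entry - the
     * shape of: for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) */
    static unsigned reclaim(unsigned cons, unsigned cur)
    {
        while (cons != cur) {
            buf[cons] = 0;              /* dma_unmap + free in the driver */
            printf("freed slot %u\n", cons);
            cons = NEXT(cons);
        }
        return cons;                    /* new bp->tx_cons */
    }

    int main(void)
    {
        unsigned tx_cons = 6;

        tx_cons = reclaim(tx_cons, 2);  /* wraps: frees 6,7,0,1 */
        printf("tx_cons=%u\n", tx_cons);
        return 0;
    }
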
658 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) b44_alloc_rx_skb() argument
670 src_map = &bp->rx_buffers[src_idx]; b44_alloc_rx_skb()
672 map = &bp->rx_buffers[dest_idx]; b44_alloc_rx_skb()
673 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); b44_alloc_rx_skb()
677 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, b44_alloc_rx_skb()
683 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || b44_alloc_rx_skb()
686 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_alloc_rx_skb()
687 dma_unmap_single(bp->sdev->dma_dev, mapping, b44_alloc_rx_skb()
693 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, b44_alloc_rx_skb()
696 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || b44_alloc_rx_skb()
698 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_alloc_rx_skb()
699 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); b44_alloc_rx_skb()
703 bp->force_copybreak = 1; b44_alloc_rx_skb()
721 dp = &bp->rx_ring[dest_idx]; b44_alloc_rx_skb()
723 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); b44_alloc_rx_skb()
725 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_alloc_rx_skb()
726 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, b44_alloc_rx_skb()
733 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) b44_recycle_rx() argument
742 dest_desc = &bp->rx_ring[dest_idx]; b44_recycle_rx()
743 dest_map = &bp->rx_buffers[dest_idx]; b44_recycle_rx()
744 src_desc = &bp->rx_ring[src_idx]; b44_recycle_rx()
745 src_map = &bp->rx_buffers[src_idx]; b44_recycle_rx()
753 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_recycle_rx()
754 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, b44_recycle_rx()
769 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_recycle_rx()
770 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, b44_recycle_rx()
774 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, b44_recycle_rx()
779 static int b44_rx(struct b44 *bp, int budget) b44_rx() argument
785 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; b44_rx()
787 cons = bp->rx_cons; b44_rx()
790 struct ring_info *rp = &bp->rx_buffers[cons]; b44_rx()
796 dma_sync_single_for_cpu(bp->sdev->dma_dev, map, b44_rx()
804 b44_recycle_rx(bp, cons, bp->rx_prod); b44_rx()
806 bp->dev->stats.rx_dropped++; b44_rx()
825 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { b44_rx()
827 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); b44_rx()
830 dma_unmap_single(bp->sdev->dma_dev, map, b44_rx()
838 b44_recycle_rx(bp, cons, bp->rx_prod); b44_rx()
839 copy_skb = napi_alloc_skb(&bp->napi, len); b44_rx()
850 skb->protocol = eth_type_trans(skb, bp->dev); b44_rx()
855 bp->rx_prod = (bp->rx_prod + 1) & b44_rx()
860 bp->rx_cons = cons; b44_rx()
861 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); b44_rx()
868 struct b44 *bp = container_of(napi, struct b44, napi); b44_poll() local
872 spin_lock_irqsave(&bp->lock, flags); b44_poll()
874 if (bp->istat & (ISTAT_TX | ISTAT_TO)) { b44_poll()
875 /* spin_lock(&bp->tx_lock); */ b44_poll()
876 b44_tx(bp); b44_poll()
877 /* spin_unlock(&bp->tx_lock); */ b44_poll()
879 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ b44_poll()
880 bp->istat &= ~ISTAT_RFO; b44_poll()
881 b44_disable_ints(bp); b44_poll()
882 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ b44_poll()
883 b44_init_rings(bp); b44_poll()
884 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); b44_poll()
885 netif_wake_queue(bp->dev); b44_poll()
888 spin_unlock_irqrestore(&bp->lock, flags); b44_poll()
891 if (bp->istat & ISTAT_RX) b44_poll()
892 work_done += b44_rx(bp, budget); b44_poll()
894 if (bp->istat & ISTAT_ERRORS) { b44_poll()
895 spin_lock_irqsave(&bp->lock, flags); b44_poll()
896 b44_halt(bp); b44_poll()
897 b44_init_rings(bp); b44_poll()
898 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); b44_poll()
899 netif_wake_queue(bp->dev); b44_poll()
900 spin_unlock_irqrestore(&bp->lock, flags); b44_poll()
906 b44_enable_ints(bp); b44_poll()
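
b44_poll() follows the usual NAPI contract: service TX completions, process up to 'budget' RX packets, and only when work_done comes in under budget complete NAPI and re-enable interrupts; otherwise the core keeps polling. A stubbed model of that budget handshake:

    #include <stdio.h>

    static int rx_backlog = 25;         /* pretend packets are queued */

    static int process_rx(int budget)
    {
        int done = rx_backlog < budget ? rx_backlog : budget;

        rx_backlog -= done;
        return done;
    }

    /* One poll round: interrupts are re-enabled only when we finished
     * under budget, as in b44_poll(). */
    static int poll(int budget)
    {
        int work_done = process_rx(budget);

        if (work_done < budget)
            puts("napi_complete + enable ints");
        else
            puts("stay in polling mode");
        return work_done;
    }

    int main(void)
    {
        while (poll(16) == 16)          /* core re-polls while saturated */
            ;
        return 0;
    }
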
915 struct b44 *bp = netdev_priv(dev); b44_interrupt() local
919 spin_lock(&bp->lock); b44_interrupt()
921 istat = br32(bp, B44_ISTAT); b44_interrupt()
922 imask = br32(bp, B44_IMASK); b44_interrupt()
937 if (napi_schedule_prep(&bp->napi)) { b44_interrupt()
941 bp->istat = istat; b44_interrupt()
942 __b44_disable_ints(bp); b44_interrupt()
943 __napi_schedule(&bp->napi); b44_interrupt()
947 bw32(bp, B44_ISTAT, istat); b44_interrupt()
948 br32(bp, B44_ISTAT); b44_interrupt()
950 spin_unlock(&bp->lock); b44_interrupt()
956 struct b44 *bp = netdev_priv(dev); b44_tx_timeout() local
960 spin_lock_irq(&bp->lock); b44_tx_timeout()
962 b44_halt(bp); b44_tx_timeout()
963 b44_init_rings(bp); b44_tx_timeout()
964 b44_init_hw(bp, B44_FULL_RESET); b44_tx_timeout()
966 spin_unlock_irq(&bp->lock); b44_tx_timeout()
968 b44_enable_ints(bp); b44_tx_timeout()
975 struct b44 *bp = netdev_priv(dev); b44_start_xmit() local
982 spin_lock_irqsave(&bp->lock, flags); b44_start_xmit()
985 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { b44_start_xmit()
991 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); b44_start_xmit()
992 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { b44_start_xmit()
996 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_start_xmit()
997 dma_unmap_single(bp->sdev->dma_dev, mapping, len, b44_start_xmit()
1004 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, b44_start_xmit()
1006 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { b44_start_xmit()
1007 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) b44_start_xmit()
1008 dma_unmap_single(bp->sdev->dma_dev, mapping, b44_start_xmit()
1019 entry = bp->tx_prod; b44_start_xmit()
1020 bp->tx_buffers[entry].skb = skb; b44_start_xmit()
1021 bp->tx_buffers[entry].mapping = mapping; b44_start_xmit()
1028 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); b44_start_xmit()
1029 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); b44_start_xmit()
1031 if (bp->flags & B44_FLAG_TX_RING_HACK) b44_start_xmit()
1032 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, b44_start_xmit()
1033 entry * sizeof(bp->tx_ring[0]), b44_start_xmit()
1038 bp->tx_prod = entry; b44_start_xmit()
1042 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); b44_start_xmit()
1043 if (bp->flags & B44_FLAG_BUGGY_TXPTR) b44_start_xmit()
1044 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); b44_start_xmit()
1045 if (bp->flags & B44_FLAG_REORDER_BUG) b44_start_xmit()
1046 br32(bp, B44_DMATX_PTR); b44_start_xmit()
1050 if (TX_BUFFS_AVAIL(bp) < 1) b44_start_xmit()
1054 spin_unlock_irqrestore(&bp->lock, flags); b44_start_xmit()
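
The mapping checks in b44_start_xmit() (and in b44_alloc_rx_skb() above) guard a hardware limit: the DMA engine can only reach the low 30 bits of address space, so a mapping that fails or ends beyond DMA_BIT_MASK(30) forces a fallback to a bounce buffer in reachable memory. The address test in isolation:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DMA_BIT_MASK(n) (((uint64_t)1 << (n)) - 1)

    /* Mirror of the driver's test: bounce if the mapping failed or the
     * buffer end crosses the 30-bit boundary the DMA engine can reach. */
    static int needs_bounce(uint64_t mapping, size_t len, int map_error)
    {
        return map_error || mapping + len > DMA_BIT_MASK(30);
    }

    int main(void)
    {
        printf("%d\n", needs_bounce(0x3fffe000, 0x1000, 0)); /* 0: fits   */
        printf("%d\n", needs_bounce(0x3fffffff, 0x1000, 0)); /* 1: bounce */
        return 0;
    }
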
1065 struct b44 *bp = netdev_priv(dev); b44_change_mtu() local
1078 spin_lock_irq(&bp->lock); b44_change_mtu()
1079 b44_halt(bp); b44_change_mtu()
1081 b44_init_rings(bp); b44_change_mtu()
1082 b44_init_hw(bp, B44_FULL_RESET); b44_change_mtu()
1083 spin_unlock_irq(&bp->lock); b44_change_mtu()
1085 b44_enable_ints(bp); b44_change_mtu()
1094 * end up in the driver. bp->lock is not held and we are not
1097 static void b44_free_rings(struct b44 *bp) b44_free_rings() argument
1103 rp = &bp->rx_buffers[i]; b44_free_rings()
1107 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, b44_free_rings()
1115 rp = &bp->tx_buffers[i]; b44_free_rings()
1119 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, b44_free_rings()
1132 static void b44_init_rings(struct b44 *bp) b44_init_rings() argument
1136 b44_free_rings(bp); b44_init_rings()
1138 memset(bp->rx_ring, 0, B44_RX_RING_BYTES); b44_init_rings()
1139 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); b44_init_rings()
1141 if (bp->flags & B44_FLAG_RX_RING_HACK) b44_init_rings()
1142 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, b44_init_rings()
1145 if (bp->flags & B44_FLAG_TX_RING_HACK) b44_init_rings()
1146 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, b44_init_rings()
1149 for (i = 0; i < bp->rx_pending; i++) { b44_init_rings()
1150 if (b44_alloc_rx_skb(bp, -1, i) < 0) b44_init_rings()
1159 static void b44_free_consistent(struct b44 *bp) b44_free_consistent() argument
1161 kfree(bp->rx_buffers); b44_free_consistent()
1162 bp->rx_buffers = NULL; b44_free_consistent()
1163 kfree(bp->tx_buffers); b44_free_consistent()
1164 bp->tx_buffers = NULL; b44_free_consistent()
1165 if (bp->rx_ring) { b44_free_consistent()
1166 if (bp->flags & B44_FLAG_RX_RING_HACK) { b44_free_consistent()
1167 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, b44_free_consistent()
1169 kfree(bp->rx_ring); b44_free_consistent()
1171 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, b44_free_consistent()
1172 bp->rx_ring, bp->rx_ring_dma); b44_free_consistent()
1173 bp->rx_ring = NULL; b44_free_consistent()
1174 bp->flags &= ~B44_FLAG_RX_RING_HACK; b44_free_consistent()
1176 if (bp->tx_ring) { b44_free_consistent()
1177 if (bp->flags & B44_FLAG_TX_RING_HACK) { b44_free_consistent()
1178 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, b44_free_consistent()
1180 kfree(bp->tx_ring); b44_free_consistent()
1182 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, b44_free_consistent()
1183 bp->tx_ring, bp->tx_ring_dma); b44_free_consistent()
1184 bp->tx_ring = NULL; b44_free_consistent()
1185 bp->flags &= ~B44_FLAG_TX_RING_HACK; b44_free_consistent()
1193 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) b44_alloc_consistent() argument
1198 bp->rx_buffers = kzalloc(size, gfp); b44_alloc_consistent()
1199 if (!bp->rx_buffers) b44_alloc_consistent()
1203 bp->tx_buffers = kzalloc(size, gfp); b44_alloc_consistent()
1204 if (!bp->tx_buffers) b44_alloc_consistent()
1208 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, b44_alloc_consistent()
1209 &bp->rx_ring_dma, gfp); b44_alloc_consistent()
1210 if (!bp->rx_ring) { b44_alloc_consistent()
1221 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, b44_alloc_consistent()
1225 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || b44_alloc_consistent()
1231 bp->rx_ring = rx_ring; b44_alloc_consistent()
1232 bp->rx_ring_dma = rx_ring_dma; b44_alloc_consistent()
1233 bp->flags |= B44_FLAG_RX_RING_HACK; b44_alloc_consistent()
1236 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, b44_alloc_consistent()
1237 &bp->tx_ring_dma, gfp); b44_alloc_consistent()
1238 if (!bp->tx_ring) { b44_alloc_consistent()
1249 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, b44_alloc_consistent()
1253 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || b44_alloc_consistent()
1259 bp->tx_ring = tx_ring; b44_alloc_consistent()
1260 bp->tx_ring_dma = tx_ring_dma; b44_alloc_consistent()
1261 bp->flags |= B44_FLAG_TX_RING_HACK; b44_alloc_consistent()
1267 b44_free_consistent(bp); b44_alloc_consistent()
1271 /* bp->lock is held. */ b44_clear_stats()
1272 static void b44_clear_stats(struct b44 *bp) b44_clear_stats() argument
1276 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); b44_clear_stats()
1278 br32(bp, reg); b44_clear_stats()
1280 br32(bp, reg); b44_clear_stats()
1283 /* bp->lock is held. */ b44_chip_reset()
1284 static void b44_chip_reset(struct b44 *bp, int reset_kind) b44_chip_reset() argument
1286 struct ssb_device *sdev = bp->sdev; b44_chip_reset()
1289 was_enabled = ssb_device_is_enabled(bp->sdev); b44_chip_reset()
1291 ssb_device_enable(bp->sdev, 0); b44_chip_reset()
1295 bw32(bp, B44_RCV_LAZY, 0); b44_chip_reset()
1296 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); b44_chip_reset()
1297 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); b44_chip_reset()
1298 bw32(bp, B44_DMATX_CTRL, 0); b44_chip_reset()
1299 bp->tx_prod = bp->tx_cons = 0; b44_chip_reset()
1300 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { b44_chip_reset()
1301 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, b44_chip_reset()
1304 bw32(bp, B44_DMARX_CTRL, 0); b44_chip_reset()
1305 bp->rx_prod = bp->rx_cons = 0; b44_chip_reset()
1308 b44_clear_stats(bp); b44_chip_reset()
1319 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | b44_chip_reset()
1325 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | b44_chip_reset()
1334 br32(bp, B44_MDIO_CTRL); b44_chip_reset()
1336 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { b44_chip_reset()
1337 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); b44_chip_reset()
1338 br32(bp, B44_ENET_CTRL); b44_chip_reset()
1339 bp->flags |= B44_FLAG_EXTERNAL_PHY; b44_chip_reset()
1341 u32 val = br32(bp, B44_DEVCTRL); b44_chip_reset()
1344 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); b44_chip_reset()
1345 br32(bp, B44_DEVCTRL); b44_chip_reset()
1348 bp->flags &= ~B44_FLAG_EXTERNAL_PHY; b44_chip_reset()
1352 /* bp->lock is held. */ b44_halt()
1353 static void b44_halt(struct b44 *bp) b44_halt() argument
1355 b44_disable_ints(bp); b44_halt()
1357 b44_phy_reset(bp); b44_halt()
1359 netdev_info(bp->dev, "powering down PHY\n"); b44_halt()
1360 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); b44_halt()
1363 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_halt()
1364 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_halt()
1366 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); b44_halt()
1369 /* bp->lock is held. */ __b44_set_mac_addr()
1370 static void __b44_set_mac_addr(struct b44 *bp) __b44_set_mac_addr() argument
1372 bw32(bp, B44_CAM_CTRL, 0); __b44_set_mac_addr()
1373 if (!(bp->dev->flags & IFF_PROMISC)) { __b44_set_mac_addr()
1376 __b44_cam_write(bp, bp->dev->dev_addr, 0); __b44_set_mac_addr()
1377 val = br32(bp, B44_CAM_CTRL); __b44_set_mac_addr()
1378 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); __b44_set_mac_addr()
1384 struct b44 *bp = netdev_priv(dev); b44_set_mac_addr() local
1396 spin_lock_irq(&bp->lock); b44_set_mac_addr()
1398 val = br32(bp, B44_RXCONFIG); b44_set_mac_addr()
1400 __b44_set_mac_addr(bp); b44_set_mac_addr()
1402 spin_unlock_irq(&bp->lock); b44_set_mac_addr()
1408 * packet processing. Invoked with bp->lock held.
1411 static void b44_init_hw(struct b44 *bp, int reset_kind) b44_init_hw() argument
1415 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_init_hw()
1417 b44_phy_reset(bp); b44_init_hw()
1418 b44_setup_phy(bp); b44_init_hw()
1422 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); b44_init_hw()
1423 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); b44_init_hw()
1426 __b44_set_rx_mode(bp->dev); b44_init_hw()
1429 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); b44_init_hw()
1430 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); b44_init_hw()
1432 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ b44_init_hw()
1434 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | b44_init_hw()
1437 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); b44_init_hw()
1438 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); b44_init_hw()
1439 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | b44_init_hw()
1441 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); b44_init_hw()
1443 bw32(bp, B44_DMARX_PTR, bp->rx_pending); b44_init_hw()
1444 bp->rx_prod = bp->rx_pending; b44_init_hw()
1446 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); b44_init_hw()
1449 val = br32(bp, B44_ENET_CTRL); b44_init_hw()
1450 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); b44_init_hw()
1452 netdev_reset_queue(bp->dev); b44_init_hw()
1457 struct b44 *bp = netdev_priv(dev); b44_open() local
1460 err = b44_alloc_consistent(bp, GFP_KERNEL); b44_open()
1464 napi_enable(&bp->napi); b44_open()
1466 b44_init_rings(bp); b44_open()
1467 b44_init_hw(bp, B44_FULL_RESET); b44_open()
1469 b44_check_phy(bp); b44_open()
1473 napi_disable(&bp->napi); b44_open()
1474 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); b44_open()
1475 b44_free_rings(bp); b44_open()
1476 b44_free_consistent(bp); b44_open()
1480 init_timer(&bp->timer); b44_open()
1481 bp->timer.expires = jiffies + HZ; b44_open()
1482 bp->timer.data = (unsigned long) bp; b44_open()
1483 bp->timer.function = b44_timer; b44_open()
1484 add_timer(&bp->timer); b44_open()
1486 b44_enable_ints(bp); b44_open()
1488 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_open()
1489 phy_start(bp->phydev); b44_open()
1509 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) bwfilter_table() argument
1515 bw32(bp, B44_FILT_ADDR, table_offset + i); bwfilter_table()
1516 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); bwfilter_table()
1549 static void b44_setup_pseudo_magicp(struct b44 *bp) b44_setup_pseudo_magicp() argument
1563 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp()
1566 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); b44_setup_pseudo_magicp()
1567 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); b44_setup_pseudo_magicp()
1572 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp()
1575 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, b44_setup_pseudo_magicp()
1577 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, b44_setup_pseudo_magicp()
1583 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, b44_setup_pseudo_magicp()
1586 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, b44_setup_pseudo_magicp()
1588 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, b44_setup_pseudo_magicp()
1595 bw32(bp, B44_WKUP_LEN, val); b44_setup_pseudo_magicp()
1598 val = br32(bp, B44_DEVCTRL); b44_setup_pseudo_magicp()
1599 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); b44_setup_pseudo_magicp()
1604 static void b44_setup_wol_pci(struct b44 *bp) b44_setup_wol_pci() argument
1608 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { b44_setup_wol_pci()
1609 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); b44_setup_wol_pci()
1610 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); b44_setup_wol_pci()
1611 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); b44_setup_wol_pci()
1615 static inline void b44_setup_wol_pci(struct b44 *bp) { } b44_setup_wol_pci() argument
1618 static void b44_setup_wol(struct b44 *bp) b44_setup_wol() argument
1622 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); b44_setup_wol()
1624 if (bp->flags & B44_FLAG_B0_ANDLATER) { b44_setup_wol()
1626 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); b44_setup_wol()
1628 val = bp->dev->dev_addr[2] << 24 | b44_setup_wol()
1629 bp->dev->dev_addr[3] << 16 | b44_setup_wol()
1630 bp->dev->dev_addr[4] << 8 | b44_setup_wol()
1631 bp->dev->dev_addr[5]; b44_setup_wol()
1632 bw32(bp, B44_ADDR_LO, val); b44_setup_wol()
1634 val = bp->dev->dev_addr[0] << 8 | b44_setup_wol()
1635 bp->dev->dev_addr[1]; b44_setup_wol()
1636 bw32(bp, B44_ADDR_HI, val); b44_setup_wol()
1638 val = br32(bp, B44_DEVCTRL); b44_setup_wol()
1639 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); b44_setup_wol()
1642 b44_setup_pseudo_magicp(bp); b44_setup_wol()
1644 b44_setup_wol_pci(bp); b44_setup_wol()
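
b44_setup_pseudo_magicp() programs the wakeup filter with a magic-packet template produced by b44_magic_pattern() at several offsets (raw Ethernet, UDP/IPv4, and UDP/IPv6 framings). The magic payload itself is the standard one: six 0xFF bytes followed by the station MAC repeated sixteen times. A standalone builder for that payload:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define MAGIC_LEN (6 + 16 * 6)      /* sync + 16 copies of the MAC */

    /* Build the standard Wake-on-LAN magic payload: FF x6 then MAC x16. */
    static void magic_pattern(const uint8_t mac[6], uint8_t out[MAGIC_LEN])
    {
        int i;

        memset(out, 0xff, 6);
        for (i = 0; i < 16; i++)
            memcpy(out + 6 + i * 6, mac, 6);
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
        uint8_t buf[MAGIC_LEN];
        int i;

        magic_pattern(mac, buf);
        for (i = 0; i < MAGIC_LEN; i++)
            printf("%02x%s", buf[i], (i % 6 == 5) ? "\n" : " ");
        return 0;
    }
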
1649 struct b44 *bp = netdev_priv(dev); b44_close() local
1653 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_close()
1654 phy_stop(bp->phydev); b44_close()
1656 napi_disable(&bp->napi); b44_close()
1658 del_timer_sync(&bp->timer); b44_close()
1660 spin_lock_irq(&bp->lock); b44_close()
1662 b44_halt(bp); b44_close()
1663 b44_free_rings(bp); b44_close()
1666 spin_unlock_irq(&bp->lock); b44_close()
1670 if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_close()
1671 b44_init_hw(bp, B44_PARTIAL_RESET); b44_close()
1672 b44_setup_wol(bp); b44_close()
1675 b44_free_consistent(bp); b44_close()
1683 struct b44 *bp = netdev_priv(dev); b44_get_stats64() local
1684 struct b44_hw_stats *hwstat = &bp->hw_stats; b44_get_stats64()
1727 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) __b44_load_mcast() argument
1737 __b44_cam_write(bp, ha->addr, i++ + 1); netdev_for_each_mc_addr()
1744 struct b44 *bp = netdev_priv(dev); __b44_set_rx_mode() local
1747 val = br32(bp, B44_RXCONFIG); __b44_set_rx_mode()
1751 bw32(bp, B44_RXCONFIG, val); __b44_set_rx_mode()
1756 __b44_set_mac_addr(bp); __b44_set_rx_mode()
1762 i = __b44_load_mcast(bp, dev); __b44_set_rx_mode()
1765 __b44_cam_write(bp, zero, i); __b44_set_rx_mode()
1767 bw32(bp, B44_RXCONFIG, val); __b44_set_rx_mode()
1768 val = br32(bp, B44_CAM_CTRL); __b44_set_rx_mode()
1769 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); __b44_set_rx_mode()
1775 struct b44 *bp = netdev_priv(dev); b44_set_rx_mode() local
1777 spin_lock_irq(&bp->lock); b44_set_rx_mode()
1779 spin_unlock_irq(&bp->lock); b44_set_rx_mode()
1784 struct b44 *bp = netdev_priv(dev); b44_get_msglevel() local
1785 return bp->msg_enable; b44_get_msglevel()
1790 struct b44 *bp = netdev_priv(dev); b44_set_msglevel() local
1791 bp->msg_enable = value; b44_set_msglevel()
1796 struct b44 *bp = netdev_priv(dev); b44_get_drvinfo() local
1797 struct ssb_bus *bus = bp->sdev->bus; b44_get_drvinfo()
1817 struct b44 *bp = netdev_priv(dev); b44_nway_reset() local
1821 spin_lock_irq(&bp->lock); b44_nway_reset()
1822 b44_readphy(bp, MII_BMCR, &bmcr); b44_nway_reset()
1823 b44_readphy(bp, MII_BMCR, &bmcr); b44_nway_reset()
1826 b44_writephy(bp, MII_BMCR, b44_nway_reset()
1830 spin_unlock_irq(&bp->lock); b44_nway_reset()
1837 struct b44 *bp = netdev_priv(dev); b44_get_settings() local
1839 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_get_settings()
1840 BUG_ON(!bp->phydev); b44_get_settings()
1841 return phy_ethtool_gset(bp->phydev, cmd); b44_get_settings()
1852 if (bp->flags & B44_FLAG_ADV_10HALF) b44_get_settings()
1854 if (bp->flags & B44_FLAG_ADV_10FULL) b44_get_settings()
1856 if (bp->flags & B44_FLAG_ADV_100HALF) b44_get_settings()
1858 if (bp->flags & B44_FLAG_ADV_100FULL) b44_get_settings()
1861 ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ? b44_get_settings()
1863 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? b44_get_settings()
1866 cmd->phy_address = bp->phy_addr; b44_get_settings()
1867 cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ? b44_get_settings()
1869 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? b44_get_settings()
1884 struct b44 *bp = netdev_priv(dev); b44_set_settings() local
1888 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_set_settings()
1889 BUG_ON(!bp->phydev); b44_set_settings()
1890 spin_lock_irq(&bp->lock); b44_set_settings()
1892 b44_setup_phy(bp); b44_set_settings()
1894 ret = phy_ethtool_sset(bp->phydev, cmd); b44_set_settings()
1896 spin_unlock_irq(&bp->lock); b44_set_settings()
1916 spin_lock_irq(&bp->lock); b44_set_settings()
1919 bp->flags &= ~(B44_FLAG_FORCE_LINK | b44_set_settings()
1927 bp->flags |= (B44_FLAG_ADV_10HALF | b44_set_settings()
1933 bp->flags |= B44_FLAG_ADV_10HALF; b44_set_settings()
1935 bp->flags |= B44_FLAG_ADV_10FULL; b44_set_settings()
1937 bp->flags |= B44_FLAG_ADV_100HALF; b44_set_settings()
1939 bp->flags |= B44_FLAG_ADV_100FULL; b44_set_settings()
1942 bp->flags |= B44_FLAG_FORCE_LINK; b44_set_settings()
1943 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); b44_set_settings()
1945 bp->flags |= B44_FLAG_100_BASE_T; b44_set_settings()
1947 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_set_settings()
1951 b44_setup_phy(bp); b44_set_settings()
1953 spin_unlock_irq(&bp->lock); b44_set_settings()
1961 struct b44 *bp = netdev_priv(dev); b44_get_ringparam() local
1964 ering->rx_pending = bp->rx_pending; b44_get_ringparam()
1972 struct b44 *bp = netdev_priv(dev); b44_set_ringparam() local
1980 spin_lock_irq(&bp->lock); b44_set_ringparam()
1982 bp->rx_pending = ering->rx_pending; b44_set_ringparam()
1983 bp->tx_pending = ering->tx_pending; b44_set_ringparam()
1985 b44_halt(bp); b44_set_ringparam()
1986 b44_init_rings(bp); b44_set_ringparam()
1987 b44_init_hw(bp, B44_FULL_RESET); b44_set_ringparam()
1988 netif_wake_queue(bp->dev); b44_set_ringparam()
1989 spin_unlock_irq(&bp->lock); b44_set_ringparam()
1991 b44_enable_ints(bp); b44_set_ringparam()
1999 struct b44 *bp = netdev_priv(dev); b44_get_pauseparam() local
2002 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; b44_get_pauseparam()
2004 (bp->flags & B44_FLAG_RX_PAUSE) != 0; b44_get_pauseparam()
2006 (bp->flags & B44_FLAG_TX_PAUSE) != 0; b44_get_pauseparam()
2012 struct b44 *bp = netdev_priv(dev); b44_set_pauseparam() local
2014 spin_lock_irq(&bp->lock); b44_set_pauseparam()
2016 bp->flags |= B44_FLAG_PAUSE_AUTO; b44_set_pauseparam()
2018 bp->flags &= ~B44_FLAG_PAUSE_AUTO; b44_set_pauseparam()
2020 bp->flags |= B44_FLAG_RX_PAUSE; b44_set_pauseparam()
2022 bp->flags &= ~B44_FLAG_RX_PAUSE; b44_set_pauseparam()
2024 bp->flags |= B44_FLAG_TX_PAUSE; b44_set_pauseparam()
2026 bp->flags &= ~B44_FLAG_TX_PAUSE; b44_set_pauseparam()
2027 if (bp->flags & B44_FLAG_PAUSE_AUTO) { b44_set_pauseparam()
2028 b44_halt(bp); b44_set_pauseparam()
2029 b44_init_rings(bp); b44_set_pauseparam()
2030 b44_init_hw(bp, B44_FULL_RESET); b44_set_pauseparam()
2032 __b44_set_flow_ctrl(bp, bp->flags); b44_set_pauseparam()
2034 spin_unlock_irq(&bp->lock); b44_set_pauseparam()
2036 b44_enable_ints(bp); b44_set_pauseparam()
2063 struct b44 *bp = netdev_priv(dev); b44_get_ethtool_stats() local
2064 struct b44_hw_stats *hwstat = &bp->hw_stats; b44_get_ethtool_stats()
2069 spin_lock_irq(&bp->lock); b44_get_ethtool_stats()
2070 b44_stats_update(bp); b44_get_ethtool_stats()
2071 spin_unlock_irq(&bp->lock); b44_get_ethtool_stats()
2086 struct b44 *bp = netdev_priv(dev); b44_get_wol() local
2089 if (bp->flags & B44_FLAG_WOL_ENABLE) b44_get_wol()
2098 struct b44 *bp = netdev_priv(dev); b44_set_wol() local
2100 spin_lock_irq(&bp->lock); b44_set_wol()
2102 bp->flags |= B44_FLAG_WOL_ENABLE; b44_set_wol()
2104 bp->flags &= ~B44_FLAG_WOL_ENABLE; b44_set_wol()
2105 spin_unlock_irq(&bp->lock); b44_set_wol()
2107 device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC); b44_set_wol()
2132 struct b44 *bp = netdev_priv(dev); b44_ioctl() local
2138 spin_lock_irq(&bp->lock); b44_ioctl()
2139 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_ioctl()
2140 BUG_ON(!bp->phydev); b44_ioctl()
2141 err = phy_mii_ioctl(bp->phydev, ifr, cmd); b44_ioctl()
2143 err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL); b44_ioctl()
2145 spin_unlock_irq(&bp->lock); b44_ioctl()
2150 static int b44_get_invariants(struct b44 *bp) b44_get_invariants() argument
2152 struct ssb_device *sdev = bp->sdev; b44_get_invariants()
2156 bp->dma_offset = ssb_dma_translation(sdev); b44_get_invariants()
2161 bp->phy_addr = sdev->bus->sprom.et1phyaddr; b44_get_invariants()
2164 bp->phy_addr = sdev->bus->sprom.et0phyaddr; b44_get_invariants()
2169 bp->phy_addr &= 0x1F; b44_get_invariants()
2171 memcpy(bp->dev->dev_addr, addr, ETH_ALEN); b44_get_invariants()
2173 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ b44_get_invariants()
2178 bp->imask = IMASK_DEF; b44_get_invariants()
2181 bp->flags |= B44_FLAG_BUGGY_TXPTR; b44_get_invariants()
2184 if (bp->sdev->id.revision >= 7) b44_get_invariants()
2185 bp->flags |= B44_FLAG_B0_ANDLATER; b44_get_invariants()
2208 struct b44 *bp = netdev_priv(dev); b44_adjust_link() local
2209 struct phy_device *phydev = bp->phydev; b44_adjust_link()
2214 if (bp->old_link != phydev->link) { b44_adjust_link()
2216 bp->old_link = phydev->link; b44_adjust_link()
2222 (bp->flags & B44_FLAG_FULL_DUPLEX)) { b44_adjust_link()
2224 bp->flags &= ~B44_FLAG_FULL_DUPLEX; b44_adjust_link()
2226 !(bp->flags & B44_FLAG_FULL_DUPLEX)) { b44_adjust_link()
2228 bp->flags |= B44_FLAG_FULL_DUPLEX; b44_adjust_link()
2233 u32 val = br32(bp, B44_TX_CTRL); b44_adjust_link()
2234 if (bp->flags & B44_FLAG_FULL_DUPLEX) b44_adjust_link()
2238 bw32(bp, B44_TX_CTRL, val); b44_adjust_link()
2243 static int b44_register_phy_one(struct b44 *bp) b44_register_phy_one() argument
2246 struct ssb_device *sdev = bp->sdev; b44_register_phy_one()
2259 mii_bus->priv = bp; b44_register_phy_one()
2264 mii_bus->phy_mask = ~(1 << bp->phy_addr); b44_register_phy_one()
2275 bp->mii_bus = mii_bus; b44_register_phy_one()
2283 if (!bp->mii_bus->phy_map[bp->phy_addr] && b44_register_phy_one()
2288 bp->phy_addr); b44_register_phy_one()
2290 bp->phy_addr = 0; b44_register_phy_one()
2292 bp->phy_addr); b44_register_phy_one()
2295 bp->phy_addr); b44_register_phy_one()
2298 phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, b44_register_phy_one()
2302 bp->phy_addr); b44_register_phy_one()
2314 bp->phydev = phydev; b44_register_phy_one()
2315 bp->old_link = 0; b44_register_phy_one()
2316 bp->phy_addr = phydev->addr; b44_register_phy_one()
2336 static void b44_unregister_phy_one(struct b44 *bp) b44_unregister_phy_one() argument
2338 struct mii_bus *mii_bus = bp->mii_bus; b44_unregister_phy_one()
2340 phy_disconnect(bp->phydev); b44_unregister_phy_one()
2350 struct b44 *bp; b44_init_one() local
2357 dev = alloc_etherdev(sizeof(*bp)); b44_init_one()
2368 bp = netdev_priv(dev); b44_init_one()
2369 bp->sdev = sdev; b44_init_one()
2370 bp->dev = dev; b44_init_one()
2371 bp->force_copybreak = 0; b44_init_one()
2373 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); b44_init_one()
2375 spin_lock_init(&bp->lock); b44_init_one()
2377 bp->rx_pending = B44_DEF_RX_RING_PENDING; b44_init_one()
2378 bp->tx_pending = B44_DEF_TX_RING_PENDING; b44_init_one()
2381 netif_napi_add(dev, &bp->napi, b44_poll, 64); b44_init_one()
2399 err = b44_get_invariants(bp); b44_init_one()
2406 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { b44_init_one()
2412 bp->mii_if.dev = dev; b44_init_one()
2413 bp->mii_if.mdio_read = b44_mdio_read_mii; b44_init_one()
2414 bp->mii_if.mdio_write = b44_mdio_write_mii; b44_init_one()
2415 bp->mii_if.phy_id = bp->phy_addr; b44_init_one()
2416 bp->mii_if.phy_id_mask = 0x1f; b44_init_one()
2417 bp->mii_if.reg_num_mask = 0x1f; b44_init_one()
2420 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | b44_init_one()
2424 bp->flags |= B44_FLAG_PAUSE_AUTO; b44_init_one()
2439 b44_chip_reset(bp, B44_CHIP_RESET_FULL); b44_init_one()
2442 err = b44_phy_reset(bp); b44_init_one()
2448 if (bp->flags & B44_FLAG_EXTERNAL_PHY) { b44_init_one()
2449 err = b44_register_phy_one(bp); b44_init_one()
2467 netif_napi_del(&bp->napi); b44_init_one()
2477 struct b44 *bp = netdev_priv(dev); b44_remove_one() local
2480 if (bp->flags & B44_FLAG_EXTERNAL_PHY) b44_remove_one()
2481 b44_unregister_phy_one(bp); b44_remove_one()
2484 netif_napi_del(&bp->napi); b44_remove_one()
2493 struct b44 *bp = netdev_priv(dev); b44_suspend() local
2498 del_timer_sync(&bp->timer); b44_suspend()
2500 spin_lock_irq(&bp->lock); b44_suspend()
2502 b44_halt(bp); b44_suspend()
2503 netif_carrier_off(bp->dev); b44_suspend()
2504 netif_device_detach(bp->dev); b44_suspend()
2505 b44_free_rings(bp); b44_suspend()
2507 spin_unlock_irq(&bp->lock); b44_suspend()
2510 if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_suspend()
2511 b44_init_hw(bp, B44_PARTIAL_RESET); b44_suspend()
2512 b44_setup_wol(bp); b44_suspend()
2522 struct b44 *bp = netdev_priv(dev); b44_resume() local
2535 spin_lock_irq(&bp->lock); b44_resume()
2536 b44_init_rings(bp); b44_resume()
2537 b44_init_hw(bp, B44_FULL_RESET); b44_resume()
2538 spin_unlock_irq(&bp->lock); b44_resume()
2548 spin_lock_irq(&bp->lock); b44_resume()
2549 b44_halt(bp); b44_resume()
2550 b44_free_rings(bp); b44_resume()
2551 spin_unlock_irq(&bp->lock); b44_resume()
2555 netif_device_attach(bp->dev); b44_resume()
2557 b44_enable_ints(bp); b44_resume()
2560 mod_timer(&bp->timer, jiffies + 1); b44_resume()
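
The b44.c hits above cluster around one reconfiguration pattern: every ethtool path that changes hardware state (b44_set_ringparam, b44_set_pauseparam) takes bp->lock with interrupts off, halts the chip, rebuilds the rings, re-runs a full hardware init, drops the lock, and only then re-enables interrupts. Below is a minimal compilable model of that ordering; the model_* helpers are invented stand-ins for the b44_* calls, and a pthread mutex stands in for the driver's IRQ-disabling spinlock.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the b44 state and helpers seen in the hits. */
struct b44_model {
	pthread_mutex_t lock;   /* models spin_lock_irq(&bp->lock) */
	int rx_pending, tx_pending;
};

static void model_halt(struct b44_model *bp)        { puts("halt MAC/DMA"); }
static void model_init_rings(struct b44_model *bp)  { puts("rebuild rx/tx rings"); }
static void model_init_hw(struct b44_model *bp)     { puts("full hardware re-init"); }
static void model_enable_ints(struct b44_model *bp) { puts("unmask interrupts"); }

/* Same ordering as b44_set_ringparam(): everything that touches hardware
 * state happens under the lock; interrupt enable comes last, after the
 * lock is dropped, so the ISR never sees a half-built ring. */
static void model_set_ringparam(struct b44_model *bp, int rx, int tx)
{
	pthread_mutex_lock(&bp->lock);
	bp->rx_pending = rx;
	bp->tx_pending = tx;
	model_halt(bp);
	model_init_rings(bp);
	model_init_hw(bp);
	pthread_mutex_unlock(&bp->lock);
	model_enable_ints(bp);
}

int main(void)
{
	struct b44_model bp = { .lock = PTHREAD_MUTEX_INITIALIZER };
	model_set_ringparam(&bp, 200, 511);
	return 0;
}
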
H A Dbnx2_fw.h22 .bp = BNX2_COM_CPU_HW_BREAKPOINT,
38 .bp = BNX2_CP_CPU_HW_BREAKPOINT,
54 .bp = BNX2_RXP_CPU_HW_BREAKPOINT,
70 .bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
86 .bp = BNX2_TXP_CPU_HW_BREAKPOINT,
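
The bnx2_fw.h hits are all the same field of a per-CPU register-map table: each on-chip processor (COM, CP, RXP, TPAT, TXP) gets one descriptor whose .bp member holds that CPU's hardware-breakpoint register constant. A compilable sketch of that table shape follows; the struct is a miniature invented for illustration and the register offsets are placeholders, not the chip's real values.

#include <stdio.h>

/* Invented miniature of a per-CPU descriptor: only the .bp
 * (hardware breakpoint register) field mirrors the hits above. */
struct cpu_reg_model {
	const char *name;
	unsigned int bp;	/* HW breakpoint register offset (placeholder) */
};

static const struct cpu_reg_model cpu_regs[] = {
	{ .name = "COM",  .bp = 0x105008 },
	{ .name = "CP",   .bp = 0x185008 },
	{ .name = "RXP",  .bp = 0x0c5008 },
	{ .name = "TPAT", .bp = 0x085008 },
	{ .name = "TXP",  .bp = 0x045008 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(cpu_regs) / sizeof(cpu_regs[0]); i++)
		printf("%-4s bp reg @ 0x%06x\n", cpu_regs[i].name, cpu_regs[i].bp);
	return 0;
}
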
H A Dcnic.h368 #define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
405 #define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
406 (BP_VN(bp) << 17) | (x))
410 #define BNX2X_CL_QZONE_ID(bp, cli) \
411 (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
412 cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
416 (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
417 ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
422 (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
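
The cnic.h hits spell out how a host connection ID becomes a hardware CID: BNX2X_HW_CID(bp, x) places the port index at bit 23, the virtual-NIC index at bit 17, and keeps the raw CID in the low bits. A standalone model of that packing is below; the shift values come straight from the hits, while the example field widths are an inference, not taken from the source.

#include <stdint.h>
#include <stdio.h>

/* Models BNX2X_HW_CID(bp, x) = (port << 23) | (vn << 17) | x.
 * Treating port as 1 bit and vn as 6 bits is an assumption here. */
static uint32_t hw_cid_model(uint32_t port, uint32_t vn, uint32_t cid)
{
	return (port << 23) | (vn << 17) | cid;
}

int main(void)
{
	/* e.g. port 1, vn 2, raw cid 0x11 -> 0x00840011 */
	printf("hw cid = 0x%08x\n", hw_cid_model(1, 2, 0x11));
	return 0;
}
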
H A Dcnic.c1193 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_alloc_bnx2x_context() local
1211 if (!CHIP_IS_E1(bp)) cnic_alloc_bnx2x_context()
1241 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_alloc_bnx2x_resc() local
1251 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_alloc_bnx2x_resc()
1299 if (CNIC_SUPPORTS_FCOE(bp)) { cnic_alloc_bnx2x_resc()
1393 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_submit_kwqe_16() local
1401 BNX2X_HW_CID(bp, cid))); cnic_submit_kwqe_16()
1404 type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & cnic_submit_kwqe_16()
1442 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_set_tcp_options() local
1454 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags); cnic_bnx2x_set_tcp_options()
1457 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags); cnic_bnx2x_set_tcp_options()
1463 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_init1() local
1466 u32 pfid = bp->pfid; cnic_bnx2x_iscsi_init1()
1549 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_init2() local
1550 u32 pfid = bp->pfid; cnic_bnx2x_iscsi_init2()
1689 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_setup_bnx2x_ctx() local
1698 u32 hw_cid = BNX2X_HW_CID(bp, cid); cnic_setup_bnx2x_ctx()
1702 u8 port = BP_PORT(bp); cnic_setup_bnx2x_ctx()
1756 if (BNX2X_CHIP_IS_E2_PLUS(bp) && cnic_setup_bnx2x_ctx()
1757 bp->common.chip_port_mode == CHIP_2_PORT_MODE) { cnic_setup_bnx2x_ctx()
1878 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_iscsi_ofld1() local
1932 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); cnic_bnx2x_iscsi_ofld1()
1968 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_destroy_ramrod() local
1977 hw_cid = BNX2X_HW_CID(bp, ctx->cid); cnic_bnx2x_destroy_ramrod()
2085 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_mac() local
2086 u32 pfid = bp->pfid; cnic_init_bnx2x_mac()
2123 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_connect() local
2192 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); cnic_bnx2x_connect()
2261 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_stat() local
2266 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_stat()
2285 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_init1() local
2330 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_init1()
2343 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_ofld1() local
2386 u32 hw_cid = BNX2X_HW_CID(bp, cid); cnic_bnx2x_fcoe_ofld1()
2410 cid = BNX2X_HW_CID(bp, cid); cnic_bnx2x_fcoe_ofld1()
2568 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_bnx2x_fcoe_fw_destroy() local
2575 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); cnic_bnx2x_fcoe_fw_destroy()
2732 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_submit_bnx2x_fcoe_kwqes() local
2740 if (!BNX2X_CHIP_IS_E2_PLUS(bp)) cnic_submit_bnx2x_fcoe_kwqes()
3056 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_ack_bnx2x_int() local
3057 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + cnic_ack_bnx2x_int()
3144 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_service_bnx2x_bh() local
3156 if (!CNIC_SUPPORTS_FCOE(bp)) { cnic_service_bnx2x_bh()
4236 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_cm_init_bnx2x_hw() local
4237 u32 pfid = bp->pfid; cnic_cm_init_bnx2x_hw()
4238 u32 port = BP_PORT(bp); cnic_cm_init_bnx2x_hw()
4883 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_storm_memset_hc_disable() local
4901 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_enable_bnx2x_int() local
4920 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_tx_ring() local
4949 if (BNX2X_CHIP_IS_E2_PLUS(bp)) cnic_init_bnx2x_tx_ring()
4986 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_rx_ring() local
4995 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); cnic_init_bnx2x_rx_ring()
5004 data->general.func_id = bp->pfid; cnic_init_bnx2x_rx_ring()
5053 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_bnx2x_kcq() local
5054 u32 pfid = bp->pfid; cnic_init_bnx2x_kcq()
5060 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_init_bnx2x_kcq()
5076 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_init_bnx2x_kcq()
5092 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_start_bnx2x_hw() local
5098 cp->func = bp->pf_num; cnic_start_bnx2x_hw()
5101 pfid = bp->pfid; cnic_start_bnx2x_hw()
5109 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_start_bnx2x_hw()
5168 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_init_rings() local
5191 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); cnic_init_rings()
5194 (BNX2X_CHIP_IS_E2_PLUS(bp) ? cnic_init_rings()
5196 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); cnic_init_rings()
5230 *(cid_ptr + 1) = cid * bp->db_size; cnic_init_rings()
5378 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_stop_bnx2x_hw() local
5385 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { cnic_stop_bnx2x_hw()
5402 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); cnic_stop_bnx2x_hw()
5452 struct bnx2x *bp = netdev_priv(dev->netdev); cnic_get_fc_npiv_tbl() local
5458 if (!BNX2X_CHIP_IS_E2_PLUS(bp)) cnic_get_fc_npiv_tbl()
5502 struct bnx2 *bp = netdev_priv(dev); init_bnx2_cnic() local
5505 if (bp->cnic_probe) init_bnx2_cnic()
5506 ethdev = (bp->cnic_probe)(dev); init_bnx2_cnic()
5562 struct bnx2x *bp = netdev_priv(dev); init_bnx2x_cnic() local
5565 if (bp->cnic_probe) init_bnx2x_cnic()
5566 ethdev = bp->cnic_probe(dev); init_bnx2x_cnic()
5594 if (CNIC_SUPPORTS_FCOE(bp)) { init_bnx2x_cnic()
5614 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { init_bnx2x_cnic()
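
Nearly every cnic.c hit opens the same way: struct bnx2x *bp = netdev_priv(dev->netdev). That works because alloc_etherdev(sizeof(*bp)) reserves the driver's private area directly after the (aligned) struct net_device in a single allocation, so netdev_priv() is just pointer arithmetic. A minimal user-space model of that layout trick, with toy types and an assumed 32-byte alignment standing in for NETDEV_ALIGN:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: a "public" device followed in memory by driver priv. */
struct toy_netdev { char name[16]; };
struct toy_priv   { int pfid; };

#define TOY_ALIGN 32u	/* models NETDEV_ALIGN (assumed value) */

static size_t toy_pub_size(void)
{
	return (sizeof(struct toy_netdev) + TOY_ALIGN - 1) & ~(size_t)(TOY_ALIGN - 1);
}

static struct toy_netdev *toy_alloc_netdev(size_t priv_size)
{
	/* one allocation: aligned public part, then the private area */
	return calloc(1, toy_pub_size() + priv_size);
}

static void *toy_netdev_priv(struct toy_netdev *dev)
{
	return (char *)dev + toy_pub_size();	/* priv lives right after the device */
}

int main(void)
{
	struct toy_netdev *dev = toy_alloc_netdev(sizeof(struct toy_priv));
	struct toy_priv *bp = toy_netdev_priv(dev);

	bp->pfid = 3;
	printf("priv at %p, pfid %d\n", (void *)bp, bp->pfid);
	free(dev);
	return 0;
}
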
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_main.c293 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
299 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
301 static void __storm_memset_dma_mapping(struct bnx2x *bp, __storm_memset_dma_mapping() argument
304 REG_WR(bp, addr, U64_LO(mapping)); __storm_memset_dma_mapping()
305 REG_WR(bp, addr + 4, U64_HI(mapping)); __storm_memset_dma_mapping()
308 static void storm_memset_spq_addr(struct bnx2x *bp, storm_memset_spq_addr() argument
314 __storm_memset_dma_mapping(bp, addr, mapping); storm_memset_spq_addr()
317 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, storm_memset_vf_to_pf() argument
320 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
322 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
324 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
326 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
330 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, storm_memset_func_en() argument
333 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
335 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
337 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
339 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
343 static void storm_memset_eq_data(struct bnx2x *bp, storm_memset_eq_data() argument
351 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); storm_memset_eq_data()
354 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, storm_memset_eq_prod() argument
358 REG_WR16(bp, addr, eq_prod); storm_memset_eq_prod()
364 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) bnx2x_reg_wr_ind() argument
366 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); bnx2x_reg_wr_ind()
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); bnx2x_reg_wr_ind()
368 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_reg_wr_ind()
372 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) bnx2x_reg_rd_ind() argument
376 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); bnx2x_reg_rd_ind()
377 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); bnx2x_reg_rd_ind()
378 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_reg_rd_ind()
390 static void bnx2x_dp_dmae(struct bnx2x *bp, bnx2x_dp_dmae() argument
457 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) bnx2x_post_dmae() argument
464 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); bnx2x_post_dmae()
466 REG_WR(bp, dmae_reg_go_c[idx], 1); bnx2x_post_dmae()
480 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, bnx2x_dmae_opcode() argument
490 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); bnx2x_dmae_opcode()
491 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | bnx2x_dmae_opcode()
492 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); bnx2x_dmae_opcode()
505 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, bnx2x_prep_dmae_with_comp() argument
512 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, bnx2x_prep_dmae_with_comp()
516 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); bnx2x_prep_dmae_with_comp()
517 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); bnx2x_prep_dmae_with_comp()
522 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, bnx2x_issue_dmae_with_comp() argument
525 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; bnx2x_issue_dmae_with_comp()
528 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); bnx2x_issue_dmae_with_comp()
535 spin_lock_bh(&bp->dmae_lock); bnx2x_issue_dmae_with_comp()
541 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); bnx2x_issue_dmae_with_comp()
548 (bp->recovery_state != BNX2X_RECOVERY_DONE && bnx2x_issue_dmae_with_comp()
549 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { bnx2x_issue_dmae_with_comp()
564 spin_unlock_bh(&bp->dmae_lock); bnx2x_issue_dmae_with_comp()
569 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, bnx2x_write_dmae() argument
575 if (!bp->dmae_ready) { bnx2x_write_dmae()
576 u32 *data = bnx2x_sp(bp, wb_data[0]); bnx2x_write_dmae()
578 if (CHIP_IS_E1(bp)) bnx2x_write_dmae()
579 bnx2x_init_ind_wr(bp, dst_addr, data, len32); bnx2x_write_dmae()
581 bnx2x_init_str_wr(bp, dst_addr, data, len32); bnx2x_write_dmae()
586 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); bnx2x_write_dmae()
596 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_write_dmae()
605 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) bnx2x_read_dmae() argument
610 if (!bp->dmae_ready) { bnx2x_read_dmae()
611 u32 *data = bnx2x_sp(bp, wb_data[0]); bnx2x_read_dmae()
614 if (CHIP_IS_E1(bp)) bnx2x_read_dmae()
616 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); bnx2x_read_dmae()
619 data[i] = REG_RD(bp, src_addr + i*4); bnx2x_read_dmae()
625 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); bnx2x_read_dmae()
630 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); bnx2x_read_dmae()
631 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); bnx2x_read_dmae()
635 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_read_dmae()
644 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae_phys_len() argument
647 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); bnx2x_write_dmae_phys_len()
651 bnx2x_write_dmae(bp, phys_addr + offset, bnx2x_write_dmae_phys_len()
657 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); bnx2x_write_dmae_phys_len()
671 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, bnx2x_get_assert_list_entry() argument
691 static int bnx2x_mc_assert(struct bnx2x *bp) bnx2x_mc_assert() argument
717 last_idx = REG_RD8(bp, bar_storm_intmem[storm] + bnx2x_mc_assert()
727 regs[j] = REG_RD(bp, bar_storm_intmem[storm] + bnx2x_mc_assert()
728 bnx2x_get_assert_list_entry(bp, bnx2x_mc_assert()
746 CHIP_IS_E1(bp) ? "everest1" : bnx2x_mc_assert()
747 CHIP_IS_E1H(bp) ? "everest1h" : bnx2x_mc_assert()
748 CHIP_IS_E2(bp) ? "everest2" : "everest3", bnx2x_mc_assert()
757 #define SCRATCH_BUFFER_SIZE(bp) \
758 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
760 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) bnx2x_fw_dump_lvl() argument
767 if (BP_NOMCP(bp)) { bnx2x_fw_dump_lvl()
771 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", bnx2x_fw_dump_lvl()
772 (bp->common.bc_ver & 0xff0000) >> 16, bnx2x_fw_dump_lvl()
773 (bp->common.bc_ver & 0xff00) >> 8, bnx2x_fw_dump_lvl()
774 (bp->common.bc_ver & 0xff)); bnx2x_fw_dump_lvl()
776 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); bnx2x_fw_dump_lvl()
777 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) bnx2x_fw_dump_lvl()
780 if (BP_PATH(bp) == 0) bnx2x_fw_dump_lvl()
781 trace_shmem_base = bp->common.shmem_base; bnx2x_fw_dump_lvl()
783 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); bnx2x_fw_dump_lvl()
786 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || bnx2x_fw_dump_lvl()
787 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + bnx2x_fw_dump_lvl()
788 SCRATCH_BUFFER_SIZE(bp)) { bnx2x_fw_dump_lvl()
797 mark = REG_RD(bp, addr); bnx2x_fw_dump_lvl()
805 mark = REG_RD(bp, addr); bnx2x_fw_dump_lvl()
806 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; bnx2x_fw_dump_lvl()
818 data[word] = htonl(REG_RD(bp, offset + 4*word)); bnx2x_fw_dump_lvl()
826 data[word] = htonl(REG_RD(bp, offset + 4*word)); bnx2x_fw_dump_lvl()
833 static void bnx2x_fw_dump(struct bnx2x *bp) bnx2x_fw_dump() argument
835 bnx2x_fw_dump_lvl(bp, KERN_ERR); bnx2x_fw_dump()
838 static void bnx2x_hc_int_disable(struct bnx2x *bp) bnx2x_hc_int_disable() argument
840 int port = BP_PORT(bp); bnx2x_hc_int_disable()
842 u32 val = REG_RD(bp, addr); bnx2x_hc_int_disable()
848 if (CHIP_IS_E1(bp)) { bnx2x_hc_int_disable()
853 REG_WR(bp, HC_REG_INT_MASK + port*4, 0); bnx2x_hc_int_disable()
871 REG_WR(bp, addr, val); bnx2x_hc_int_disable()
872 if (REG_RD(bp, addr) != val) bnx2x_hc_int_disable()
876 static void bnx2x_igu_int_disable(struct bnx2x *bp) bnx2x_igu_int_disable() argument
878 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); bnx2x_igu_int_disable()
889 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_disable()
890 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) bnx2x_igu_int_disable()
894 static void bnx2x_int_disable(struct bnx2x *bp) bnx2x_int_disable() argument
896 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_int_disable()
897 bnx2x_hc_int_disable(bp); bnx2x_int_disable()
899 bnx2x_igu_int_disable(bp); bnx2x_int_disable()
902 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) bnx2x_panic_dump() argument
907 int func = BP_FUNC(bp); bnx2x_panic_dump()
912 if (IS_PF(bp) && disable_int) bnx2x_panic_dump()
913 bnx2x_int_disable(bp); bnx2x_panic_dump()
915 bp->stats_state = STATS_STATE_DISABLED; bnx2x_panic_dump()
916 bp->eth_stats.unrecoverable_error++; bnx2x_panic_dump()
923 if (IS_PF(bp)) { bnx2x_panic_dump()
924 struct host_sp_status_block *def_sb = bp->def_status_blk; bnx2x_panic_dump()
928 bp->def_idx, bp->def_att_idx, bp->attn_state, bnx2x_panic_dump()
929 bp->spq_prod_idx, bp->stats_counter); bnx2x_panic_dump()
946 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + bnx2x_panic_dump()
959 for_each_eth_queue(bp, i) { for_each_eth_queue()
960 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
965 CHIP_IS_E1x(bp) ? for_each_eth_queue()
969 CHIP_IS_E1x(bp) ? for_each_eth_queue()
976 if (!bp->fp) for_each_eth_queue()
1009 loop = CHIP_IS_E1x(bp) ?
1030 if (IS_VF(bp))
1034 data_size = CHIP_IS_E1x(bp) ?
1038 sb_data_p = CHIP_IS_E1x(bp) ?
1043 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1047 if (!CHIP_IS_E1x(bp)) {
1084 if (IS_PF(bp)) {
1086 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1088 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1091 i, bp->eq_ring[i].message.opcode,
1092 bp->eq_ring[i].message.error);
1100 for_each_valid_rx_queue(bp, i) { for_each_valid_rx_queue()
1101 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_valid_rx_queue()
1103 if (!bp->fp) for_each_valid_rx_queue()
1140 for_each_valid_tx_queue(bp, i) { for_each_valid_tx_queue()
1141 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_valid_tx_queue()
1143 if (!bp->fp) for_each_valid_tx_queue()
1178 if (IS_PF(bp)) {
1179 bnx2x_fw_dump(bp);
1180 bnx2x_mc_assert(bp);
1208 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, bnx2x_pbf_pN_buf_flushed() argument
1215 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); bnx2x_pbf_pN_buf_flushed()
1216 crd = crd_start = REG_RD(bp, regs->crd); bnx2x_pbf_pN_buf_flushed()
1217 init_crd = REG_RD(bp, regs->init_crd); bnx2x_pbf_pN_buf_flushed()
1227 crd = REG_RD(bp, regs->crd); bnx2x_pbf_pN_buf_flushed()
1228 crd_freed = REG_RD(bp, regs->crd_freed); bnx2x_pbf_pN_buf_flushed()
1243 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, bnx2x_pbf_pN_cmd_flushed() argument
1250 occup = to_free = REG_RD(bp, regs->lines_occup); bnx2x_pbf_pN_cmd_flushed()
1251 freed = freed_start = REG_RD(bp, regs->lines_freed); bnx2x_pbf_pN_cmd_flushed()
1259 occup = REG_RD(bp, regs->lines_occup); bnx2x_pbf_pN_cmd_flushed()
1260 freed = REG_RD(bp, regs->lines_freed); bnx2x_pbf_pN_cmd_flushed()
1275 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, bnx2x_flr_clnup_reg_poll() argument
1281 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) bnx2x_flr_clnup_reg_poll()
1287 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, bnx2x_flr_clnup_poll_hw_counter() argument
1290 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); bnx2x_flr_clnup_poll_hw_counter()
1299 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) bnx2x_flr_clnup_poll_count() argument
1302 if (CHIP_REV_IS_EMUL(bp)) bnx2x_flr_clnup_poll_count()
1305 if (CHIP_REV_IS_FPGA(bp)) bnx2x_flr_clnup_poll_count()
1311 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) bnx2x_tx_hw_flushed() argument
1314 {0, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1317 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1320 {1, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1323 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1326 {4, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1329 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1335 {0, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1338 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1341 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1344 {1, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1347 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1350 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1353 {4, (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1356 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1359 (CHIP_IS_E3B0(bp)) ? bnx2x_tx_hw_flushed()
1368 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); bnx2x_tx_hw_flushed()
1372 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); bnx2x_tx_hw_flushed()
1384 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) bnx2x_send_final_clnup() argument
1391 if (REG_RD(bp, comp_addr)) { bnx2x_send_final_clnup()
1402 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); bnx2x_send_final_clnup()
1404 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { bnx2x_send_final_clnup()
1407 (REG_RD(bp, comp_addr))); bnx2x_send_final_clnup()
1412 REG_WR(bp, comp_addr, 0); bnx2x_send_final_clnup()
1427 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) bnx2x_poll_hw_usage_counters() argument
1430 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1437 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1444 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1445 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), bnx2x_poll_hw_usage_counters()
1451 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1452 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), bnx2x_poll_hw_usage_counters()
1456 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1457 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), bnx2x_poll_hw_usage_counters()
1463 if (bnx2x_flr_clnup_poll_hw_counter(bp, bnx2x_poll_hw_usage_counters()
1464 dmae_reg_go_c[INIT_DMAE_C(bp)], bnx2x_poll_hw_usage_counters()
1472 static void bnx2x_hw_enable_status(struct bnx2x *bp) bnx2x_hw_enable_status() argument
1476 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); bnx2x_hw_enable_status()
1479 val = REG_RD(bp, PBF_REG_DISABLE_PF); bnx2x_hw_enable_status()
1482 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); bnx2x_hw_enable_status()
1485 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); bnx2x_hw_enable_status()
1488 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); bnx2x_hw_enable_status()
1491 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); bnx2x_hw_enable_status()
1494 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); bnx2x_hw_enable_status()
1497 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); bnx2x_hw_enable_status()
1502 static int bnx2x_pf_flr_clnup(struct bnx2x *bp) bnx2x_pf_flr_clnup() argument
1504 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); bnx2x_pf_flr_clnup()
1506 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); bnx2x_pf_flr_clnup()
1509 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); bnx2x_pf_flr_clnup()
1513 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) bnx2x_pf_flr_clnup()
1519 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) bnx2x_pf_flr_clnup()
1525 bnx2x_tx_hw_flushed(bp, poll_cnt); bnx2x_pf_flr_clnup()
1531 if (bnx2x_is_pcie_pending(bp->pdev)) bnx2x_pf_flr_clnup()
1535 bnx2x_hw_enable_status(bp); bnx2x_pf_flr_clnup()
1541 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_pf_flr_clnup()
1546 static void bnx2x_hc_int_enable(struct bnx2x *bp) bnx2x_hc_int_enable() argument
1548 int port = BP_PORT(bp); bnx2x_hc_int_enable()
1550 u32 val = REG_RD(bp, addr); bnx2x_hc_int_enable()
1551 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; bnx2x_hc_int_enable()
1552 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; bnx2x_hc_int_enable()
1553 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; bnx2x_hc_int_enable()
1573 if (!CHIP_IS_E1(bp)) { bnx2x_hc_int_enable()
1577 REG_WR(bp, addr, val); bnx2x_hc_int_enable()
1583 if (CHIP_IS_E1(bp)) bnx2x_hc_int_enable()
1584 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); bnx2x_hc_int_enable()
1590 REG_WR(bp, addr, val); bnx2x_hc_int_enable()
1597 if (!CHIP_IS_E1(bp)) { bnx2x_hc_int_enable()
1599 if (IS_MF(bp)) { bnx2x_hc_int_enable()
1600 val = (0xee0f | (1 << (BP_VN(bp) + 4))); bnx2x_hc_int_enable()
1601 if (bp->port.pmf) bnx2x_hc_int_enable()
1607 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); bnx2x_hc_int_enable()
1608 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); bnx2x_hc_int_enable()
1615 static void bnx2x_igu_int_enable(struct bnx2x *bp) bnx2x_igu_int_enable() argument
1618 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; bnx2x_igu_int_enable()
1619 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; bnx2x_igu_int_enable()
1620 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; bnx2x_igu_int_enable()
1622 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); bnx2x_igu_int_enable()
1646 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_enable()
1647 bnx2x_ack_int(bp); bnx2x_igu_int_enable()
1655 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_igu_int_enable()
1658 pci_intx(bp->pdev, true); bnx2x_igu_int_enable()
1663 if (IS_MF(bp)) { bnx2x_igu_int_enable()
1664 val = (0xee0f | (1 << (BP_VN(bp) + 4))); bnx2x_igu_int_enable()
1665 if (bp->port.pmf) bnx2x_igu_int_enable()
1671 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); bnx2x_igu_int_enable()
1672 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); bnx2x_igu_int_enable()
1678 void bnx2x_int_enable(struct bnx2x *bp) bnx2x_int_enable() argument
1680 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_int_enable()
1681 bnx2x_hc_int_enable(bp); bnx2x_int_enable()
1683 bnx2x_igu_int_enable(bp); bnx2x_int_enable()
1686 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) bnx2x_int_disable_sync() argument
1688 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; bnx2x_int_disable_sync()
1693 bnx2x_int_disable(bp); bnx2x_int_disable_sync()
1697 synchronize_irq(bp->msix_table[0].vector); bnx2x_int_disable_sync()
1699 if (CNIC_SUPPORT(bp)) bnx2x_int_disable_sync()
1701 for_each_eth_queue(bp, i) bnx2x_int_disable_sync()
1702 synchronize_irq(bp->msix_table[offset++].vector); bnx2x_int_disable_sync()
1704 synchronize_irq(bp->pdev->irq); bnx2x_int_disable_sync()
1707 cancel_delayed_work(&bp->sp_task); bnx2x_int_disable_sync()
1708 cancel_delayed_work(&bp->period_task); bnx2x_int_disable_sync()
1719 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_trylock_hw_lock() argument
1723 int func = BP_FUNC(bp); bnx2x_trylock_hw_lock()
1744 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); bnx2x_trylock_hw_lock()
1745 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_trylock_hw_lock()
1757 * @bp: driver handle
1762 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) bnx2x_get_leader_lock_resource() argument
1764 if (BP_PATH(bp)) bnx2x_get_leader_lock_resource()
1773 * @bp: driver handle
1777 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) bnx2x_trylock_leader_lock() argument
1779 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); bnx2x_trylock_leader_lock()
1782 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1785 static int bnx2x_schedule_sp_task(struct bnx2x *bp) bnx2x_schedule_sp_task() argument
1791 atomic_set(&bp->interrupt_occurred, 1); bnx2x_schedule_sp_task()
1800 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); bnx2x_schedule_sp_task()
1805 struct bnx2x *bp = fp->bp; bnx2x_sp_event() local
1809 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_sp_event()
1813 fp->index, cid, command, bp->state, bnx2x_sp_event()
1821 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); bnx2x_sp_event()
1866 q_obj->complete_cmd(bp, q_obj, drv_cmd)) bnx2x_sp_event()
1870 * In this case we don't want to increase the bp->spq_left bnx2x_sp_event()
1881 atomic_inc(&bp->cq_spq_left); bnx2x_sp_event()
1882 /* push the change in bp->spq_left and towards the memory */ bnx2x_sp_event()
1885 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); bnx2x_sp_event()
1888 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { bnx2x_sp_event()
1899 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); bnx2x_sp_event()
1901 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); bnx2x_sp_event()
1905 bnx2x_schedule_sp_task(bp); bnx2x_sp_event()
1913 struct bnx2x *bp = netdev_priv(dev_instance); bnx2x_interrupt() local
1914 u16 status = bnx2x_ack_int(bp); bnx2x_interrupt()
1927 if (unlikely(bp->panic)) bnx2x_interrupt()
1931 for_each_eth_queue(bp, i) { for_each_eth_queue()
1932 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
1934 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); for_each_eth_queue()
1940 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); for_each_eth_queue()
1945 if (CNIC_SUPPORT(bp)) {
1951 c_ops = rcu_dereference(bp->cnic_ops);
1952 if (c_ops && (bp->cnic_eth_dev.drv_state &
1954 c_ops->cnic_handler(bp->cnic_data, NULL);
1966 bnx2x_schedule_sp_task(bp);
1986 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_acquire_hw_lock() argument
1990 int func = BP_FUNC(bp); bnx2x_acquire_hw_lock()
2009 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_acquire_hw_lock()
2019 REG_WR(bp, hw_lock_control_reg + 4, resource_bit); bnx2x_acquire_hw_lock()
2020 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_acquire_hw_lock()
2030 int bnx2x_release_leader_lock(struct bnx2x *bp) bnx2x_release_leader_lock() argument
2032 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); bnx2x_release_leader_lock()
2035 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) bnx2x_release_hw_lock() argument
2039 int func = BP_FUNC(bp); bnx2x_release_hw_lock()
2057 lock_status = REG_RD(bp, hw_lock_control_reg); bnx2x_release_hw_lock()
2064 REG_WR(bp, hw_lock_control_reg, resource_bit); bnx2x_release_hw_lock()
2068 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) bnx2x_get_gpio() argument
2071 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_get_gpio()
2072 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_get_gpio()
2085 gpio_reg = REG_RD(bp, MISC_REG_GPIO); bnx2x_get_gpio()
2096 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) bnx2x_set_gpio() argument
2099 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_set_gpio()
2100 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_set_gpio()
2111 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio()
2113 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); bnx2x_set_gpio()
2146 REG_WR(bp, MISC_REG_GPIO, gpio_reg); bnx2x_set_gpio()
2147 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio()
2152 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) bnx2x_set_mult_gpio() argument
2159 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_mult_gpio()
2161 gpio_reg = REG_RD(bp, MISC_REG_GPIO); bnx2x_set_mult_gpio()
2192 REG_WR(bp, MISC_REG_GPIO, gpio_reg); bnx2x_set_mult_gpio()
2194 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_mult_gpio()
2199 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) bnx2x_set_gpio_int() argument
2202 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && bnx2x_set_gpio_int()
2203 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; bnx2x_set_gpio_int()
2214 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio_int()
2216 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); bnx2x_set_gpio_int()
2241 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); bnx2x_set_gpio_int()
2242 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); bnx2x_set_gpio_int()
2247 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) bnx2x_set_spio() argument
2257 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); bnx2x_set_spio()
2259 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); bnx2x_set_spio()
2286 REG_WR(bp, MISC_REG_SPIO, spio_reg); bnx2x_set_spio()
2287 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); bnx2x_set_spio()
2292 void bnx2x_calc_fc_adv(struct bnx2x *bp) bnx2x_calc_fc_adv() argument
2294 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_calc_fc_adv()
2296 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | bnx2x_calc_fc_adv()
2298 switch (bp->link_vars.ieee_fc & bnx2x_calc_fc_adv()
2301 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | bnx2x_calc_fc_adv()
2306 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; bnx2x_calc_fc_adv()
2314 static void bnx2x_set_requested_fc(struct bnx2x *bp) bnx2x_set_requested_fc() argument
2320 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) bnx2x_set_requested_fc()
2321 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; bnx2x_set_requested_fc()
2323 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; bnx2x_set_requested_fc()
2326 static void bnx2x_init_dropless_fc(struct bnx2x *bp) bnx2x_init_dropless_fc() argument
2330 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { bnx2x_init_dropless_fc()
2331 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) bnx2x_init_dropless_fc()
2334 REG_WR(bp, BAR_USTRORM_INTMEM + bnx2x_init_dropless_fc()
2335 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), bnx2x_init_dropless_fc()
2343 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) bnx2x_initial_phy_init() argument
2345 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_initial_phy_init()
2346 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; bnx2x_initial_phy_init()
2348 if (!BP_NOMCP(bp)) { bnx2x_initial_phy_init()
2349 bnx2x_set_requested_fc(bp); bnx2x_initial_phy_init()
2350 bnx2x_acquire_phy_lock(bp); bnx2x_initial_phy_init()
2353 struct link_params *lp = &bp->link_params; bnx2x_initial_phy_init()
2372 struct link_params *lp = &bp->link_params; bnx2x_initial_phy_init()
2376 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_initial_phy_init()
2378 bnx2x_release_phy_lock(bp); bnx2x_initial_phy_init()
2380 bnx2x_init_dropless_fc(bp); bnx2x_initial_phy_init()
2382 bnx2x_calc_fc_adv(bp); bnx2x_initial_phy_init()
2384 if (bp->link_vars.link_up) { bnx2x_initial_phy_init()
2385 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_initial_phy_init()
2386 bnx2x_link_report(bp); bnx2x_initial_phy_init()
2388 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bnx2x_initial_phy_init()
2389 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; bnx2x_initial_phy_init()
2396 void bnx2x_link_set(struct bnx2x *bp) bnx2x_link_set() argument
2398 if (!BP_NOMCP(bp)) { bnx2x_link_set()
2399 bnx2x_acquire_phy_lock(bp); bnx2x_link_set()
2400 bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_link_set()
2401 bnx2x_release_phy_lock(bp); bnx2x_link_set()
2403 bnx2x_init_dropless_fc(bp); bnx2x_link_set()
2405 bnx2x_calc_fc_adv(bp); bnx2x_link_set()
2410 static void bnx2x__link_reset(struct bnx2x *bp) bnx2x__link_reset() argument
2412 if (!BP_NOMCP(bp)) { bnx2x__link_reset()
2413 bnx2x_acquire_phy_lock(bp); bnx2x__link_reset()
2414 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x__link_reset()
2415 bnx2x_release_phy_lock(bp); bnx2x__link_reset()
2420 void bnx2x_force_link_reset(struct bnx2x *bp) bnx2x_force_link_reset() argument
2422 bnx2x_acquire_phy_lock(bp); bnx2x_force_link_reset()
2423 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_force_link_reset()
2424 bnx2x_release_phy_lock(bp); bnx2x_force_link_reset()
2427 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) bnx2x_link_test() argument
2431 if (!BP_NOMCP(bp)) { bnx2x_link_test()
2432 bnx2x_acquire_phy_lock(bp); bnx2x_link_test()
2433 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, bnx2x_link_test()
2435 bnx2x_release_phy_lock(bp); bnx2x_link_test()
2451 static void bnx2x_calc_vn_min(struct bnx2x *bp, bnx2x_calc_vn_min() argument
2457 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_calc_vn_min()
2458 u32 vn_cfg = bp->mf_config[vn]; bnx2x_calc_vn_min()
2475 if (BNX2X_IS_ETS_ENABLED(bp)) { bnx2x_calc_vn_min()
2489 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, bnx2x_calc_vn_max() argument
2493 u32 vn_cfg = bp->mf_config[vn]; bnx2x_calc_vn_max()
2498 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); bnx2x_calc_vn_max()
2500 if (IS_MF_PERCENT_BW(bp)) { bnx2x_calc_vn_max()
2502 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; bnx2x_calc_vn_max()
2513 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) bnx2x_get_cmng_fns_mode() argument
2515 if (CHIP_REV_IS_SLOW(bp)) bnx2x_get_cmng_fns_mode()
2517 if (IS_MF(bp)) bnx2x_get_cmng_fns_mode()
2523 void bnx2x_read_mf_cfg(struct bnx2x *bp) bnx2x_read_mf_cfg() argument
2525 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1); bnx2x_read_mf_cfg()
2527 if (BP_NOMCP(bp)) bnx2x_read_mf_cfg()
2541 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_read_mf_cfg()
2542 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); bnx2x_read_mf_cfg()
2547 bp->mf_config[vn] = bnx2x_read_mf_cfg()
2548 MF_CFG_RD(bp, func_mf_config[func].config); bnx2x_read_mf_cfg()
2550 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { bnx2x_read_mf_cfg()
2552 bp->flags |= MF_FUNC_DIS; bnx2x_read_mf_cfg()
2555 bp->flags &= ~MF_FUNC_DIS; bnx2x_read_mf_cfg()
2559 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) bnx2x_cmng_fns_init() argument
2564 input.port_rate = bp->link_vars.line_speed; bnx2x_cmng_fns_init()
2571 bnx2x_read_mf_cfg(bp); bnx2x_cmng_fns_init()
2574 bnx2x_calc_vn_min(bp, &input); bnx2x_cmng_fns_init()
2577 if (bp->port.pmf) bnx2x_cmng_fns_init()
2578 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) bnx2x_cmng_fns_init()
2579 bnx2x_calc_vn_max(bp, vn, &input); bnx2x_cmng_fns_init()
2585 bnx2x_init_cmng(&input, &bp->cmng); bnx2x_cmng_fns_init()
2594 static void storm_memset_cmng(struct bnx2x *bp, storm_memset_cmng() argument
2604 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); storm_memset_cmng()
2606 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { storm_memset_cmng()
2607 int func = func_by_vn(bp, vn); storm_memset_cmng()
2612 __storm_memset_struct(bp, addr, size, storm_memset_cmng()
2618 __storm_memset_struct(bp, addr, size, storm_memset_cmng()
2624 void bnx2x_set_local_cmng(struct bnx2x *bp) bnx2x_set_local_cmng() argument
2626 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); bnx2x_set_local_cmng()
2629 bnx2x_cmng_fns_init(bp, false, cmng_fns); bnx2x_set_local_cmng()
2630 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_set_local_cmng()
2639 static void bnx2x_link_attn(struct bnx2x *bp) bnx2x_link_attn() argument
2642 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_link_attn()
2644 bnx2x_link_update(&bp->link_params, &bp->link_vars); bnx2x_link_attn()
2646 bnx2x_init_dropless_fc(bp); bnx2x_link_attn()
2648 if (bp->link_vars.link_up) { bnx2x_link_attn()
2650 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { bnx2x_link_attn()
2653 pstats = bnx2x_sp(bp, port_stats); bnx2x_link_attn()
2658 if (bp->state == BNX2X_STATE_OPEN) bnx2x_link_attn()
2659 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_link_attn()
2662 if (bp->link_vars.link_up && bp->link_vars.line_speed) bnx2x_link_attn()
2663 bnx2x_set_local_cmng(bp); bnx2x_link_attn()
2665 __bnx2x_link_report(bp); bnx2x_link_attn()
2667 if (IS_MF(bp)) bnx2x_link_attn()
2668 bnx2x_link_sync_notify(bp); bnx2x_link_attn()
2671 void bnx2x__link_status_update(struct bnx2x *bp) bnx2x__link_status_update() argument
2673 if (bp->state != BNX2X_STATE_OPEN) bnx2x__link_status_update()
2677 if (IS_PF(bp)) { bnx2x__link_status_update()
2678 bnx2x_dcbx_pmf_update(bp); bnx2x__link_status_update()
2679 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); bnx2x__link_status_update()
2680 if (bp->link_vars.link_up) bnx2x__link_status_update()
2681 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x__link_status_update()
2683 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x__link_status_update()
2685 bnx2x_link_report(bp); bnx2x__link_status_update()
2688 bp->port.supported[0] |= (SUPPORTED_10baseT_Half | bnx2x__link_status_update()
2700 bp->port.advertising[0] = bp->port.supported[0]; bnx2x__link_status_update()
2702 bp->link_params.bp = bp; bnx2x__link_status_update()
2703 bp->link_params.port = BP_PORT(bp); bnx2x__link_status_update()
2704 bp->link_params.req_duplex[0] = DUPLEX_FULL; bnx2x__link_status_update()
2705 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; bnx2x__link_status_update()
2706 bp->link_params.req_line_speed[0] = SPEED_10000; bnx2x__link_status_update()
2707 bp->link_params.speed_cap_mask[0] = 0x7f0000; bnx2x__link_status_update()
2708 bp->link_params.switch_cfg = SWITCH_CFG_10G; bnx2x__link_status_update()
2709 bp->link_vars.mac_type = MAC_TYPE_BMAC; bnx2x__link_status_update()
2710 bp->link_vars.line_speed = SPEED_10000; bnx2x__link_status_update()
2711 bp->link_vars.link_status = bnx2x__link_status_update()
2714 bp->link_vars.link_up = 1; bnx2x__link_status_update()
2715 bp->link_vars.duplex = DUPLEX_FULL; bnx2x__link_status_update()
2716 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; bnx2x__link_status_update()
2717 __bnx2x_link_report(bp); bnx2x__link_status_update()
2719 bnx2x_sample_bulletin(bp); bnx2x__link_status_update()
2726 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x__link_status_update()
2730 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, bnx2x_afex_func_update() argument
2737 func_params.f_obj = &bp->func_obj; bnx2x_afex_func_update()
2749 if (bnx2x_func_state_change(bp, &func_params) < 0) bnx2x_afex_func_update()
2750 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); bnx2x_afex_func_update()
2755 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, bnx2x_afex_handle_vif_list_cmd() argument
2769 func_params.f_obj = &bp->func_obj; bnx2x_afex_handle_vif_list_cmd()
2786 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_afex_handle_vif_list_cmd()
2788 bnx2x_fw_command(bp, drv_msg_code, 0); bnx2x_afex_handle_vif_list_cmd()
2793 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) bnx2x_handle_afex_cmd() argument
2796 u32 func = BP_ABS_FUNC(bp); bnx2x_handle_afex_cmd()
2806 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd()
2809 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); bnx2x_handle_afex_cmd()
2813 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd()
2814 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd()
2818 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, bnx2x_handle_afex_cmd()
2823 addr_to_write = SHMEM2_RD(bp, bnx2x_handle_afex_cmd()
2824 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd()
2825 stats_type = SHMEM2_RD(bp, bnx2x_handle_afex_cmd()
2826 afex_param1_to_driver[BP_FW_MB_IDX(bp)]); bnx2x_handle_afex_cmd()
2832 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); bnx2x_handle_afex_cmd()
2836 REG_WR(bp, addr_to_write + i*sizeof(u32), bnx2x_handle_afex_cmd()
2840 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); bnx2x_handle_afex_cmd()
2844 mf_config = MF_CFG_RD(bp, func_mf_config[func].config); bnx2x_handle_afex_cmd()
2845 bp->mf_config[BP_VN(bp)] = mf_config; bnx2x_handle_afex_cmd()
2857 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); bnx2x_handle_afex_cmd()
2859 bp->mf_config[BP_VN(bp)] = mf_config; bnx2x_handle_afex_cmd()
2861 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); bnx2x_handle_afex_cmd()
2863 cmng_input.vnic_max_rate[BP_VN(bp)]; bnx2x_handle_afex_cmd()
2868 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); bnx2x_handle_afex_cmd()
2872 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_afex_cmd()
2876 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_afex_cmd()
2884 (MF_CFG_RD(bp, bnx2x_handle_afex_cmd()
2889 (MF_CFG_RD(bp, bnx2x_handle_afex_cmd()
2895 if (bnx2x_afex_func_update(bp, vif_id, vlan_val, bnx2x_handle_afex_cmd()
2899 bp->afex_def_vlan_tag = vlan_val; bnx2x_handle_afex_cmd()
2900 bp->afex_vlan_mode = vlan_mode; bnx2x_handle_afex_cmd()
2903 bnx2x_link_report(bp); bnx2x_handle_afex_cmd()
2906 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); bnx2x_handle_afex_cmd()
2909 bp->afex_def_vlan_tag = -1; bnx2x_handle_afex_cmd()
2914 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) bnx2x_handle_update_svid_cmd() argument
2921 func_params.f_obj = &bp->func_obj; bnx2x_handle_update_svid_cmd()
2924 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { bnx2x_handle_update_svid_cmd()
2925 int func = BP_ABS_FUNC(bp); bnx2x_handle_update_svid_cmd()
2929 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_handle_update_svid_cmd()
2932 bp->mf_ov = val; bnx2x_handle_update_svid_cmd()
2939 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, bnx2x_handle_update_svid_cmd()
2940 bp->mf_ov); bnx2x_handle_update_svid_cmd()
2945 switch_update_params->vlan = bp->mf_ov; bnx2x_handle_update_svid_cmd()
2947 if (bnx2x_func_state_change(bp, &func_params) < 0) { bnx2x_handle_update_svid_cmd()
2949 bp->mf_ov); bnx2x_handle_update_svid_cmd()
2953 bp->mf_ov); bnx2x_handle_update_svid_cmd()
2959 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); bnx2x_handle_update_svid_cmd()
2962 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); bnx2x_handle_update_svid_cmd()
2965 static void bnx2x_pmf_update(struct bnx2x *bp) bnx2x_pmf_update() argument
2967 int port = BP_PORT(bp); bnx2x_pmf_update()
2970 bp->port.pmf = 1; bnx2x_pmf_update()
2971 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); bnx2x_pmf_update()
2975 * bp->port.pmf here and reading it from the bnx2x_periodic_task(). bnx2x_pmf_update()
2980 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bnx2x_pmf_update()
2982 bnx2x_dcbx_pmf_update(bp); bnx2x_pmf_update()
2985 val = (0xff0f | (1 << (BP_VN(bp) + 4))); bnx2x_pmf_update()
2986 if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_pmf_update()
2987 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); bnx2x_pmf_update()
2988 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); bnx2x_pmf_update()
2989 } else if (!CHIP_IS_E1x(bp)) { bnx2x_pmf_update()
2990 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); bnx2x_pmf_update()
2991 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); bnx2x_pmf_update()
2994 bnx2x_stats_handle(bp, STATS_EVENT_PMF); bnx2x_pmf_update()
3006 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) bnx2x_fw_command() argument
3008 int mb_idx = BP_FW_MB_IDX(bp); bnx2x_fw_command()
3012 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; bnx2x_fw_command()
3014 mutex_lock(&bp->fw_mb_mutex); bnx2x_fw_command()
3015 seq = ++bp->fw_seq; bnx2x_fw_command()
3016 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); bnx2x_fw_command()
3017 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); bnx2x_fw_command()
3026 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); bnx2x_fw_command()
3040 bnx2x_fw_dump(bp); bnx2x_fw_command()
3043 mutex_unlock(&bp->fw_mb_mutex); bnx2x_fw_command()
3048 static void storm_memset_func_cfg(struct bnx2x *bp, storm_memset_func_cfg() argument
3057 __storm_memset_struct(bp, addr, size, (u32 *)tcfg); storm_memset_func_cfg()
3060 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) bnx2x_func_init() argument
3062 if (CHIP_IS_E1x(bp)) { bnx2x_func_init()
3065 storm_memset_func_cfg(bp, &tcfg, p->func_id); bnx2x_func_init()
3069 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); bnx2x_func_init()
3070 storm_memset_func_en(bp, p->func_id, 1); bnx2x_func_init()
3074 storm_memset_spq_addr(bp, p->spq_map, p->func_id); bnx2x_func_init()
3075 REG_WR(bp, XSEM_REG_FAST_MEMORY + bnx2x_func_init()
3083 * @bp device handle
3089 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, bnx2x_get_common_flags() argument
3107 if (bp->flags & TX_SWITCHING) bnx2x_get_common_flags()
3120 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, bnx2x_get_q_flags() argument
3127 if (IS_MF_SD(bp)) bnx2x_get_q_flags()
3152 if (IS_MF_AFEX(bp)) bnx2x_get_q_flags()
3155 return flags | bnx2x_get_common_flags(bp, fp, true); bnx2x_get_q_flags()
3158 static void bnx2x_pf_q_prep_general(struct bnx2x *bp, bnx2x_pf_q_prep_general() argument
3169 gen_init->mtu = bp->dev->mtu; bnx2x_pf_q_prep_general()
3176 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, bnx2x_pf_rx_q_prep() argument
3185 pause->sge_th_lo = SGE_TH_LO(bp); bnx2x_pf_rx_q_prep()
3186 pause->sge_th_hi = SGE_TH_HI(bp); bnx2x_pf_rx_q_prep()
3189 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep()
3194 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> bnx2x_pf_rx_q_prep()
3202 if (!CHIP_IS_E1(bp)) { bnx2x_pf_rx_q_prep()
3203 pause->bd_th_lo = BD_TH_LO(bp); bnx2x_pf_rx_q_prep()
3204 pause->bd_th_hi = BD_TH_HI(bp); bnx2x_pf_rx_q_prep()
3206 pause->rcq_th_lo = RCQ_TH_LO(bp); bnx2x_pf_rx_q_prep()
3207 pause->rcq_th_hi = RCQ_TH_HI(bp); bnx2x_pf_rx_q_prep()
3212 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep()
3214 bp->rx_ring_size); bnx2x_pf_rx_q_prep()
3215 WARN_ON(bp->dropless_fc && bnx2x_pf_rx_q_prep()
3238 rxq_init->rss_engine_id = BP_FUNC(bp); bnx2x_pf_rx_q_prep()
3239 rxq_init->mcast_engine_id = BP_FUNC(bp); bnx2x_pf_rx_q_prep()
3246 rxq_init->max_tpa_queues = MAX_AGG_QS(bp); bnx2x_pf_rx_q_prep()
3258 if (IS_MF_AFEX(bp)) { bnx2x_pf_rx_q_prep()
3259 rxq_init->silent_removal_value = bp->afex_def_vlan_tag; bnx2x_pf_rx_q_prep()
3264 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, bnx2x_pf_tx_q_prep() argument
3277 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); bnx2x_pf_tx_q_prep()
3285 static void bnx2x_pf_init(struct bnx2x *bp) bnx2x_pf_init() argument
3290 if (!CHIP_IS_E1x(bp)) { bnx2x_pf_init()
3293 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + bnx2x_pf_init()
3295 (CHIP_MODE_IS_4_PORT(bp) ? bnx2x_pf_init()
3296 BP_FUNC(bp) : BP_VN(bp))*4, 0); bnx2x_pf_init()
3298 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + bnx2x_pf_init()
3301 (CHIP_MODE_IS_4_PORT(bp) ? bnx2x_pf_init()
3302 BP_FUNC(bp) : BP_VN(bp))*4, 0); bnx2x_pf_init()
3306 func_init.pf_id = BP_FUNC(bp); bnx2x_pf_init()
3307 func_init.func_id = BP_FUNC(bp); bnx2x_pf_init()
3308 func_init.spq_map = bp->spq_mapping; bnx2x_pf_init()
3309 func_init.spq_prod = bp->spq_prod_idx; bnx2x_pf_init()
3311 bnx2x_func_init(bp, &func_init); bnx2x_pf_init()
3313 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); bnx2x_pf_init()
3321 bp->link_vars.line_speed = SPEED_10000; bnx2x_pf_init()
3322 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); bnx2x_pf_init()
3325 if (bp->port.pmf) bnx2x_pf_init()
3326 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_pf_init()
3329 eq_data.base_addr.hi = U64_HI(bp->eq_mapping); bnx2x_pf_init()
3330 eq_data.base_addr.lo = U64_LO(bp->eq_mapping); bnx2x_pf_init()
3331 eq_data.producer = bp->eq_prod; bnx2x_pf_init()
3334 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); bnx2x_pf_init()
3337 static void bnx2x_e1h_disable(struct bnx2x *bp) bnx2x_e1h_disable() argument
3339 int port = BP_PORT(bp); bnx2x_e1h_disable()
3341 bnx2x_tx_disable(bp); bnx2x_e1h_disable()
3343 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); bnx2x_e1h_disable()
3346 static void bnx2x_e1h_enable(struct bnx2x *bp) bnx2x_e1h_enable() argument
3348 int port = BP_PORT(bp); bnx2x_e1h_enable()
3350 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) bnx2x_e1h_enable()
3351 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); bnx2x_e1h_enable()
3354 netif_tx_wake_all_queues(bp->dev); bnx2x_e1h_enable()
3364 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) bnx2x_drv_info_ether_stat() argument
3367 &bp->slowpath->drv_info_to_mcp.ether_stat; bnx2x_drv_info_ether_stat()
3369 &bp->sp_objs->mac_obj; bnx2x_drv_info_ether_stat()
3386 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, bnx2x_drv_info_ether_stat()
3390 ether_stat->mtu_size = bp->dev->mtu; bnx2x_drv_info_ether_stat()
3391 if (bp->dev->features & NETIF_F_RXCSUM) bnx2x_drv_info_ether_stat()
3393 if (bp->dev->features & NETIF_F_TSO) bnx2x_drv_info_ether_stat()
3395 ether_stat->feature_flags |= bp->common.boot_mode; bnx2x_drv_info_ether_stat()
3397 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; bnx2x_drv_info_ether_stat()
3399 ether_stat->txq_size = bp->tx_ring_size; bnx2x_drv_info_ether_stat()
3400 ether_stat->rxq_size = bp->rx_ring_size; bnx2x_drv_info_ether_stat()
3403 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; bnx2x_drv_info_ether_stat()
3407 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) bnx2x_drv_info_fcoe_stat() argument
3409 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; bnx2x_drv_info_fcoe_stat()
3411 &bp->slowpath->drv_info_to_mcp.fcoe_stat; bnx2x_drv_info_fcoe_stat()
3413 if (!CNIC_LOADED(bp)) bnx2x_drv_info_fcoe_stat()
3416 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); bnx2x_drv_info_fcoe_stat()
3422 if (!NO_FCOE(bp)) { bnx2x_drv_info_fcoe_stat()
3424 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. bnx2x_drv_info_fcoe_stat()
3428 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. bnx2x_drv_info_fcoe_stat()
3432 &bp->fw_stats_data->fcoe; bnx2x_drv_info_fcoe_stat()
3506 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); bnx2x_drv_info_fcoe_stat()
3509 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) bnx2x_drv_info_iscsi_stat() argument
3511 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; bnx2x_drv_info_iscsi_stat()
3513 &bp->slowpath->drv_info_to_mcp.iscsi_stat; bnx2x_drv_info_iscsi_stat()
3515 if (!CNIC_LOADED(bp)) bnx2x_drv_info_iscsi_stat()
3518 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, bnx2x_drv_info_iscsi_stat()
3525 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); bnx2x_drv_info_iscsi_stat()
3533 static void bnx2x_config_mf_bw(struct bnx2x *bp) bnx2x_config_mf_bw() argument
3535 if (bp->link_vars.link_up) { bnx2x_config_mf_bw()
3536 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_config_mf_bw()
3537 bnx2x_link_sync_notify(bp); bnx2x_config_mf_bw()
3539 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); bnx2x_config_mf_bw()
3542 static void bnx2x_set_mf_bw(struct bnx2x *bp) bnx2x_set_mf_bw() argument
3544 bnx2x_config_mf_bw(bp); bnx2x_set_mf_bw()
3545 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); bnx2x_set_mf_bw()
3548 static void bnx2x_handle_eee_event(struct bnx2x *bp) bnx2x_handle_eee_event() argument
3551 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); bnx2x_handle_eee_event()
3557 static void bnx2x_handle_drv_info_req(struct bnx2x *bp) bnx2x_handle_drv_info_req() argument
3560 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); bnx2x_handle_drv_info_req()
3566 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); bnx2x_handle_drv_info_req()
3574 mutex_lock(&bp->drv_info_mutex); bnx2x_handle_drv_info_req()
3576 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_handle_drv_info_req()
3581 bnx2x_drv_info_ether_stat(bp); bnx2x_handle_drv_info_req()
3584 bnx2x_drv_info_fcoe_stat(bp); bnx2x_handle_drv_info_req()
3587 bnx2x_drv_info_iscsi_stat(bp); bnx2x_handle_drv_info_req()
3591 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); bnx2x_handle_drv_info_req()
3598 SHMEM2_WR(bp, drv_info_host_addr_lo, bnx2x_handle_drv_info_req()
3599 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_handle_drv_info_req()
3600 SHMEM2_WR(bp, drv_info_host_addr_hi, bnx2x_handle_drv_info_req()
3601 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); bnx2x_handle_drv_info_req()
3603 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); bnx2x_handle_drv_info_req()
3609 if (!SHMEM2_HAS(bp, mfw_drv_indication)) { bnx2x_handle_drv_info_req()
3611 } else if (!bp->drv_info_mng_owner) { bnx2x_handle_drv_info_req()
3612 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); bnx2x_handle_drv_info_req()
3615 u32 indication = SHMEM2_RD(bp, mfw_drv_indication); bnx2x_handle_drv_info_req()
3619 SHMEM2_WR(bp, mfw_drv_indication, bnx2x_handle_drv_info_req()
3630 bp->drv_info_mng_owner = true; bnx2x_handle_drv_info_req()
3634 mutex_unlock(&bp->drv_info_mutex); bnx2x_handle_drv_info_req()
3658 void bnx2x_update_mng_version(struct bnx2x *bp) bnx2x_update_mng_version() argument
3663 int idx = BP_FW_MB_IDX(bp); bnx2x_update_mng_version()
3666 if (!SHMEM2_HAS(bp, func_os_drv_ver)) bnx2x_update_mng_version()
3669 mutex_lock(&bp->drv_info_mutex); bnx2x_update_mng_version()
3671 if (bp->drv_info_mng_owner) bnx2x_update_mng_version()
3674 if (bp->state != BNX2X_STATE_OPEN) bnx2x_update_mng_version()
3679 if (!CNIC_LOADED(bp)) bnx2x_update_mng_version()
3683 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_update_mng_version()
3685 bnx2x_drv_info_iscsi_stat(bp); bnx2x_update_mng_version()
3686 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; bnx2x_update_mng_version()
3689 memset(&bp->slowpath->drv_info_to_mcp, 0, bnx2x_update_mng_version()
3691 bnx2x_drv_info_fcoe_stat(bp); bnx2x_update_mng_version()
3692 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; bnx2x_update_mng_version()
3696 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); bnx2x_update_mng_version()
3697 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); bnx2x_update_mng_version()
3698 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); bnx2x_update_mng_version()
3700 mutex_unlock(&bp->drv_info_mutex); bnx2x_update_mng_version()
3706 void bnx2x_update_mfw_dump(struct bnx2x *bp) bnx2x_update_mfw_dump() argument
3711 if (!SHMEM2_HAS(bp, drv_info)) bnx2x_update_mfw_dump()
3715 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds()); bnx2x_update_mfw_dump()
3718 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); bnx2x_update_mfw_dump()
3720 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); bnx2x_update_mfw_dump()
3723 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); bnx2x_update_mfw_dump()
3732 static void bnx2x_oem_event(struct bnx2x *bp, u32 event) bnx2x_oem_event() argument
3756 * where the bp->flags can change so it is done without any bnx2x_oem_event()
3759 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { bnx2x_oem_event()
3761 bp->flags |= MF_FUNC_DIS; bnx2x_oem_event()
3763 bnx2x_e1h_disable(bp); bnx2x_oem_event()
3766 bp->flags &= ~MF_FUNC_DIS; bnx2x_oem_event()
3768 bnx2x_e1h_enable(bp); bnx2x_oem_event()
3776 bnx2x_config_mf_bw(bp); bnx2x_oem_event()
3783 bnx2x_fw_command(bp, cmd_fail, 0); bnx2x_oem_event()
3785 bnx2x_fw_command(bp, cmd_ok, 0); bnx2x_oem_event()
3789 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) bnx2x_sp_get_next() argument
3791 struct eth_spe *next_spe = bp->spq_prod_bd; bnx2x_sp_get_next()
3793 if (bp->spq_prod_bd == bp->spq_last_bd) { bnx2x_sp_get_next()
3794 bp->spq_prod_bd = bp->spq; bnx2x_sp_get_next()
3795 bp->spq_prod_idx = 0; bnx2x_sp_get_next()
3798 bp->spq_prod_bd++; bnx2x_sp_get_next()
3799 bp->spq_prod_idx++; bnx2x_sp_get_next()
3805 static void bnx2x_sp_prod_update(struct bnx2x *bp) bnx2x_sp_prod_update() argument
3807 int func = BP_FUNC(bp); bnx2x_sp_prod_update()
3816 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bnx2x_sp_prod_update()
3817 bp->spq_prod_idx); bnx2x_sp_prod_update()
3844 * @bp: driver handle
3855 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, bnx2x_sp_post() argument
3863 if (unlikely(bp->panic)) { bnx2x_sp_post()
3869 spin_lock_bh(&bp->spq_lock); bnx2x_sp_post()
3872 if (!atomic_read(&bp->eq_spq_left)) { bnx2x_sp_post()
3874 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post()
3878 } else if (!atomic_read(&bp->cq_spq_left)) { bnx2x_sp_post()
3880 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post()
3885 spe = bnx2x_sp_get_next(bp); bnx2x_sp_post()
3890 HW_CID(bp, cid)); bnx2x_sp_post()
3899 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & bnx2x_sp_post()
3916 atomic_dec(&bp->eq_spq_left); bnx2x_sp_post()
3918 atomic_dec(&bp->cq_spq_left); bnx2x_sp_post()
3922 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), bnx2x_sp_post()
3923 (u32)(U64_LO(bp->spq_mapping) + bnx2x_sp_post()
3924 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, bnx2x_sp_post()
3925 HW_CID(bp, cid), data_hi, data_lo, type, bnx2x_sp_post()
3926 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); bnx2x_sp_post()
3928 bnx2x_sp_prod_update(bp); bnx2x_sp_post()
3929 spin_unlock_bh(&bp->spq_lock); bnx2x_sp_post()
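
The bnx2x_sp_get_next()/bnx2x_sp_post() hits above show the slow-path queue (SPQ) pattern: a producer BD pointer that wraps back to the ring base at the last BD, guarded by a spinlock and by atomic credit counters (eq_spq_left/cq_spq_left) that fail the post when exhausted. A minimal standalone sketch of the same wrap-and-credit pattern, with hypothetical names and sizes rather than the driver's types:

    #include <stdatomic.h>
    #include <stdio.h>

    #define SPQ_DESC_CNT 8          /* hypothetical ring size */

    struct spq_bd { int cmd; };     /* stand-in for struct eth_spe */

    struct spq {
        struct spq_bd ring[SPQ_DESC_CNT];
        struct spq_bd *prod_bd;     /* next BD to fill */
        struct spq_bd *last_bd;     /* last valid BD before the wrap */
        unsigned prod_idx;
        atomic_int spq_left;        /* posting credits */
    };

    static void spq_init(struct spq *q)
    {
        q->prod_bd = q->ring;
        q->last_bd = &q->ring[SPQ_DESC_CNT - 1];
        q->prod_idx = 0;
        atomic_init(&q->spq_left, SPQ_DESC_CNT);
    }

    /* Mirrors bnx2x_sp_get_next(): hand out the current BD, then advance
     * the producer, wrapping pointer and index together at the end. */
    static struct spq_bd *spq_get_next(struct spq *q)
    {
        struct spq_bd *bd = q->prod_bd;

        if (q->prod_bd == q->last_bd) {
            q->prod_bd = q->ring;
            q->prod_idx = 0;
        } else {
            q->prod_bd++;
            q->prod_idx++;
        }
        return bd;
    }

    /* Mirrors the credit check in bnx2x_sp_post(): fail fast if no room
     * (the driver returns -EBUSY and does this under spq_lock). */
    static int spq_post(struct spq *q, int cmd)
    {
        struct spq_bd *bd;

        if (atomic_load(&q->spq_left) == 0)
            return -1;
        bd = spq_get_next(q);
        bd->cmd = cmd;
        atomic_fetch_sub(&q->spq_left, 1);
        /* real driver: write prod_idx out to XSTORM_SPQ_PROD_OFFSET here */
        return 0;
    }

    int main(void)
    {
        struct spq q;

        spq_init(&q);
        for (int i = 0; i < 10; i++)
            printf("post %d -> %s (prod_idx %u)\n",
                   i, spq_post(&q, i) ? "busy" : "ok", q.prod_idx);
        return 0;
    }
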
3934 static int bnx2x_acquire_alr(struct bnx2x *bp) bnx2x_acquire_alr() argument
3941 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); bnx2x_acquire_alr()
3942 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); bnx2x_acquire_alr()
3957 static void bnx2x_release_alr(struct bnx2x *bp) bnx2x_release_alr() argument
3959 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); bnx2x_release_alr()
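
bnx2x_acquire_alr()/bnx2x_release_alr() above implement a hardware-arbitrated lock: write the lock request to MCP_REG_MCPR_ACCESS_LOCK, read it back, and retry until the lock bit is actually observed. A toy sketch of that write-then-verify loop over a plain variable (with real hardware the write can be refused, which is exactly what the read-back detects; the names and retry budget below are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define LOCK_BIT 0x80000000u    /* stand-in for MCPR_ACCESS_LOCK_LOCK */

    static unsigned mcpr_access_lock;   /* stand-in for the MCP register */

    static unsigned reg_rd(volatile unsigned *r) { return *r; }
    static void reg_wr(volatile unsigned *r, unsigned v) { *r = v; }

    /* Mirrors bnx2x_acquire_alr(): request the lock, poll the read-back
     * until the bit is reported as set, bounded number of attempts. */
    static bool alr_acquire(void)
    {
        for (int i = 0; i < 100; i++) {
            reg_wr(&mcpr_access_lock, LOCK_BIT);
            if (reg_rd(&mcpr_access_lock) & LOCK_BIT)
                return true;
            /* the driver sleeps a few ms between attempts */
        }
        return false;
    }

    static void alr_release(void)
    {
        reg_wr(&mcpr_access_lock, 0);   /* as in bnx2x_release_alr() */
    }

    int main(void)
    {
        printf("acquired: %d\n", alr_acquire());
        alr_release();
        return 0;
    }
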
3965 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) bnx2x_update_dsb_idx() argument
3967 struct host_sp_status_block *def_sb = bp->def_status_blk; bnx2x_update_dsb_idx()
3971 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { bnx2x_update_dsb_idx()
3972 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; bnx2x_update_dsb_idx()
3976 if (bp->def_idx != def_sb->sp_sb.running_index) { bnx2x_update_dsb_idx()
3977 bp->def_idx = def_sb->sp_sb.running_index; bnx2x_update_dsb_idx()
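
bnx2x_update_dsb_idx() above compares the driver's cached attention and slow-path indices against the running indices in the default status block, latches whatever moved, and returns a bitmask of pending work. A small sketch of that latch-and-report pattern, with simplified hypothetical structures:

    #include <stdio.h>

    #define HAS_ATTN 0x1   /* hypothetical flags, in the spirit of */
    #define HAS_SP   0x2   /* BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX */

    struct def_sb { unsigned short attn_idx, sp_idx; };  /* simplified */

    struct dev {
        struct def_sb *sb;
        unsigned short def_att_idx, def_idx;  /* last indices we saw */
    };

    /* Mirrors bnx2x_update_dsb_idx(): latch any index that moved and
     * report which kinds of work are pending. */
    static unsigned short update_dsb_idx(struct dev *d)
    {
        unsigned short rc = 0;

        if (d->def_att_idx != d->sb->attn_idx) {
            d->def_att_idx = d->sb->attn_idx;
            rc |= HAS_ATTN;
        }
        if (d->def_idx != d->sb->sp_idx) {
            d->def_idx = d->sb->sp_idx;
            rc |= HAS_SP;
        }
        return rc;
    }

    int main(void)
    {
        struct def_sb sb = { 1, 0 };
        struct dev d = { &sb, 0, 0 };

        printf("work 0x%x\n", update_dsb_idx(&d)); /* 0x1: attention moved */
        printf("work 0x%x\n", update_dsb_idx(&d)); /* 0x0: nothing new */
        return 0;
    }
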
3990 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) bnx2x_attn_int_asserted() argument
3992 int port = BP_PORT(bp); bnx2x_attn_int_asserted()
4001 if (bp->attn_state & asserted) bnx2x_attn_int_asserted()
4004 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); bnx2x_attn_int_asserted()
4005 aeu_mask = REG_RD(bp, aeu_addr); bnx2x_attn_int_asserted()
4012 REG_WR(bp, aeu_addr, aeu_mask); bnx2x_attn_int_asserted()
4013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); bnx2x_attn_int_asserted()
4015 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); bnx2x_attn_int_asserted()
4016 bp->attn_state |= asserted; bnx2x_attn_int_asserted()
4017 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); bnx2x_attn_int_asserted()
4022 bnx2x_acquire_phy_lock(bp); bnx2x_attn_int_asserted()
4025 nig_mask = REG_RD(bp, nig_int_mask_addr); bnx2x_attn_int_asserted()
4031 REG_WR(bp, nig_int_mask_addr, 0); bnx2x_attn_int_asserted()
4033 bnx2x_link_attn(bp); bnx2x_attn_int_asserted()
4053 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); bnx2x_attn_int_asserted()
4057 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); bnx2x_attn_int_asserted()
4061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); bnx2x_attn_int_asserted()
4066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); bnx2x_attn_int_asserted()
4070 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); bnx2x_attn_int_asserted()
4074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); bnx2x_attn_int_asserted()
4080 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_attn_int_asserted()
4087 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); bnx2x_attn_int_asserted()
4088 REG_WR(bp, reg_addr, asserted); bnx2x_attn_int_asserted()
4095 if (bp->common.int_block != INT_BLOCK_HC) { bnx2x_attn_int_asserted()
4098 igu_acked = REG_RD(bp, bnx2x_attn_int_asserted()
4107 REG_WR(bp, nig_int_mask_addr, nig_mask); bnx2x_attn_int_asserted()
4108 bnx2x_release_phy_lock(bp); bnx2x_attn_int_asserted()
4112 static void bnx2x_fan_failure(struct bnx2x *bp) bnx2x_fan_failure() argument
4114 int port = BP_PORT(bp); bnx2x_fan_failure()
4118 SHMEM_RD(bp, bnx2x_fan_failure()
4123 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, bnx2x_fan_failure()
4127 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n" bnx2x_fan_failure()
4134 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); bnx2x_fan_failure()
4137 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted0() argument
4139 int port = BP_PORT(bp); bnx2x_attn_int_deasserted0()
4148 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted0()
4150 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted0()
4155 bnx2x_hw_reset_phy(&bp->link_params); bnx2x_attn_int_deasserted0()
4156 bnx2x_fan_failure(bp); bnx2x_attn_int_deasserted0()
4159 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { bnx2x_attn_int_deasserted0()
4160 bnx2x_acquire_phy_lock(bp); bnx2x_attn_int_deasserted0()
4161 bnx2x_handle_module_detect_int(&bp->link_params); bnx2x_attn_int_deasserted0()
4162 bnx2x_release_phy_lock(bp); bnx2x_attn_int_deasserted0()
4167 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted0()
4169 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted0()
4177 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted1() argument
4183 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); bnx2x_attn_int_deasserted1()
4192 int port = BP_PORT(bp); bnx2x_attn_int_deasserted1()
4198 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted1()
4200 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted1()
4208 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted2() argument
4214 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); bnx2x_attn_int_deasserted2()
4222 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); bnx2x_attn_int_deasserted2()
4228 if (!CHIP_IS_E1x(bp)) { bnx2x_attn_int_deasserted2()
4229 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); bnx2x_attn_int_deasserted2()
4236 int port = BP_PORT(bp); bnx2x_attn_int_deasserted2()
4242 val = REG_RD(bp, reg_offset); bnx2x_attn_int_deasserted2()
4244 REG_WR(bp, reg_offset, val); bnx2x_attn_int_deasserted2()
4252 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted3() argument
4259 int func = BP_FUNC(bp); bnx2x_attn_int_deasserted3()
4261 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_attn_int_deasserted3()
4262 bnx2x_read_mf_cfg(bp); bnx2x_attn_int_deasserted3()
4263 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, bnx2x_attn_int_deasserted3()
4264 func_mf_config[BP_ABS_FUNC(bp)].config); bnx2x_attn_int_deasserted3()
4265 val = SHMEM_RD(bp, bnx2x_attn_int_deasserted3()
4266 func_mb[BP_FW_MB_IDX(bp)].drv_status); bnx2x_attn_int_deasserted3()
4270 bnx2x_oem_event(bp, bnx2x_attn_int_deasserted3()
4275 bnx2x_set_mf_bw(bp); bnx2x_attn_int_deasserted3()
4278 bnx2x_handle_drv_info_req(bp); bnx2x_attn_int_deasserted3()
4281 bnx2x_schedule_iov_task(bp, bnx2x_attn_int_deasserted3()
4284 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_attn_int_deasserted3()
4285 bnx2x_pmf_update(bp); bnx2x_attn_int_deasserted3()
4287 if (bp->port.pmf && bnx2x_attn_int_deasserted3()
4289 bp->dcbx_enabled > 0) bnx2x_attn_int_deasserted3()
4291 bnx2x_dcbx_set_params(bp, bnx2x_attn_int_deasserted3()
4294 bnx2x_handle_afex_cmd(bp, bnx2x_attn_int_deasserted3()
4297 bnx2x_handle_eee_event(bp); bnx2x_attn_int_deasserted3()
4300 bnx2x_handle_update_svid_cmd(bp); bnx2x_attn_int_deasserted3()
4302 if (bp->link_vars.periodic_flags & bnx2x_attn_int_deasserted3()
4305 bnx2x_acquire_phy_lock(bp); bnx2x_attn_int_deasserted3()
4306 bp->link_vars.periodic_flags &= bnx2x_attn_int_deasserted3()
4308 bnx2x_release_phy_lock(bp); bnx2x_attn_int_deasserted3()
4309 if (IS_MF(bp)) bnx2x_attn_int_deasserted3()
4310 bnx2x_link_sync_notify(bp); bnx2x_attn_int_deasserted3()
4311 bnx2x_link_report(bp); bnx2x_attn_int_deasserted3()
4316 bnx2x__link_status_update(bp); bnx2x_attn_int_deasserted3()
4320 bnx2x_mc_assert(bp); bnx2x_attn_int_deasserted3()
4321 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); bnx2x_attn_int_deasserted3()
4322 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); bnx2x_attn_int_deasserted3()
4323 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); bnx2x_attn_int_deasserted3()
4324 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); bnx2x_attn_int_deasserted3()
4330 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); bnx2x_attn_int_deasserted3()
4331 bnx2x_fw_dump(bp); bnx2x_attn_int_deasserted3()
4340 val = CHIP_IS_E1(bp) ? 0 : bnx2x_attn_int_deasserted3()
4341 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); bnx2x_attn_int_deasserted3()
4345 val = CHIP_IS_E1(bp) ? 0 : bnx2x_attn_int_deasserted3()
4346 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); bnx2x_attn_int_deasserted3()
4349 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); bnx2x_attn_int_deasserted3()
4382 void bnx2x_set_reset_global(struct bnx2x *bp) bnx2x_set_reset_global() argument
4385 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_global()
4386 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_set_reset_global()
4387 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); bnx2x_set_reset_global()
4388 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_global()
4396 static void bnx2x_clear_reset_global(struct bnx2x *bp) bnx2x_clear_reset_global() argument
4399 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_clear_reset_global()
4400 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_clear_reset_global()
4401 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); bnx2x_clear_reset_global()
4402 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_clear_reset_global()
4410 static bool bnx2x_reset_is_global(struct bnx2x *bp) bnx2x_reset_is_global() argument
4412 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_reset_is_global()
4423 static void bnx2x_set_reset_done(struct bnx2x *bp) bnx2x_set_reset_done() argument
4426 u32 bit = BP_PATH(bp) ? bnx2x_set_reset_done()
4428 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_done()
4429 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_set_reset_done()
4433 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); bnx2x_set_reset_done()
4435 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_done()
4443 void bnx2x_set_reset_in_progress(struct bnx2x *bp) bnx2x_set_reset_in_progress() argument
4446 u32 bit = BP_PATH(bp) ? bnx2x_set_reset_in_progress()
4448 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_in_progress()
4449 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_set_reset_in_progress()
4453 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); bnx2x_set_reset_in_progress()
4454 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_reset_in_progress()
4461 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) bnx2x_reset_is_done() argument
4463 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_reset_is_done()
4476 void bnx2x_set_pf_load(struct bnx2x *bp) bnx2x_set_pf_load() argument
4479 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : bnx2x_set_pf_load()
4481 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : bnx2x_set_pf_load()
4484 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_pf_load()
4485 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_set_pf_load()
4493 val1 |= (1 << bp->pf_num); bnx2x_set_pf_load()
4501 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); bnx2x_set_pf_load()
4502 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_set_pf_load()
4508 * @bp: driver handle
4514 bool bnx2x_clear_pf_load(struct bnx2x *bp) bnx2x_clear_pf_load() argument
4517 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : bnx2x_clear_pf_load()
4519 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : bnx2x_clear_pf_load()
4522 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_clear_pf_load()
4523 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_clear_pf_load()
4530 val1 &= ~(1 << bp->pf_num); bnx2x_clear_pf_load()
4538 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); bnx2x_clear_pf_load()
4539 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); bnx2x_clear_pf_load()
4548 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) bnx2x_get_load_status() argument
4554 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); bnx2x_get_load_status()
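
The bnx2x_set_pf_load()/bnx2x_clear_pf_load()/bnx2x_get_load_status() hits show how per-PF "loaded" bits live in a per-path mask/shift field of the recovery register, with clear_pf_load() reporting whether the caller was the last loaded function on its path. A sketch of the same read-modify-write bit bookkeeping (the layout below is invented for illustration; the driver also holds a HW lock around these updates):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical layout: path 0 PFs in bits 0-3, path 1 PFs in bits
     * 4-7, echoing BNX2X_PATH0/1_LOAD_CNT_MASK and _SHIFT. */
    #define PATH_MASK(path)  ((path) ? 0xf0u : 0x0fu)
    #define PATH_SHIFT(path) ((path) ? 4 : 0)

    static unsigned recovery_reg;  /* stand-in for BNX2X_RECOVERY_GLOB_REG */

    /* Mirrors bnx2x_set_pf_load(): extract this path's field, set our
     * PF's bit, and write the merged value back. */
    static void set_pf_load(int path, int pf_num)
    {
        unsigned val = recovery_reg;
        unsigned field = (val & PATH_MASK(path)) >> PATH_SHIFT(path);

        field |= 1u << pf_num;
        recovery_reg = (val & ~PATH_MASK(path)) | (field << PATH_SHIFT(path));
    }

    /* Mirrors bnx2x_clear_pf_load(): clear our bit and report whether
     * we were the last loaded PF on this path. */
    static bool clear_pf_load(int path, int pf_num)
    {
        unsigned val = recovery_reg;
        unsigned field = (val & PATH_MASK(path)) >> PATH_SHIFT(path);

        field &= ~(1u << pf_num);
        recovery_reg = (val & ~PATH_MASK(path)) | (field << PATH_SHIFT(path));
        return field == 0;
    }

    int main(void)
    {
        set_pf_load(0, 1);
        set_pf_load(0, 3);
        printf("last? %d\n", clear_pf_load(0, 1)); /* 0: PF3 still loaded */
        printf("last? %d\n", clear_pf_load(0, 3)); /* 1: path now empty */
        return 0;
    }
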
4566 static void _print_parity(struct bnx2x *bp, u32 reg) _print_parity() argument
4568 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); _print_parity()
4576 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, bnx2x_check_blocks_with_parity0() argument
4594 _print_parity(bp, bnx2x_check_blocks_with_parity0()
4600 _print_parity(bp, PRS_REG_PRS_PRTY_STS); bnx2x_check_blocks_with_parity0()
4604 _print_parity(bp, bnx2x_check_blocks_with_parity0()
4610 _print_parity(bp, SRC_REG_SRC_PRTY_STS); bnx2x_check_blocks_with_parity0()
4614 _print_parity(bp, TCM_REG_TCM_PRTY_STS); bnx2x_check_blocks_with_parity0()
4619 _print_parity(bp, bnx2x_check_blocks_with_parity0()
4621 _print_parity(bp, bnx2x_check_blocks_with_parity0()
4626 _print_parity(bp, GRCBASE_XPB + bnx2x_check_blocks_with_parity0()
4640 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, bnx2x_check_blocks_with_parity1() argument
4658 _print_parity(bp, PBF_REG_PBF_PRTY_STS); bnx2x_check_blocks_with_parity1()
4664 _print_parity(bp, QM_REG_QM_PRTY_STS); bnx2x_check_blocks_with_parity1()
4670 _print_parity(bp, TM_REG_TM_PRTY_STS); bnx2x_check_blocks_with_parity1()
4676 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4683 _print_parity(bp, XCM_REG_XCM_PRTY_STS); bnx2x_check_blocks_with_parity1()
4690 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4692 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4700 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4707 if (CHIP_IS_E1x(bp)) { bnx2x_check_blocks_with_parity1()
4708 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4711 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4713 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4728 _print_parity(bp, DBG_REG_DBG_PRTY_STS); bnx2x_check_blocks_with_parity1()
4734 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4741 _print_parity(bp, UCM_REG_UCM_PRTY_STS); bnx2x_check_blocks_with_parity1()
4748 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4750 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4757 _print_parity(bp, GRCBASE_UPB + bnx2x_check_blocks_with_parity1()
4764 _print_parity(bp, bnx2x_check_blocks_with_parity1()
4771 _print_parity(bp, CCM_REG_CCM_PRTY_STS); bnx2x_check_blocks_with_parity1()
4784 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, bnx2x_check_blocks_with_parity2() argument
4802 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4804 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4809 _print_parity(bp, PXP_REG_PXP_PRTY_STS); bnx2x_check_blocks_with_parity2()
4810 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4812 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4821 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4826 _print_parity(bp, CDU_REG_CDU_PRTY_STS); bnx2x_check_blocks_with_parity2()
4830 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4835 if (CHIP_IS_E1x(bp)) bnx2x_check_blocks_with_parity2()
4836 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4839 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4844 _print_parity(bp, bnx2x_check_blocks_with_parity2()
4858 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, bnx2x_check_blocks_with_parity3() argument
4894 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, bnx2x_check_blocks_with_parity3()
4907 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, bnx2x_check_blocks_with_parity4() argument
4925 _print_parity(bp, bnx2x_check_blocks_with_parity4()
4930 _print_parity(bp, bnx2x_check_blocks_with_parity4()
4943 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, bnx2x_parity_attn() argument
4968 netdev_err(bp->dev, bnx2x_parity_attn()
4974 res |= bnx2x_check_blocks_with_parity0(bp, bnx2x_parity_attn()
4976 res |= bnx2x_check_blocks_with_parity1(bp, bnx2x_parity_attn()
4978 res |= bnx2x_check_blocks_with_parity2(bp, bnx2x_parity_attn()
4980 res |= bnx2x_check_blocks_with_parity3(bp, bnx2x_parity_attn()
4982 res |= bnx2x_check_blocks_with_parity4(bp, bnx2x_parity_attn()
4995 * @bp: driver handle
4999 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) bnx2x_chk_parity_attn() argument
5002 int port = BP_PORT(bp); bnx2x_chk_parity_attn()
5004 attn.sig[0] = REG_RD(bp, bnx2x_chk_parity_attn()
5007 attn.sig[1] = REG_RD(bp, bnx2x_chk_parity_attn()
5010 attn.sig[2] = REG_RD(bp, bnx2x_chk_parity_attn()
5013 attn.sig[3] = REG_RD(bp, bnx2x_chk_parity_attn()
5019 attn.sig[3] &= ((REG_RD(bp, bnx2x_chk_parity_attn()
5025 if (!CHIP_IS_E1x(bp)) bnx2x_chk_parity_attn()
5026 attn.sig[4] = REG_RD(bp, bnx2x_chk_parity_attn()
5030 return bnx2x_parity_attn(bp, global, print, attn.sig); bnx2x_chk_parity_attn()
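
bnx2x_chk_parity_attn() above reads the AEU after-invert registers into attn.sig[0..4], and bnx2x_parity_attn() ORs together per-register checks against the HW_PRTY_ASSERT_SET_* masks. A sketch of that mask-and-reduce, with made-up mask values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-register parity masks, standing in for
     * HW_PRTY_ASSERT_SET_0..4. */
    static const unsigned prty_mask[5] = { 0x000f, 0x00f0, 0x0f00, 0xf000, 0x3 };

    /* Mirrors the reduction in bnx2x_parity_attn(): a parity attention
     * is pending iff any signature has a bit set under its mask. */
    static bool chk_parity(const unsigned sig[5])
    {
        bool res = false;

        for (int i = 0; i < 5; i++)
            if (sig[i] & prty_mask[i]) {
                printf("parity in sig[%d]: 0x%x\n", i, sig[i] & prty_mask[i]);
                res = true;
            }
        return res;
    }

    int main(void)
    {
        unsigned sig[5] = { 0, 0x10, 0, 0, 0 };

        printf("attention pending: %d\n", chk_parity(sig));
        return 0;
    }
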
5033 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) bnx2x_attn_int_deasserted4() argument
5038 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); bnx2x_attn_int_deasserted4()
5062 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); bnx2x_attn_int_deasserted4()
5086 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) bnx2x_attn_int_deasserted() argument
5089 int port = BP_PORT(bp); bnx2x_attn_int_deasserted()
5098 bnx2x_acquire_alr(bp); bnx2x_attn_int_deasserted()
5100 if (bnx2x_chk_parity_attn(bp, &global, true)) { bnx2x_attn_int_deasserted()
5102 bp->recovery_state = BNX2X_RECOVERY_INIT; bnx2x_attn_int_deasserted()
5103 schedule_delayed_work(&bp->sp_rtnl_task, 0); bnx2x_attn_int_deasserted()
5105 bnx2x_int_disable(bp); bnx2x_attn_int_deasserted()
5112 bnx2x_release_alr(bp); bnx2x_attn_int_deasserted()
5116 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); bnx2x_attn_int_deasserted()
5117 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); bnx2x_attn_int_deasserted()
5118 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); bnx2x_attn_int_deasserted()
5119 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); bnx2x_attn_int_deasserted()
5120 if (!CHIP_IS_E1x(bp)) bnx2x_attn_int_deasserted()
5122 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); bnx2x_attn_int_deasserted()
5131 group_mask = &bp->attn_group[index]; bnx2x_attn_int_deasserted()
5139 bnx2x_attn_int_deasserted4(bp, bnx2x_attn_int_deasserted()
5141 bnx2x_attn_int_deasserted3(bp, bnx2x_attn_int_deasserted()
5143 bnx2x_attn_int_deasserted1(bp, bnx2x_attn_int_deasserted()
5145 bnx2x_attn_int_deasserted2(bp, bnx2x_attn_int_deasserted()
5147 bnx2x_attn_int_deasserted0(bp, bnx2x_attn_int_deasserted()
5152 bnx2x_release_alr(bp); bnx2x_attn_int_deasserted()
5154 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_attn_int_deasserted()
5162 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); bnx2x_attn_int_deasserted()
5163 REG_WR(bp, reg_addr, val); bnx2x_attn_int_deasserted()
5165 if (~bp->attn_state & deasserted) bnx2x_attn_int_deasserted()
5171 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); bnx2x_attn_int_deasserted()
5172 aeu_mask = REG_RD(bp, reg_addr); bnx2x_attn_int_deasserted()
5179 REG_WR(bp, reg_addr, aeu_mask); bnx2x_attn_int_deasserted()
5180 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); bnx2x_attn_int_deasserted()
5182 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); bnx2x_attn_int_deasserted()
5183 bp->attn_state &= ~deasserted; bnx2x_attn_int_deasserted()
5184 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); bnx2x_attn_int_deasserted()
5187 static void bnx2x_attn_int(struct bnx2x *bp) bnx2x_attn_int() argument
5190 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. bnx2x_attn_int()
5192 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. bnx2x_attn_int()
5194 u32 attn_state = bp->attn_state; bnx2x_attn_int()
5209 bnx2x_attn_int_asserted(bp, asserted); bnx2x_attn_int()
5212 bnx2x_attn_int_deasserted(bp, deasserted); bnx2x_attn_int()
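
bnx2x_attn_int() above derives edge events from three words: the new attention bits, the acked bits, and the cached attn_state, then dispatches the asserted and deasserted sets separately. The derivation itself is two bitwise expressions, sketched here:

    #include <stdio.h>

    /* Mirrors the edge detection at the top of bnx2x_attn_int(): a line
     * is "asserted" when the fresh bits show it set but neither the ack
     * nor our cached state do, and "deasserted" in the mirror case. */
    static void attn_edges(unsigned bits, unsigned ack, unsigned state,
                           unsigned *asserted, unsigned *deasserted)
    {
        *asserted   =  bits & ~ack & ~state;
        *deasserted = ~bits &  ack &  state;
    }

    int main(void)
    {
        unsigned a, d;

        /* line 0 newly raised; line 1 previously handled, now dropped */
        attn_edges(0x1 /* bits */, 0x2 /* ack */, 0x2 /* state */, &a, &d);
        printf("asserted 0x%x deasserted 0x%x\n", a, d); /* 0x1 / 0x2 */
        return 0;
    }
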
5215 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, bnx2x_igu_ack_sb() argument
5218 u32 igu_addr = bp->igu_base_addr; bnx2x_igu_ack_sb()
5220 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, bnx2x_igu_ack_sb()
5224 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) bnx2x_update_eq_prod() argument
5227 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); bnx2x_update_eq_prod()
5231 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, bnx2x_cnic_handle_cfc_del() argument
5236 if (!bp->cnic_eth_dev.starting_cid || bnx2x_cnic_handle_cfc_del()
5237 (cid < bp->cnic_eth_dev.starting_cid && bnx2x_cnic_handle_cfc_del()
5238 cid != bp->cnic_eth_dev.iscsi_l2_cid)) bnx2x_cnic_handle_cfc_del()
5247 bnx2x_panic_dump(bp, false); bnx2x_cnic_handle_cfc_del()
5249 bnx2x_cnic_cfc_comp(bp, cid, err); bnx2x_cnic_handle_cfc_del()
5253 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) bnx2x_handle_mcast_eqe() argument
5260 rparam.mcast_obj = &bp->mcast_obj; bnx2x_handle_mcast_eqe()
5262 netif_addr_lock_bh(bp->dev); bnx2x_handle_mcast_eqe()
5265 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); bnx2x_handle_mcast_eqe()
5268 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { bnx2x_handle_mcast_eqe()
5269 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_handle_mcast_eqe()
5275 netif_addr_unlock_bh(bp->dev); bnx2x_handle_mcast_eqe()
5278 static void bnx2x_handle_classification_eqe(struct bnx2x *bp, bnx2x_handle_classification_eqe() argument
5293 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) bnx2x_handle_classification_eqe()
5294 vlan_mac_obj = &bp->iscsi_l2_mac_obj; bnx2x_handle_classification_eqe()
5296 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; bnx2x_handle_classification_eqe()
5301 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj; bnx2x_handle_classification_eqe()
5308 bnx2x_handle_mcast_eqe(bp); bnx2x_handle_classification_eqe()
5316 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); bnx2x_handle_classification_eqe()
5324 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5326 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) bnx2x_handle_rx_mode_eqe() argument
5328 netif_addr_lock_bh(bp->dev); bnx2x_handle_rx_mode_eqe()
5330 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); bnx2x_handle_rx_mode_eqe()
5333 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) bnx2x_handle_rx_mode_eqe()
5334 bnx2x_set_storm_rx_mode(bp); bnx2x_handle_rx_mode_eqe()
5336 &bp->sp_state)) bnx2x_handle_rx_mode_eqe()
5337 bnx2x_set_iscsi_eth_rx_mode(bp, true); bnx2x_handle_rx_mode_eqe()
5339 &bp->sp_state)) bnx2x_handle_rx_mode_eqe()
5340 bnx2x_set_iscsi_eth_rx_mode(bp, false); bnx2x_handle_rx_mode_eqe()
5342 netif_addr_unlock_bh(bp->dev); bnx2x_handle_rx_mode_eqe()
5345 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, bnx2x_after_afex_vif_lists() argument
5352 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, bnx2x_after_afex_vif_lists()
5357 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); bnx2x_after_afex_vif_lists()
5362 static void bnx2x_after_function_update(struct bnx2x *bp) bnx2x_after_function_update() argument
5381 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { bnx2x_after_function_update()
5386 (bp->afex_def_vlan_tag & VLAN_VID_MASK); bnx2x_after_function_update()
5390 for_each_eth_queue(bp, q) { for_each_eth_queue()
5392 fp = &bp->fp[q]; for_each_eth_queue()
5393 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; for_each_eth_queue()
5396 rc = bnx2x_queue_state_change(bp, &queue_params); for_each_eth_queue()
5402 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5403 fp = &bp->fp[FCOE_IDX(bp)];
5404 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5411 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5415 rc = bnx2x_queue_state_change(bp, &queue_params);
5421 bnx2x_link_report(bp);
5422 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5427 struct bnx2x *bp, u32 cid) bnx2x_cid_to_q_obj()
5431 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) bnx2x_cid_to_q_obj()
5432 return &bnx2x_fcoe_sp_obj(bp, q_obj); bnx2x_cid_to_q_obj()
5434 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; bnx2x_cid_to_q_obj()
5437 static void bnx2x_eq_int(struct bnx2x *bp) bnx2x_eq_int() argument
5446 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; bnx2x_eq_int()
5447 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; bnx2x_eq_int()
5449 hw_cons = le16_to_cpu(*bp->eq_cons_sb); bnx2x_eq_int()
5460 * specific bp, thus there is no need for "paired" read memory bnx2x_eq_int()
5463 sw_cons = bp->eq_cons; bnx2x_eq_int()
5464 sw_prod = bp->eq_prod; bnx2x_eq_int()
5466 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", bnx2x_eq_int()
5467 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); bnx2x_eq_int()
5472 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; bnx2x_eq_int()
5474 rc = bnx2x_iov_eq_sp_event(bp, elem); bnx2x_eq_int()
5489 bnx2x_vf_mbx_schedule(bp, bnx2x_eq_int()
5496 bp->stats_comp++); bnx2x_eq_int()
5503 * we may want to verify here that the bp state is bnx2x_eq_int()
5509 if (CNIC_LOADED(bp) && bnx2x_eq_int()
5510 !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) bnx2x_eq_int()
5513 q_obj = bnx2x_cid_to_q_obj(bp, cid); bnx2x_eq_int()
5515 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) bnx2x_eq_int()
5522 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); bnx2x_eq_int()
5523 if (f_obj->complete_cmd(bp, f_obj, bnx2x_eq_int()
5530 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); bnx2x_eq_int()
5531 if (f_obj->complete_cmd(bp, f_obj, bnx2x_eq_int()
5542 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) bnx2x_eq_int()
5550 f_obj->complete_cmd(bp, f_obj, bnx2x_eq_int()
5557 bnx2x_schedule_sp_rtnl(bp, cmd, 0); bnx2x_eq_int()
5563 f_obj->complete_cmd(bp, f_obj, bnx2x_eq_int()
5565 bnx2x_after_afex_vif_lists(bp, elem); bnx2x_eq_int()
5570 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) bnx2x_eq_int()
5578 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) bnx2x_eq_int()
5586 if (f_obj->complete_cmd(bp, f_obj, bnx2x_eq_int()
5592 switch (opcode | bp->state) { bnx2x_eq_int()
5617 bnx2x_handle_classification_eqe(bp, elem); bnx2x_eq_int()
5627 bnx2x_handle_mcast_eqe(bp); bnx2x_eq_int()
5637 bnx2x_handle_rx_mode_eqe(bp); bnx2x_eq_int()
5641 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", bnx2x_eq_int()
5642 elem->message.opcode, bp->state); bnx2x_eq_int()
5649 atomic_add(spqe_cnt, &bp->eq_spq_left); bnx2x_eq_int()
5651 bp->eq_cons = sw_cons; bnx2x_eq_int()
5652 bp->eq_prod = sw_prod; bnx2x_eq_int()
5657 bnx2x_update_eq_prod(bp, bp->eq_prod); bnx2x_eq_int()
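
The bnx2x_eq_int() hits show the event-queue consumer: read hw_cons from the status block, walk sw_cons/sw_prod forward handling each element, then return the consumed count to eq_spq_left and republish the producer to firmware. A simplified sketch of that loop (hypothetical sizes; the real NEXT_EQ_IDX also skips the page-link descriptors):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NUM_EQ_DESC 16                       /* hypothetical ring size */
    #define NEXT_EQ_IDX(i) (((i) + 1) % NUM_EQ_DESC)

    struct eq_elem { int opcode; };

    struct eq {
        struct eq_elem ring[NUM_EQ_DESC];
        unsigned short cons, prod;               /* like eq_cons/eq_prod */
        atomic_int spq_left;                     /* credits given back */
    };

    /* Mirrors the consumer loop in bnx2x_eq_int(): walk from the cached
     * consumer up to the hw consumer, handle each element, then return
     * the credits and (in the driver) call bnx2x_update_eq_prod(). */
    static void eq_int(struct eq *q, unsigned short hw_cons)
    {
        int handled = 0;

        for (; q->cons != hw_cons;
             q->cons = NEXT_EQ_IDX(q->cons), q->prod = NEXT_EQ_IDX(q->prod)) {
            struct eq_elem *el = &q->ring[q->cons];

            printf("handle opcode %d at idx %u\n", el->opcode, q->cons);
            handled++;
        }
        atomic_fetch_add(&q->spq_left, handled);
    }

    int main(void)
    {
        struct eq q;

        q.cons = 0;
        q.prod = 0;
        atomic_init(&q.spq_left, 0);
        for (int i = 0; i < NUM_EQ_DESC; i++)
            q.ring[i].opcode = 100 + i;
        eq_int(&q, 3);                           /* hw reports 3 new events */
        printf("credits back: %d\n", atomic_load(&q.spq_left));
        return 0;
    }
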
5662 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); bnx2x_sp_task() local
5668 if (atomic_read(&bp->interrupt_occurred)) { bnx2x_sp_task()
5671 u16 status = bnx2x_update_dsb_idx(bp); bnx2x_sp_task()
5675 atomic_set(&bp->interrupt_occurred, 0); bnx2x_sp_task()
5679 bnx2x_attn_int(bp); bnx2x_sp_task()
5685 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); bnx2x_sp_task()
5687 if (FCOE_INIT(bp) && bnx2x_sp_task()
5693 napi_schedule(&bnx2x_fcoe(bp, napi)); bnx2x_sp_task()
5698 bnx2x_eq_int(bp); bnx2x_sp_task()
5699 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, bnx2x_sp_task()
5700 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); bnx2x_sp_task()
5711 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, bnx2x_sp_task()
5712 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); bnx2x_sp_task()
5717 &bp->sp_state)) { bnx2x_sp_task()
5718 bnx2x_link_report(bp); bnx2x_sp_task()
5719 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); bnx2x_sp_task()
5726 struct bnx2x *bp = netdev_priv(dev); bnx2x_msix_sp_int() local
5728 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, bnx2x_msix_sp_int()
5732 if (unlikely(bp->panic)) bnx2x_msix_sp_int()
5736 if (CNIC_LOADED(bp)) { bnx2x_msix_sp_int()
5740 c_ops = rcu_dereference(bp->cnic_ops); bnx2x_msix_sp_int()
5742 c_ops->cnic_handler(bp->cnic_data, NULL); bnx2x_msix_sp_int()
5749 bnx2x_schedule_sp_task(bp); bnx2x_msix_sp_int()
5756 void bnx2x_drv_pulse(struct bnx2x *bp) bnx2x_drv_pulse() argument
5758 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, bnx2x_drv_pulse()
5759 bp->fw_drv_pulse_wr_seq); bnx2x_drv_pulse()
5764 struct bnx2x *bp = (struct bnx2x *) data; bnx2x_timer() local
5766 if (!netif_running(bp->dev)) bnx2x_timer()
5769 if (IS_PF(bp) && bnx2x_timer()
5770 !BP_NOMCP(bp)) { bnx2x_timer()
5771 int mb_idx = BP_FW_MB_IDX(bp); bnx2x_timer()
5775 ++bp->fw_drv_pulse_wr_seq; bnx2x_timer()
5776 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; bnx2x_timer()
5777 drv_pulse = bp->fw_drv_pulse_wr_seq; bnx2x_timer()
5778 bnx2x_drv_pulse(bp); bnx2x_timer()
5780 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & bnx2x_timer()
5792 if (bp->state == BNX2X_STATE_OPEN) bnx2x_timer()
5793 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); bnx2x_timer()
5796 if (IS_VF(bp)) bnx2x_timer()
5797 bnx2x_timer_sriov(bp); bnx2x_timer()
5799 mod_timer(&bp->timer, jiffies + bp->current_interval); bnx2x_timer()
5810 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) bnx2x_fill() argument
5815 REG_WR(bp, addr + i, fill); bnx2x_fill()
5818 REG_WR8(bp, addr + i, fill); bnx2x_fill()
5822 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, bnx2x_wr_fp_sb_data() argument
5829 REG_WR(bp, BAR_CSTRORM_INTMEM + bnx2x_wr_fp_sb_data()
5835 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) bnx2x_zero_fp_sb() argument
5843 if (!CHIP_IS_E1x(bp)) { bnx2x_zero_fp_sb()
5857 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); bnx2x_zero_fp_sb()
5859 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + bnx2x_zero_fp_sb()
5862 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + bnx2x_zero_fp_sb()
5868 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, bnx2x_wr_sp_sb_data() argument
5871 int func = BP_FUNC(bp); bnx2x_wr_sp_sb_data()
5874 REG_WR(bp, BAR_CSTRORM_INTMEM + bnx2x_wr_sp_sb_data()
5880 static void bnx2x_zero_sp_sb(struct bnx2x *bp) bnx2x_zero_sp_sb() argument
5882 int func = BP_FUNC(bp); bnx2x_zero_sp_sb()
5889 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); bnx2x_zero_sp_sb()
5891 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + bnx2x_zero_sp_sb()
5894 bnx2x_fill(bp, BAR_CSTRORM_INTMEM + bnx2x_zero_sp_sb()
5937 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, bnx2x_init_sb() argument
5948 if (CHIP_INT_MODE_IS_BC(bp)) bnx2x_init_sb()
5953 bnx2x_zero_fp_sb(bp, fw_sb_id); bnx2x_init_sb()
5955 if (!CHIP_IS_E1x(bp)) { bnx2x_init_sb()
5958 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); bnx2x_init_sb()
5961 sb_data_e2.common.p_func.vnic_id = BP_VN(bp); bnx2x_init_sb()
5973 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); bnx2x_init_sb()
5976 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); bnx2x_init_sb()
5994 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); bnx2x_init_sb()
5997 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, bnx2x_update_coalesce_sb() argument
6000 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, bnx2x_update_coalesce_sb()
6002 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, bnx2x_update_coalesce_sb()
6005 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, bnx2x_update_coalesce_sb()
6008 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, bnx2x_update_coalesce_sb()
6013 static void bnx2x_init_def_sb(struct bnx2x *bp) bnx2x_init_def_sb() argument
6015 struct host_sp_status_block *def_sb = bp->def_status_blk; bnx2x_init_def_sb()
6016 dma_addr_t mapping = bp->def_status_blk_mapping; bnx2x_init_def_sb()
6019 int port = BP_PORT(bp); bnx2x_init_def_sb()
6020 int func = BP_FUNC(bp); bnx2x_init_def_sb()
6027 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_init_def_sb()
6031 igu_sp_sb_index = bp->igu_dsb_id; bnx2x_init_def_sb()
6040 bp->attn_state = 0; bnx2x_init_def_sb()
6050 bp->attn_group[index].sig[sindex] = bnx2x_init_def_sb()
6051 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); bnx2x_init_def_sb()
6053 if (!CHIP_IS_E1x(bp)) bnx2x_init_def_sb()
6059 bp->attn_group[index].sig[4] = REG_RD(bp, bnx2x_init_def_sb()
6062 bp->attn_group[index].sig[4] = 0; bnx2x_init_def_sb()
6065 if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_init_def_sb()
6069 REG_WR(bp, reg_offset, U64_LO(section)); bnx2x_init_def_sb()
6070 REG_WR(bp, reg_offset + 4, U64_HI(section)); bnx2x_init_def_sb()
6071 } else if (!CHIP_IS_E1x(bp)) { bnx2x_init_def_sb()
6072 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); bnx2x_init_def_sb()
6073 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); bnx2x_init_def_sb()
6079 bnx2x_zero_sp_sb(bp); bnx2x_init_def_sb()
6088 sp_sb_data.p_func.vnic_id = BP_VN(bp); bnx2x_init_def_sb()
6091 bnx2x_wr_sp_sb_data(bp, &sp_sb_data); bnx2x_init_def_sb()
6093 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); bnx2x_init_def_sb()
6096 void bnx2x_update_coalesce(struct bnx2x *bp) bnx2x_update_coalesce() argument
6100 for_each_eth_queue(bp, i) bnx2x_update_coalesce()
6101 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, bnx2x_update_coalesce()
6102 bp->tx_ticks, bp->rx_ticks); bnx2x_update_coalesce()
6105 static void bnx2x_init_sp_ring(struct bnx2x *bp) bnx2x_init_sp_ring() argument
6107 spin_lock_init(&bp->spq_lock); bnx2x_init_sp_ring()
6108 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); bnx2x_init_sp_ring()
6110 bp->spq_prod_idx = 0; bnx2x_init_sp_ring()
6111 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; bnx2x_init_sp_ring()
6112 bp->spq_prod_bd = bp->spq; bnx2x_init_sp_ring()
6113 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; bnx2x_init_sp_ring()
6116 static void bnx2x_init_eq_ring(struct bnx2x *bp) bnx2x_init_eq_ring() argument
6121 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; bnx2x_init_eq_ring()
6124 cpu_to_le32(U64_HI(bp->eq_mapping + bnx2x_init_eq_ring()
6127 cpu_to_le32(U64_LO(bp->eq_mapping + bnx2x_init_eq_ring()
6130 bp->eq_cons = 0; bnx2x_init_eq_ring()
6131 bp->eq_prod = NUM_EQ_DESC; bnx2x_init_eq_ring()
6132 bp->eq_cons_sb = BNX2X_EQ_INDEX; bnx2x_init_eq_ring()
6134 atomic_set(&bp->eq_spq_left, bnx2x_init_eq_ring()
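
bnx2x_init_eq_ring() above chains the event queue's pages: the last descriptor of each page is not an event but the DMA address of the next page (stored as the U64_HI/U64_LO pair in the hits), and the last page points back to the first. A pointer-based sketch of that page chaining, with hypothetical sizes:

    #include <stdio.h>

    #define EQ_PAGES          2     /* hypothetical page count */
    #define EQ_DESC_CNT_PAGE  4     /* slots per page; last one is the link */

    union eq_slot {
        int event;                  /* a real event descriptor */
        union eq_slot *next_page;   /* or the "next page" link in last slot */
    };

    /* Mirrors bnx2x_init_eq_ring(): point each page's final slot at the
     * start of the following page, wrapping the last page to the first,
     * so the pages form one logical ring. */
    static void init_eq_ring(union eq_slot pages[EQ_PAGES][EQ_DESC_CNT_PAGE])
    {
        for (int i = 0; i < EQ_PAGES; i++)
            pages[i][EQ_DESC_CNT_PAGE - 1].next_page =
                pages[(i + 1) % EQ_PAGES];
    }

    int main(void)
    {
        union eq_slot pages[EQ_PAGES][EQ_DESC_CNT_PAGE];

        init_eq_ring(pages);
        printf("page0 links to page%d\n",
               pages[0][EQ_DESC_CNT_PAGE - 1].next_page == pages[1] ? 1 : 0);
        return 0;
    }
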
6139 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, bnx2x_set_q_rx_mode() argument
6153 ramrod_param.rx_mode_obj = &bp->rx_mode_obj; bnx2x_set_q_rx_mode()
6154 ramrod_param.func_id = BP_FUNC(bp); bnx2x_set_q_rx_mode()
6156 ramrod_param.pstate = &bp->sp_state; bnx2x_set_q_rx_mode()
6159 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); bnx2x_set_q_rx_mode()
6160 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); bnx2x_set_q_rx_mode()
6162 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); bnx2x_set_q_rx_mode()
6170 rc = bnx2x_config_rx_mode(bp, &ramrod_param); bnx2x_set_q_rx_mode()
6172 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); bnx2x_set_q_rx_mode()
6179 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, bnx2x_fill_accept_flags() argument
6204 if (bp->accept_any_vlan) { bnx2x_fill_accept_flags()
6220 if (bp->accept_any_vlan) { bnx2x_fill_accept_flags()
6240 if (IS_MF_SI(bp)) bnx2x_fill_accept_flags()
6258 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) bnx2x_set_storm_rx_mode() argument
6264 if (!NO_FCOE(bp)) bnx2x_set_storm_rx_mode()
6268 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, bnx2x_set_storm_rx_mode()
6276 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, bnx2x_set_storm_rx_mode()
6281 static void bnx2x_init_internal_common(struct bnx2x *bp) bnx2x_init_internal_common() argument
6288 REG_WR(bp, BAR_USTRORM_INTMEM + bnx2x_init_internal_common()
6290 if (!CHIP_IS_E1x(bp)) { bnx2x_init_internal_common()
6291 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, bnx2x_init_internal_common()
6292 CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_internal_common()
6297 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) bnx2x_init_internal() argument
6302 bnx2x_init_internal_common(bp); bnx2x_init_internal()
6322 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); bnx2x_fp_igu_sb_id()
6327 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); bnx2x_fp_fw_sb_id()
6332 if (CHIP_IS_E1x(fp->bp)) bnx2x_fp_cl_id()
6333 return BP_L_ID(fp->bp) + fp->index; bnx2x_fp_cl_id()
6338 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) bnx2x_init_eth_fp() argument
6340 struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; bnx2x_init_eth_fp()
6366 bnx2x_init_txdata(bp, fp->txdata_ptr[cos], for_each_cos_in_tx_queue()
6367 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), for_each_cos_in_tx_queue()
6368 FP_COS_TO_TXQ(fp, cos, bp), for_each_cos_in_tx_queue()
6374 if (IS_VF(bp))
6377 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6380 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6381 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6382 bnx2x_sp_mapping(bp, q_rdata), q_type);
6391 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6424 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp) bnx2x_init_tx_rings_cnic() argument
6428 for_each_tx_queue_cnic(bp, i) bnx2x_init_tx_rings_cnic()
6429 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); bnx2x_init_tx_rings_cnic()
6432 static void bnx2x_init_tx_rings(struct bnx2x *bp) bnx2x_init_tx_rings() argument
6437 for_each_eth_queue(bp, i) bnx2x_init_tx_rings()
6438 for_each_cos_in_tx_queue(&bp->fp[i], cos) bnx2x_init_tx_rings()
6439 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); bnx2x_init_tx_rings()
6442 static void bnx2x_init_fcoe_fp(struct bnx2x *bp) bnx2x_init_fcoe_fp() argument
6444 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); bnx2x_init_fcoe_fp()
6447 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_init_fcoe_fp()
6448 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, bnx2x_init_fcoe_fp()
6450 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); bnx2x_init_fcoe_fp()
6451 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; bnx2x_init_fcoe_fp()
6452 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; bnx2x_init_fcoe_fp()
6453 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; bnx2x_init_fcoe_fp()
6454 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), bnx2x_init_fcoe_fp()
6455 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, bnx2x_init_fcoe_fp()
6461 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); bnx2x_init_fcoe_fp()
6463 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = bnx2x_init_fcoe_fp()
6473 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, bnx2x_init_fcoe_fp()
6474 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_init_fcoe_fp()
6475 bnx2x_sp_mapping(bp, q_rdata), q_type); bnx2x_init_fcoe_fp()
6479 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, bnx2x_init_fcoe_fp()
6483 void bnx2x_nic_init_cnic(struct bnx2x *bp) bnx2x_nic_init_cnic() argument
6485 if (!NO_FCOE(bp)) bnx2x_nic_init_cnic()
6486 bnx2x_init_fcoe_fp(bp); bnx2x_nic_init_cnic()
6488 bnx2x_init_sb(bp, bp->cnic_sb_mapping, bnx2x_nic_init_cnic()
6490 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); bnx2x_nic_init_cnic()
6494 bnx2x_init_rx_rings_cnic(bp); bnx2x_nic_init_cnic()
6495 bnx2x_init_tx_rings_cnic(bp); bnx2x_nic_init_cnic()
6502 void bnx2x_pre_irq_nic_init(struct bnx2x *bp) bnx2x_pre_irq_nic_init() argument
6507 for_each_eth_queue(bp, i) bnx2x_pre_irq_nic_init()
6508 bnx2x_init_eth_fp(bp, i); bnx2x_pre_irq_nic_init()
6512 bnx2x_init_rx_rings(bp); bnx2x_pre_irq_nic_init()
6513 bnx2x_init_tx_rings(bp); bnx2x_pre_irq_nic_init()
6515 if (IS_PF(bp)) { bnx2x_pre_irq_nic_init()
6517 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, bnx2x_pre_irq_nic_init()
6518 bp->common.shmem_base, bnx2x_pre_irq_nic_init()
6519 bp->common.shmem2_base, BP_PORT(bp)); bnx2x_pre_irq_nic_init()
6522 bnx2x_init_def_sb(bp); bnx2x_pre_irq_nic_init()
6523 bnx2x_update_dsb_idx(bp); bnx2x_pre_irq_nic_init()
6524 bnx2x_init_sp_ring(bp); bnx2x_pre_irq_nic_init()
6526 bnx2x_memset_stats(bp); bnx2x_pre_irq_nic_init()
6530 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) bnx2x_post_irq_nic_init() argument
6532 bnx2x_init_eq_ring(bp); bnx2x_post_irq_nic_init()
6533 bnx2x_init_internal(bp, load_code); bnx2x_post_irq_nic_init()
6534 bnx2x_pf_init(bp); bnx2x_post_irq_nic_init()
6535 bnx2x_stats_init(bp); bnx2x_post_irq_nic_init()
6541 bnx2x_int_enable(bp); bnx2x_post_irq_nic_init()
6544 bnx2x_attn_int_deasserted0(bp, bnx2x_post_irq_nic_init()
6545 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & bnx2x_post_irq_nic_init()
6550 static int bnx2x_gunzip_init(struct bnx2x *bp) bnx2x_gunzip_init() argument
6552 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, bnx2x_gunzip_init()
6553 &bp->gunzip_mapping, GFP_KERNEL); bnx2x_gunzip_init()
6554 if (bp->gunzip_buf == NULL) bnx2x_gunzip_init()
6557 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); bnx2x_gunzip_init()
6558 if (bp->strm == NULL) bnx2x_gunzip_init()
6561 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); bnx2x_gunzip_init()
6562 if (bp->strm->workspace == NULL) bnx2x_gunzip_init()
6568 kfree(bp->strm); bnx2x_gunzip_init()
6569 bp->strm = NULL; bnx2x_gunzip_init()
6572 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, bnx2x_gunzip_init()
6573 bp->gunzip_mapping); bnx2x_gunzip_init()
6574 bp->gunzip_buf = NULL; bnx2x_gunzip_init()
6581 static void bnx2x_gunzip_end(struct bnx2x *bp) bnx2x_gunzip_end() argument
6583 if (bp->strm) { bnx2x_gunzip_end()
6584 vfree(bp->strm->workspace); bnx2x_gunzip_end()
6585 kfree(bp->strm); bnx2x_gunzip_end()
6586 bp->strm = NULL; bnx2x_gunzip_end()
6589 if (bp->gunzip_buf) { bnx2x_gunzip_end()
6590 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, bnx2x_gunzip_end()
6591 bp->gunzip_mapping); bnx2x_gunzip_end()
6592 bp->gunzip_buf = NULL; bnx2x_gunzip_end()
6596 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) bnx2x_gunzip() argument
6613 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; bnx2x_gunzip()
6614 bp->strm->avail_in = len - n; bnx2x_gunzip()
6615 bp->strm->next_out = bp->gunzip_buf; bnx2x_gunzip()
6616 bp->strm->avail_out = FW_BUF_SIZE; bnx2x_gunzip()
6618 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); bnx2x_gunzip()
6622 rc = zlib_inflate(bp->strm, Z_FINISH); bnx2x_gunzip()
6624 netdev_err(bp->dev, "Firmware decompression error: %s\n", bnx2x_gunzip()
6625 bp->strm->msg); bnx2x_gunzip()
6627 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); bnx2x_gunzip()
6628 if (bp->gunzip_outlen & 0x3) bnx2x_gunzip()
6629 netdev_err(bp->dev, bnx2x_gunzip()
6631 bp->gunzip_outlen); bnx2x_gunzip()
6632 bp->gunzip_outlen >>= 2; bnx2x_gunzip()
6634 zlib_inflateEnd(bp->strm); bnx2x_gunzip()
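
bnx2x_gunzip() inflates firmware with the kernel zlib: a raw-deflate stream (negative windowBits, no gzip header, which is why the driver skips header bytes before zbuf + n) decompressed in a single Z_FINISH pass into the fixed gunzip_buf. A userspace sketch of the same raw inflate using ordinary zlib (link with -lz; the data fed in is a stand-in, not a firmware image):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    /* Mirrors bnx2x_gunzip()'s core: raw deflate data (windowBits of
     * -MAX_WBITS) inflated in one Z_FINISH call into a fixed buffer. */
    static int raw_inflate(const unsigned char *in, unsigned in_len,
                           unsigned char *out, unsigned out_len)
    {
        z_stream s;
        int rc;

        memset(&s, 0, sizeof(s));
        s.next_in = (unsigned char *)in;
        s.avail_in = in_len;
        s.next_out = out;
        s.avail_out = out_len;
        if (inflateInit2(&s, -MAX_WBITS) != Z_OK)
            return -1;
        rc = inflate(&s, Z_FINISH);
        inflateEnd(&s);
        return rc == Z_STREAM_END ? (int)(out_len - s.avail_out) : -1;
    }

    int main(void)
    {
        const char *msg = "firmware blob stand-in";
        unsigned char raw[128], out[128];
        z_stream d;

        /* produce a raw deflate stream to feed the inflater */
        memset(&d, 0, sizeof(d));
        deflateInit2(&d, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -MAX_WBITS, 8,
                     Z_DEFAULT_STRATEGY);
        d.next_in = (unsigned char *)msg;
        d.avail_in = (unsigned)strlen(msg) + 1;
        d.next_out = raw;
        d.avail_out = sizeof(raw);
        deflate(&d, Z_FINISH);
        deflateEnd(&d);

        int n = raw_inflate(raw, sizeof(raw) - d.avail_out, out, sizeof(out));
        printf("inflated %d bytes: %s\n", n, n > 0 ? (char *)out : "(error)");
        return 0;
    }
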
6649 static void bnx2x_lb_pckt(struct bnx2x *bp) bnx2x_lb_pckt() argument
6657 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); bnx2x_lb_pckt()
6663 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); bnx2x_lb_pckt()
6670 static int bnx2x_int_mem_test(struct bnx2x *bp) bnx2x_int_mem_test() argument
6676 if (CHIP_REV_IS_FPGA(bp)) bnx2x_int_mem_test()
6678 else if (CHIP_REV_IS_EMUL(bp)) bnx2x_int_mem_test()
6684 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); bnx2x_int_mem_test()
6685 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); bnx2x_int_mem_test()
6686 REG_WR(bp, CFC_REG_DEBUG0, 0x1); bnx2x_int_mem_test()
6687 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); bnx2x_int_mem_test()
6690 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); bnx2x_int_mem_test()
6693 bnx2x_lb_pckt(bp); bnx2x_int_mem_test()
6700 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); bnx2x_int_mem_test()
6701 val = *bnx2x_sp(bp, wb_data[0]); bnx2x_int_mem_test()
6716 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); bnx2x_int_mem_test()
6729 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); bnx2x_int_mem_test()
6731 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); bnx2x_int_mem_test()
6733 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); bnx2x_int_mem_test()
6734 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); bnx2x_int_mem_test()
6739 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); bnx2x_int_mem_test()
6740 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); bnx2x_int_mem_test()
6741 REG_WR(bp, CFC_REG_DEBUG0, 0x1); bnx2x_int_mem_test()
6742 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); bnx2x_int_mem_test()
6745 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); bnx2x_int_mem_test()
6749 bnx2x_lb_pckt(bp); bnx2x_int_mem_test()
6756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); bnx2x_int_mem_test()
6757 val = *bnx2x_sp(bp, wb_data[0]); bnx2x_int_mem_test()
6770 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); bnx2x_int_mem_test()
6775 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); bnx2x_int_mem_test()
6780 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); bnx2x_int_mem_test()
6786 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); bnx2x_int_mem_test()
6787 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); bnx2x_int_mem_test()
6794 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); bnx2x_int_mem_test()
6796 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); bnx2x_int_mem_test()
6798 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); bnx2x_int_mem_test()
6799 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); bnx2x_int_mem_test()
6800 if (!CNIC_SUPPORT(bp)) bnx2x_int_mem_test()
6802 REG_WR(bp, PRS_REG_NIC_MODE, 1); bnx2x_int_mem_test()
6805 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); bnx2x_int_mem_test()
6806 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); bnx2x_int_mem_test()
6807 REG_WR(bp, CFC_REG_DEBUG0, 0x0); bnx2x_int_mem_test()
6808 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); bnx2x_int_mem_test()
6815 static void bnx2x_enable_blocks_attention(struct bnx2x *bp) bnx2x_enable_blocks_attention() argument
6819 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); bnx2x_enable_blocks_attention()
6820 if (!CHIP_IS_E1x(bp)) bnx2x_enable_blocks_attention()
6821 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); bnx2x_enable_blocks_attention()
6823 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); bnx2x_enable_blocks_attention()
6824 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); bnx2x_enable_blocks_attention()
6825 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); bnx2x_enable_blocks_attention()
6832 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); bnx2x_enable_blocks_attention()
6833 REG_WR(bp, QM_REG_QM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6834 REG_WR(bp, TM_REG_TM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6835 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); bnx2x_enable_blocks_attention()
6836 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); bnx2x_enable_blocks_attention()
6837 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6838 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ bnx2x_enable_blocks_attention()
6839 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ bnx2x_enable_blocks_attention()
6840 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); bnx2x_enable_blocks_attention()
6841 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); bnx2x_enable_blocks_attention()
6842 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6843 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ bnx2x_enable_blocks_attention()
6844 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ bnx2x_enable_blocks_attention()
6845 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); bnx2x_enable_blocks_attention()
6846 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); bnx2x_enable_blocks_attention()
6847 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); bnx2x_enable_blocks_attention()
6848 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6849 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ bnx2x_enable_blocks_attention()
6850 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ bnx2x_enable_blocks_attention()
6855 if (!CHIP_IS_E1x(bp)) bnx2x_enable_blocks_attention()
6858 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); bnx2x_enable_blocks_attention()
6860 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); bnx2x_enable_blocks_attention()
6861 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); bnx2x_enable_blocks_attention()
6862 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); bnx2x_enable_blocks_attention()
6863 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ bnx2x_enable_blocks_attention()
6865 if (!CHIP_IS_E1x(bp)) bnx2x_enable_blocks_attention()
6867 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); bnx2x_enable_blocks_attention()
6869 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); bnx2x_enable_blocks_attention()
6870 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); bnx2x_enable_blocks_attention()
6871 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ bnx2x_enable_blocks_attention()
6872 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ bnx2x_enable_blocks_attention()
6875 static void bnx2x_reset_common(struct bnx2x *bp) bnx2x_reset_common() argument
6880 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, bnx2x_reset_common()
6883 if (CHIP_IS_E3(bp)) { bnx2x_reset_common()
6888 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); bnx2x_reset_common()
6891 static void bnx2x_setup_dmae(struct bnx2x *bp) bnx2x_setup_dmae() argument
6893 bp->dmae_ready = 0; bnx2x_setup_dmae()
6894 spin_lock_init(&bp->dmae_lock); bnx2x_setup_dmae()
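bp->dmae_ready starts out 0 here and is only set to 1 once the DMAE block has been initialized (see bnx2x_init_hw_common() below), so early wide-bus writes are assumed to fall back to plain register accesses. A sketch of that gate; wb_wr_sketch and its mapping parameter are illustrative, not driver API:

static void wb_wr_sketch(struct bnx2x *bp, u32 reg, u32 *data,
			 dma_addr_t mapping, int len32)
{
	if (!bp->dmae_ready) {
		int i;

		for (i = 0; i < len32; i++)	/* slow path, word by word */
			REG_WR(bp, reg + i * 4, data[i]);
		return;
	}
	bnx2x_write_dmae(bp, mapping, reg, len32);	/* DMAE fast path */
}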
6897 static void bnx2x_init_pxp(struct bnx2x *bp) bnx2x_init_pxp() argument
6902 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); bnx2x_init_pxp()
6905 if (bp->mrrs == -1) bnx2x_init_pxp()
6908 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); bnx2x_init_pxp()
6909 r_order = bp->mrrs; bnx2x_init_pxp()
6912 bnx2x_init_pxp_arb(bp, r_order, w_order); bnx2x_init_pxp()
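The write/read orders handed to bnx2x_init_pxp_arb() are derived from the PCIe Device Control word read above; in the standard layout MaxPayloadSize sits in bits 7:5 and MaxReadRequestSize in bits 14:12, and bp->mrrs (a module parameter) can force the read order. A sketch of the extraction:

u16 devctl;
int w_order, r_order;

pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;	/* write order */
r_order = (bp->mrrs == -1) ?
	  ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12) :	/* from config space */
	  bp->mrrs;					/* forced by the user */
bnx2x_init_pxp_arb(bp, r_order, w_order);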
6915 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) bnx2x_setup_fan_failure_detection() argument
6921 if (BP_NOMCP(bp)) bnx2x_setup_fan_failure_detection()
6925 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & bnx2x_setup_fan_failure_detection()
6940 bp, bnx2x_setup_fan_failure_detection()
6941 bp->common.shmem_base, bnx2x_setup_fan_failure_detection()
6942 bp->common.shmem2_base, bnx2x_setup_fan_failure_detection()
6952 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); bnx2x_setup_fan_failure_detection()
6955 val = REG_RD(bp, MISC_REG_SPIO_INT); bnx2x_setup_fan_failure_detection()
6957 REG_WR(bp, MISC_REG_SPIO_INT, val); bnx2x_setup_fan_failure_detection()
6960 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); bnx2x_setup_fan_failure_detection()
6962 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); bnx2x_setup_fan_failure_detection()
6965 void bnx2x_pf_disable(struct bnx2x *bp) bnx2x_pf_disable() argument
6967 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); bnx2x_pf_disable()
6970 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); bnx2x_pf_disable()
6971 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); bnx2x_pf_disable()
6972 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); bnx2x_pf_disable()
6975 static void bnx2x__common_init_phy(struct bnx2x *bp) bnx2x__common_init_phy() argument
6979 if (SHMEM2_RD(bp, size) > bnx2x__common_init_phy()
6980 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) bnx2x__common_init_phy()
6982 shmem_base[0] = bp->common.shmem_base; bnx2x__common_init_phy()
6983 shmem2_base[0] = bp->common.shmem2_base; bnx2x__common_init_phy()
6984 if (!CHIP_IS_E1x(bp)) { bnx2x__common_init_phy()
6986 SHMEM2_RD(bp, other_shmem_base_addr); bnx2x__common_init_phy()
6988 SHMEM2_RD(bp, other_shmem2_base_addr); bnx2x__common_init_phy()
6990 bnx2x_acquire_phy_lock(bp); bnx2x__common_init_phy()
6991 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, bnx2x__common_init_phy()
6992 bp->common.chip_id); bnx2x__common_init_phy()
6993 bnx2x_release_phy_lock(bp); bnx2x__common_init_phy()
6996 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) bnx2x_config_endianity() argument
6998 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); bnx2x_config_endianity()
6999 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); bnx2x_config_endianity()
7000 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); bnx2x_config_endianity()
7001 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); bnx2x_config_endianity()
7002 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); bnx2x_config_endianity()
7005 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); bnx2x_config_endianity()
7007 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); bnx2x_config_endianity()
7008 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); bnx2x_config_endianity()
7009 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); bnx2x_config_endianity()
7010 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); bnx2x_config_endianity()
7013 static void bnx2x_set_endianity(struct bnx2x *bp) bnx2x_set_endianity() argument
7016 bnx2x_config_endianity(bp, 1); bnx2x_set_endianity()
7018 bnx2x_config_endianity(bp, 0); bnx2x_set_endianity()
7022 static void bnx2x_reset_endianity(struct bnx2x *bp) bnx2x_reset_endianity() argument
7024 bnx2x_config_endianity(bp, 0); bnx2x_reset_endianity()
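The set/reset pair presumably reduces to a compile-time host-endianness switch: swap modes are enabled only on big-endian hosts, and reset always returns the chip to the no-swap setting a little-endian host expects. A minimal sketch:

static void set_endianity_sketch(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);	/* big-endian host: enable swapping */
#else
	bnx2x_config_endianity(bp, 0);	/* little-endian host: no swapping */
#endif
}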
7030 * @bp: driver handle
7032 static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_hw_common() argument
7036 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); bnx2x_init_hw_common()
7042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_init_hw_common()
7044 bnx2x_reset_common(bp); bnx2x_init_hw_common()
7045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); bnx2x_init_hw_common()
7048 if (CHIP_IS_E3(bp)) { bnx2x_init_hw_common()
7052 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); bnx2x_init_hw_common()
7054 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_init_hw_common()
7056 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); bnx2x_init_hw_common()
7058 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7068 for (abs_func_id = BP_PATH(bp); bnx2x_init_hw_common()
7070 if (abs_func_id == BP_ABS_FUNC(bp)) { bnx2x_init_hw_common()
7071 REG_WR(bp, bnx2x_init_hw_common()
7077 bnx2x_pretend_func(bp, abs_func_id); bnx2x_init_hw_common()
7079 bnx2x_pf_disable(bp); bnx2x_init_hw_common()
7080 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_init_hw_common()
7084 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); bnx2x_init_hw_common()
7085 if (CHIP_IS_E1(bp)) { bnx2x_init_hw_common()
7088 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); bnx2x_init_hw_common()
7091 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); bnx2x_init_hw_common()
7092 bnx2x_init_pxp(bp); bnx2x_init_hw_common()
7093 bnx2x_set_endianity(bp); bnx2x_init_hw_common()
7094 bnx2x_ilt_init_page_size(bp, INITOP_SET); bnx2x_init_hw_common()
7096 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) bnx2x_init_hw_common()
7097 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); bnx2x_init_hw_common()
7102 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); bnx2x_init_hw_common()
7107 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); bnx2x_init_hw_common()
7118 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7202 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); bnx2x_init_hw_common()
7203 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); bnx2x_init_hw_common()
7204 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_init_hw_common()
7206 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); bnx2x_init_hw_common()
7207 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); bnx2x_init_hw_common()
7208 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); bnx2x_init_hw_common()
7211 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); bnx2x_init_hw_common()
7212 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); bnx2x_init_hw_common()
7214 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7215 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : bnx2x_init_hw_common()
7216 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); bnx2x_init_hw_common()
7217 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); bnx2x_init_hw_common()
7219 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); bnx2x_init_hw_common()
7224 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); bnx2x_init_hw_common()
7233 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); bnx2x_init_hw_common()
7235 bnx2x_iov_init_dmae(bp); bnx2x_init_hw_common()
7238 bp->dmae_ready = 1; bnx2x_init_hw_common()
7239 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); bnx2x_init_hw_common()
7241 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); bnx2x_init_hw_common()
7243 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); bnx2x_init_hw_common()
7245 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); bnx2x_init_hw_common()
7247 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); bnx2x_init_hw_common()
7249 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); bnx2x_init_hw_common()
7250 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); bnx2x_init_hw_common()
7251 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); bnx2x_init_hw_common()
7252 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); bnx2x_init_hw_common()
7254 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); bnx2x_init_hw_common()
7257 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); bnx2x_init_hw_common()
7260 REG_WR(bp, QM_REG_SOFT_RESET, 1); bnx2x_init_hw_common()
7261 REG_WR(bp, QM_REG_SOFT_RESET, 0); bnx2x_init_hw_common()
7263 if (CNIC_SUPPORT(bp)) bnx2x_init_hw_common()
7264 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_hw_common()
7266 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); bnx2x_init_hw_common()
7268 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_hw_common()
7270 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); bnx2x_init_hw_common()
7272 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); bnx2x_init_hw_common()
7274 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); bnx2x_init_hw_common()
7275 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); bnx2x_init_hw_common()
7277 if (!CHIP_IS_E1(bp)) bnx2x_init_hw_common()
7278 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); bnx2x_init_hw_common()
7280 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { bnx2x_init_hw_common()
7281 if (IS_MF_AFEX(bp)) { bnx2x_init_hw_common()
7285 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); bnx2x_init_hw_common()
7286 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); bnx2x_init_hw_common()
7287 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); bnx2x_init_hw_common()
7288 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); bnx2x_init_hw_common()
7289 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); bnx2x_init_hw_common()
7294 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, bnx2x_init_hw_common()
7295 bp->path_has_ovlan ? 7 : 6); bnx2x_init_hw_common()
7299 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); bnx2x_init_hw_common()
7300 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); bnx2x_init_hw_common()
7301 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); bnx2x_init_hw_common()
7302 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); bnx2x_init_hw_common()
7304 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7306 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, bnx2x_init_hw_common()
7309 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, bnx2x_init_hw_common()
7316 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); bnx2x_init_hw_common()
7317 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); bnx2x_init_hw_common()
7318 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); bnx2x_init_hw_common()
7319 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); bnx2x_init_hw_common()
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, bnx2x_init_hw_common()
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, bnx2x_init_hw_common()
7327 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); bnx2x_init_hw_common()
7328 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); bnx2x_init_hw_common()
7329 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); bnx2x_init_hw_common()
7331 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7332 if (IS_MF_AFEX(bp)) { bnx2x_init_hw_common()
7336 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); bnx2x_init_hw_common()
7337 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); bnx2x_init_hw_common()
7338 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); bnx2x_init_hw_common()
7339 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); bnx2x_init_hw_common()
7340 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); bnx2x_init_hw_common()
7342 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, bnx2x_init_hw_common()
7343 bp->path_has_ovlan ? 7 : 6); bnx2x_init_hw_common()
7347 REG_WR(bp, SRC_REG_SOFT_RST, 1); bnx2x_init_hw_common()
7349 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); bnx2x_init_hw_common()
7351 if (CNIC_SUPPORT(bp)) { bnx2x_init_hw_common()
7352 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); bnx2x_init_hw_common()
7353 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); bnx2x_init_hw_common()
7354 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); bnx2x_init_hw_common()
7355 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); bnx2x_init_hw_common()
7356 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); bnx2x_init_hw_common()
7357 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); bnx2x_init_hw_common()
7358 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); bnx2x_init_hw_common()
7359 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); bnx2x_init_hw_common()
7360 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); bnx2x_init_hw_common()
7361 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); bnx2x_init_hw_common()
7363 REG_WR(bp, SRC_REG_SOFT_RST, 0); bnx2x_init_hw_common()
7367 dev_alert(&bp->pdev->dev, bnx2x_init_hw_common()
7371 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); bnx2x_init_hw_common()
7373 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); bnx2x_init_hw_common()
7375 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); bnx2x_init_hw_common()
7376 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); bnx2x_init_hw_common()
7378 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); bnx2x_init_hw_common()
7381 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); bnx2x_init_hw_common()
7383 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); bnx2x_init_hw_common()
7385 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) bnx2x_init_hw_common()
7386 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); bnx2x_init_hw_common()
7388 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); bnx2x_init_hw_common()
7389 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); bnx2x_init_hw_common()
7392 REG_WR(bp, 0x2814, 0xffffffff); bnx2x_init_hw_common()
7393 REG_WR(bp, 0x3820, 0xffffffff); bnx2x_init_hw_common()
7395 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_common()
7396 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, bnx2x_init_hw_common()
7399 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, bnx2x_init_hw_common()
7403 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, bnx2x_init_hw_common()
7409 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); bnx2x_init_hw_common()
7410 if (!CHIP_IS_E1(bp)) { bnx2x_init_hw_common()
7412 if (!CHIP_IS_E3(bp)) bnx2x_init_hw_common()
7413 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); bnx2x_init_hw_common()
7415 if (CHIP_IS_E1H(bp)) bnx2x_init_hw_common()
7417 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); bnx2x_init_hw_common()
7419 if (CHIP_REV_IS_SLOW(bp)) bnx2x_init_hw_common()
7423 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common()
7428 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common()
7433 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); bnx2x_init_hw_common()
7438 REG_WR(bp, CFC_REG_DEBUG0, 0); bnx2x_init_hw_common()
7440 if (CHIP_IS_E1(bp)) { bnx2x_init_hw_common()
7443 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); bnx2x_init_hw_common()
7444 val = *bnx2x_sp(bp, wb_data[0]); bnx2x_init_hw_common()
7447 if ((val == 0) && bnx2x_int_mem_test(bp)) { bnx2x_init_hw_common()
7453 bnx2x_setup_fan_failure_detection(bp); bnx2x_init_hw_common()
7456 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); bnx2x_init_hw_common()
7458 bnx2x_enable_blocks_attention(bp); bnx2x_init_hw_common()
7459 bnx2x_enable_blocks_parity(bp); bnx2x_init_hw_common()
7461 if (!BP_NOMCP(bp)) { bnx2x_init_hw_common()
7462 if (CHIP_IS_E1x(bp)) bnx2x_init_hw_common()
7463 bnx2x__common_init_phy(bp); bnx2x_init_hw_common()
7467 if (SHMEM2_HAS(bp, netproc_fw_ver)) bnx2x_init_hw_common()
7468 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); bnx2x_init_hw_common()
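SHMEM2_HAS(), used just above, guards optional shmem2 fields the same way bnx2x__common_init_phy() does by hand earlier: a field may only be read if the firmware-advertised shmem2 size reaches past its offset. The macro is presumably shaped like this:

/* Assumed shape of the guard (bnx2x.h): the field exists only if shmem2
 * is mapped and its advertised size covers the field's offset. */
#define SHMEM2_HAS(bp, field)						\
	((bp)->common.shmem2_base &&					\
	 (SHMEM2_RD(bp, size) > offsetof(struct shmem2_region, field)))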
7476 * @bp: driver handle
7478 static int bnx2x_init_hw_common_chip(struct bnx2x *bp) bnx2x_init_hw_common_chip() argument
7480 int rc = bnx2x_init_hw_common(bp); bnx2x_init_hw_common_chip()
7486 if (!BP_NOMCP(bp)) bnx2x_init_hw_common_chip()
7487 bnx2x__common_init_phy(bp); bnx2x_init_hw_common_chip()
7492 static int bnx2x_init_hw_port(struct bnx2x *bp) bnx2x_init_hw_port() argument
7494 int port = BP_PORT(bp); bnx2x_init_hw_port()
7501 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); bnx2x_init_hw_port()
7503 bnx2x_init_block(bp, BLOCK_MISC, init_phase); bnx2x_init_hw_port()
7504 bnx2x_init_block(bp, BLOCK_PXP, init_phase); bnx2x_init_hw_port()
7505 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); bnx2x_init_hw_port()
7512 if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_port()
7513 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_init_hw_port()
7515 bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_hw_port()
7516 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); bnx2x_init_hw_port()
7517 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); bnx2x_init_hw_port()
7518 bnx2x_init_block(bp, BLOCK_QM, init_phase); bnx2x_init_hw_port()
7520 bnx2x_init_block(bp, BLOCK_TCM, init_phase); bnx2x_init_hw_port()
7521 bnx2x_init_block(bp, BLOCK_UCM, init_phase); bnx2x_init_hw_port()
7522 bnx2x_init_block(bp, BLOCK_CCM, init_phase); bnx2x_init_hw_port()
7523 bnx2x_init_block(bp, BLOCK_XCM, init_phase); bnx2x_init_hw_port()
7526 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); bnx2x_init_hw_port()
7528 if (CNIC_SUPPORT(bp)) { bnx2x_init_hw_port()
7529 bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_hw_port()
7530 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); bnx2x_init_hw_port()
7531 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); bnx2x_init_hw_port()
7534 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); bnx2x_init_hw_port()
7536 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_hw_port()
7538 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { bnx2x_init_hw_port()
7540 if (IS_MF(bp)) bnx2x_init_hw_port()
7541 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); bnx2x_init_hw_port()
7542 else if (bp->dev->mtu > 4096) { bnx2x_init_hw_port()
7543 if (bp->flags & ONE_PORT_FLAG) bnx2x_init_hw_port()
7546 val = bp->dev->mtu; bnx2x_init_hw_port()
7552 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); bnx2x_init_hw_port()
7554 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); bnx2x_init_hw_port()
7555 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); bnx2x_init_hw_port()
7558 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_init_hw_port()
7559 REG_WR(bp, (BP_PORT(bp) ? bnx2x_init_hw_port()
7563 bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_hw_port()
7564 if (CHIP_IS_E3B0(bp)) { bnx2x_init_hw_port()
7565 if (IS_MF_AFEX(bp)) { bnx2x_init_hw_port()
7567 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7570 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7573 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7581 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7584 (bp->path_has_ovlan ? 7 : 6)); bnx2x_init_hw_port()
7588 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_hw_port()
7589 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); bnx2x_init_hw_port()
7590 bnx2x_init_block(bp, BLOCK_USDM, init_phase); bnx2x_init_hw_port()
7591 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); bnx2x_init_hw_port()
7593 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); bnx2x_init_hw_port()
7594 bnx2x_init_block(bp, BLOCK_USEM, init_phase); bnx2x_init_hw_port()
7595 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); bnx2x_init_hw_port()
7596 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); bnx2x_init_hw_port()
7598 bnx2x_init_block(bp, BLOCK_UPB, init_phase); bnx2x_init_hw_port()
7599 bnx2x_init_block(bp, BLOCK_XPB, init_phase); bnx2x_init_hw_port()
7601 bnx2x_init_block(bp, BLOCK_PBF, init_phase); bnx2x_init_hw_port()
7603 if (CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); bnx2x_init_hw_port()
7608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); bnx2x_init_hw_port()
7610 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); bnx2x_init_hw_port()
7613 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); bnx2x_init_hw_port()
7615 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); bnx2x_init_hw_port()
7618 if (CNIC_SUPPORT(bp)) bnx2x_init_hw_port()
7619 bnx2x_init_block(bp, BLOCK_SRC, init_phase); bnx2x_init_hw_port()
7621 bnx2x_init_block(bp, BLOCK_CDU, init_phase); bnx2x_init_hw_port()
7622 bnx2x_init_block(bp, BLOCK_CFC, init_phase); bnx2x_init_hw_port()
7624 if (CHIP_IS_E1(bp)) { bnx2x_init_hw_port()
7625 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); bnx2x_init_hw_port()
7626 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); bnx2x_init_hw_port()
7628 bnx2x_init_block(bp, BLOCK_HC, init_phase); bnx2x_init_hw_port()
7630 bnx2x_init_block(bp, BLOCK_IGU, init_phase); bnx2x_init_hw_port()
7632 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); bnx2x_init_hw_port()
7637 val = IS_MF(bp) ? 0xF7 : 0x7; bnx2x_init_hw_port()
7639 val |= CHIP_IS_E1(bp) ? 0 : 0x10; bnx2x_init_hw_port()
7640 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); bnx2x_init_hw_port()
7644 REG_WR(bp, reg, bnx2x_init_hw_port()
7645 REG_RD(bp, reg) & bnx2x_init_hw_port()
7649 REG_WR(bp, reg, bnx2x_init_hw_port()
7650 REG_RD(bp, reg) & bnx2x_init_hw_port()
7653 bnx2x_init_block(bp, BLOCK_NIG, init_phase); bnx2x_init_hw_port()
7655 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7659 if (IS_MF_AFEX(bp)) bnx2x_init_hw_port()
7660 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7664 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7667 IS_MF_SD(bp) ? 7 : 6); bnx2x_init_hw_port()
7669 if (CHIP_IS_E3(bp)) bnx2x_init_hw_port()
7670 REG_WR(bp, BP_PORT(bp) ? bnx2x_init_hw_port()
7672 NIG_REG_LLH_MF_MODE, IS_MF(bp)); bnx2x_init_hw_port()
7674 if (!CHIP_IS_E3(bp)) bnx2x_init_hw_port()
7675 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); bnx2x_init_hw_port()
7677 if (!CHIP_IS_E1(bp)) { bnx2x_init_hw_port()
7679 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, bnx2x_init_hw_port()
7680 (IS_MF_SD(bp) ? 0x1 : 0x2)); bnx2x_init_hw_port()
7682 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_port()
7684 switch (bp->mf_mode) { bnx2x_init_hw_port()
7694 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : bnx2x_init_hw_port()
7698 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); bnx2x_init_hw_port()
7699 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); bnx2x_init_hw_port()
7700 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); bnx2x_init_hw_port()
7705 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); bnx2x_init_hw_port()
7709 val = REG_RD(bp, reg_addr); bnx2x_init_hw_port()
7711 REG_WR(bp, reg_addr, val); bnx2x_init_hw_port()
7717 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) bnx2x_ilt_wr() argument
7722 if (CHIP_IS_E1(bp)) bnx2x_ilt_wr()
7729 REG_WR_DMAE(bp, reg, wb_write, 2); bnx2x_ilt_wr()
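Each ILT line carries a 64-bit DMA address written as two 32-bit words in one DMAE operation. The driver's real encoding goes through its ONCHIP_ADDR macros (not shown in these results); U64_LO/U64_HI below merely illustrate the low-word-first split:

wb_write[0] = U64_LO(addr);	/* low 32 bits first */
wb_write[1] = U64_HI(addr);	/* then the high 32 bits */
REG_WR_DMAE(bp, reg, wb_write, 2);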
7732 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) bnx2x_igu_clear_sb_gen() argument
7743 if (CHIP_INT_MODE_IS_BC(bp)) bnx2x_igu_clear_sb_gen()
7757 REG_WR(bp, igu_addr_data, data); bnx2x_igu_clear_sb_gen()
7762 REG_WR(bp, igu_addr_ctl, ctl); bnx2x_igu_clear_sb_gen()
7767 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) bnx2x_igu_clear_sb_gen()
7770 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { bnx2x_igu_clear_sb_gen()
7777 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) bnx2x_igu_clear_sb() argument
7779 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); bnx2x_igu_clear_sb()
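The generic clear in bnx2x_igu_clear_sb_gen() above follows a three-step IGU command protocol, reconstructed here from the visible fragments:

/* 1. REG_WR(bp, igu_addr_data, data) - post the cleanup command data
 * 2. barrier, then REG_WR(bp, igu_addr_ctl, ctl) - address the target
 *    status block and fire the command
 * 3. poll REG_RD(bp, igu_addr_ack) for the per-SB done bit, giving up
 *    after a bounded number of retries, as the final loop shows. */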
7782 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) bnx2x_clear_func_ilt() argument
7786 bnx2x_ilt_wr(bp, i, 0); bnx2x_clear_func_ilt()
7789 static void bnx2x_init_searcher(struct bnx2x *bp) bnx2x_init_searcher() argument
7791 int port = BP_PORT(bp); bnx2x_init_searcher()
7792 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); bnx2x_init_searcher()
7794 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); bnx2x_init_searcher()
7797 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) bnx2x_func_switch_update() argument
7808 func_params.f_obj = &bp->func_obj; bnx2x_func_switch_update()
7818 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_func_switch_update()
7823 static int bnx2x_reset_nic_mode(struct bnx2x *bp) bnx2x_reset_nic_mode() argument
7825 int rc, i, port = BP_PORT(bp); bnx2x_reset_nic_mode()
7829 if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_reset_nic_mode()
7830 bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_reset_nic_mode()
7832 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7834 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7837 mac_en[i] = REG_RD(bp, port ? bnx2x_reset_nic_mode()
7842 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + bnx2x_reset_nic_mode()
7849 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : bnx2x_reset_nic_mode()
7857 rc = bnx2x_func_switch_update(bp, 1); bnx2x_reset_nic_mode()
7864 REG_WR(bp, PRS_REG_NIC_MODE, 0); bnx2x_reset_nic_mode()
7867 if (bp->mf_mode == SINGLE_FUNCTION) { bnx2x_reset_nic_mode()
7868 bnx2x_set_rx_filter(&bp->link_params, 1); bnx2x_reset_nic_mode()
7870 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : bnx2x_reset_nic_mode()
7873 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + bnx2x_reset_nic_mode()
7881 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : bnx2x_reset_nic_mode()
7885 rc = bnx2x_func_switch_update(bp, 0); bnx2x_reset_nic_mode()
7895 int bnx2x_init_hw_func_cnic(struct bnx2x *bp) bnx2x_init_hw_func_cnic() argument
7899 bnx2x_ilt_init_op_cnic(bp, INITOP_SET); bnx2x_init_hw_func_cnic()
7901 if (CONFIGURE_NIC_MODE(bp)) { bnx2x_init_hw_func_cnic()
7903 bnx2x_init_searcher(bp); bnx2x_init_hw_func_cnic()
7906 rc = bnx2x_reset_nic_mode(bp); bnx2x_init_hw_func_cnic()
7922 static void bnx2x_clean_pglue_errors(struct bnx2x *bp) bnx2x_clean_pglue_errors() argument
7924 if (!CHIP_IS_E1x(bp)) bnx2x_clean_pglue_errors()
7925 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, bnx2x_clean_pglue_errors()
7926 1 << BP_ABS_FUNC(bp)); bnx2x_clean_pglue_errors()
7929 static int bnx2x_init_hw_func(struct bnx2x *bp) bnx2x_init_hw_func() argument
7931 int port = BP_PORT(bp); bnx2x_init_hw_func()
7932 int func = BP_FUNC(bp); bnx2x_init_hw_func()
7934 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_init_hw_func()
7943 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
7944 rc = bnx2x_pf_flr_clnup(bp); bnx2x_init_hw_func()
7946 bnx2x_fw_dump(bp); bnx2x_init_hw_func()
7952 if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_init_hw_func()
7954 val = REG_RD(bp, addr); bnx2x_init_hw_func()
7956 REG_WR(bp, addr, val); bnx2x_init_hw_func()
7959 bnx2x_init_block(bp, BLOCK_PXP, init_phase); bnx2x_init_hw_func()
7960 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); bnx2x_init_hw_func()
7962 ilt = BP_ILT(bp); bnx2x_init_hw_func()
7965 if (IS_SRIOV(bp)) bnx2x_init_hw_func()
7967 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); bnx2x_init_hw_func()
7973 for (i = 0; i < L2_ILT_LINES(bp); i++) { bnx2x_init_hw_func()
7974 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; bnx2x_init_hw_func()
7976 bp->context[i].cxt_mapping; bnx2x_init_hw_func()
7977 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; bnx2x_init_hw_func()
7980 bnx2x_ilt_init_op(bp, INITOP_SET); bnx2x_init_hw_func()
7982 if (!CONFIGURE_NIC_MODE(bp)) { bnx2x_init_hw_func()
7983 bnx2x_init_searcher(bp); bnx2x_init_hw_func()
7984 REG_WR(bp, PRS_REG_NIC_MODE, 0); bnx2x_init_hw_func()
7988 REG_WR(bp, PRS_REG_NIC_MODE, 1); bnx2x_init_hw_func()
7992 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
7998 if (!(bp->flags & USING_MSIX_FLAG)) bnx2x_init_hw_func()
8012 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); bnx2x_init_hw_func()
8014 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); bnx2x_init_hw_func()
8017 bp->dmae_ready = 1; bnx2x_init_hw_func()
8019 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); bnx2x_init_hw_func()
8021 bnx2x_clean_pglue_errors(bp); bnx2x_init_hw_func()
8023 bnx2x_init_block(bp, BLOCK_ATC, init_phase); bnx2x_init_hw_func()
8024 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); bnx2x_init_hw_func()
8025 bnx2x_init_block(bp, BLOCK_NIG, init_phase); bnx2x_init_hw_func()
8026 bnx2x_init_block(bp, BLOCK_SRC, init_phase); bnx2x_init_hw_func()
8027 bnx2x_init_block(bp, BLOCK_MISC, init_phase); bnx2x_init_hw_func()
8028 bnx2x_init_block(bp, BLOCK_TCM, init_phase); bnx2x_init_hw_func()
8029 bnx2x_init_block(bp, BLOCK_UCM, init_phase); bnx2x_init_hw_func()
8030 bnx2x_init_block(bp, BLOCK_CCM, init_phase); bnx2x_init_hw_func()
8031 bnx2x_init_block(bp, BLOCK_XCM, init_phase); bnx2x_init_hw_func()
8032 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); bnx2x_init_hw_func()
8033 bnx2x_init_block(bp, BLOCK_USEM, init_phase); bnx2x_init_hw_func()
8034 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); bnx2x_init_hw_func()
8035 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); bnx2x_init_hw_func()
8037 if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
8038 REG_WR(bp, QM_REG_PF_EN, 1); bnx2x_init_hw_func()
8040 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8041 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
8042 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
8043 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
8044 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); bnx2x_init_hw_func()
8046 bnx2x_init_block(bp, BLOCK_QM, init_phase); bnx2x_init_hw_func()
8048 bnx2x_init_block(bp, BLOCK_TM, init_phase); bnx2x_init_hw_func()
8049 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); bnx2x_init_hw_func()
8050 REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ bnx2x_init_hw_func()
8052 bnx2x_iov_init_dq(bp); bnx2x_init_hw_func()
8054 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); bnx2x_init_hw_func()
8055 bnx2x_init_block(bp, BLOCK_PRS, init_phase); bnx2x_init_hw_func()
8056 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); bnx2x_init_hw_func()
8057 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); bnx2x_init_hw_func()
8058 bnx2x_init_block(bp, BLOCK_USDM, init_phase); bnx2x_init_hw_func()
8059 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); bnx2x_init_hw_func()
8060 bnx2x_init_block(bp, BLOCK_UPB, init_phase); bnx2x_init_hw_func()
8061 bnx2x_init_block(bp, BLOCK_XPB, init_phase); bnx2x_init_hw_func()
8062 bnx2x_init_block(bp, BLOCK_PBF, init_phase); bnx2x_init_hw_func()
8063 if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
8064 REG_WR(bp, PBF_REG_DISABLE_PF, 0); bnx2x_init_hw_func()
8066 bnx2x_init_block(bp, BLOCK_CDU, init_phase); bnx2x_init_hw_func()
8068 bnx2x_init_block(bp, BLOCK_CFC, init_phase); bnx2x_init_hw_func()
8070 if (!CHIP_IS_E1x(bp)) bnx2x_init_hw_func()
8071 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); bnx2x_init_hw_func()
8073 if (IS_MF(bp)) { bnx2x_init_hw_func()
8074 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { bnx2x_init_hw_func()
8075 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); bnx2x_init_hw_func()
8076 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, bnx2x_init_hw_func()
8077 bp->mf_ov); bnx2x_init_hw_func()
8081 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); bnx2x_init_hw_func()
8084 if (bp->common.int_block == INT_BLOCK_HC) { bnx2x_init_hw_func()
8085 if (CHIP_IS_E1H(bp)) { bnx2x_init_hw_func()
8086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_init_hw_func()
8088 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); bnx2x_init_hw_func()
8089 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); bnx2x_init_hw_func()
8091 bnx2x_init_block(bp, BLOCK_HC, init_phase); bnx2x_init_hw_func()
8096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bnx2x_init_hw_func()
8098 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8099 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); bnx2x_init_hw_func()
8100 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); bnx2x_init_hw_func()
8103 bnx2x_init_block(bp, BLOCK_IGU, init_phase); bnx2x_init_hw_func()
8105 if (!CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8128 num_segs = CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8130 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { bnx2x_init_hw_func()
8131 prod_offset = (bp->igu_base_sb + sb_idx) * bnx2x_init_hw_func()
8137 REG_WR(bp, addr, 0); bnx2x_init_hw_func()
8140 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, bnx2x_init_hw_func()
8142 bnx2x_igu_clear_sb(bp, bnx2x_init_hw_func()
8143 bp->igu_base_sb + sb_idx); bnx2x_init_hw_func()
8147 num_segs = CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8150 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_init_hw_func()
8151 dsb_idx = BP_FUNC(bp); bnx2x_init_hw_func()
8153 dsb_idx = BP_VN(bp); bnx2x_init_hw_func()
8155 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? bnx2x_init_hw_func()
8167 REG_WR(bp, addr, 0); bnx2x_init_hw_func()
8170 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_init_hw_func()
8171 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8173 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8175 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8177 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8179 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8182 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8184 bnx2x_ack_sb(bp, bp->igu_dsb_id, bnx2x_init_hw_func()
8187 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); bnx2x_init_hw_func()
8191 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); bnx2x_init_hw_func()
8192 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); bnx2x_init_hw_func()
8193 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); bnx2x_init_hw_func()
8194 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); bnx2x_init_hw_func()
8195 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); bnx2x_init_hw_func()
8196 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); bnx2x_init_hw_func()
8201 REG_WR(bp, 0x2114, 0xffffffff); bnx2x_init_hw_func()
8202 REG_WR(bp, 0x2120, 0xffffffff); bnx2x_init_hw_func()
8204 if (CHIP_IS_E1x(bp)) { bnx2x_init_hw_func()
8207 BP_PORT(bp) * (main_mem_size * 4); bnx2x_init_hw_func()
8211 val = REG_RD(bp, main_mem_prty_clr); bnx2x_init_hw_func()
8221 bnx2x_read_dmae(bp, i, main_mem_width / 4); bnx2x_init_hw_func()
8222 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), bnx2x_init_hw_func()
8226 REG_RD(bp, main_mem_prty_clr); bnx2x_init_hw_func()
8231 REG_WR8(bp, BAR_USTRORM_INTMEM + bnx2x_init_hw_func()
8232 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8233 REG_WR8(bp, BAR_TSTRORM_INTMEM + bnx2x_init_hw_func()
8234 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8235 REG_WR8(bp, BAR_CSTRORM_INTMEM + bnx2x_init_hw_func()
8236 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8237 REG_WR8(bp, BAR_XSTRORM_INTMEM + bnx2x_init_hw_func()
8238 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); bnx2x_init_hw_func()
8241 bnx2x_phy_probe(&bp->link_params); bnx2x_init_hw_func()
8246 void bnx2x_free_mem_cnic(struct bnx2x *bp) bnx2x_free_mem_cnic() argument
8248 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); bnx2x_free_mem_cnic()
8250 if (!CHIP_IS_E1x(bp)) bnx2x_free_mem_cnic()
8251 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, bnx2x_free_mem_cnic()
8254 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, bnx2x_free_mem_cnic()
8257 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); bnx2x_free_mem_cnic()
8260 void bnx2x_free_mem(struct bnx2x *bp) bnx2x_free_mem() argument
8264 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bnx2x_free_mem()
8265 bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_free_mem()
8267 if (IS_VF(bp)) bnx2x_free_mem()
8270 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, bnx2x_free_mem()
8273 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, bnx2x_free_mem()
8276 for (i = 0; i < L2_ILT_LINES(bp); i++) bnx2x_free_mem()
8277 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, bnx2x_free_mem()
8278 bp->context[i].size); bnx2x_free_mem()
8279 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); bnx2x_free_mem()
8281 BNX2X_FREE(bp->ilt->lines); bnx2x_free_mem()
8283 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); bnx2x_free_mem()
8285 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, bnx2x_free_mem()
8288 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); bnx2x_free_mem()
8290 bnx2x_iov_free_mem(bp); bnx2x_free_mem()
8293 int bnx2x_alloc_mem_cnic(struct bnx2x *bp) bnx2x_alloc_mem_cnic() argument
8295 if (!CHIP_IS_E1x(bp)) { bnx2x_alloc_mem_cnic()
8297 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, bnx2x_alloc_mem_cnic()
8299 if (!bp->cnic_sb.e2_sb) bnx2x_alloc_mem_cnic()
8302 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, bnx2x_alloc_mem_cnic()
8304 if (!bp->cnic_sb.e1x_sb) bnx2x_alloc_mem_cnic()
8308 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { bnx2x_alloc_mem_cnic()
8310 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); bnx2x_alloc_mem_cnic()
8311 if (!bp->t2) bnx2x_alloc_mem_cnic()
8316 bp->cnic_eth_dev.addr_drv_info_to_mcp = bnx2x_alloc_mem_cnic()
8317 &bp->slowpath->drv_info_to_mcp; bnx2x_alloc_mem_cnic()
8319 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) bnx2x_alloc_mem_cnic()
8325 bnx2x_free_mem_cnic(bp); bnx2x_alloc_mem_cnic()
8330 int bnx2x_alloc_mem(struct bnx2x *bp) bnx2x_alloc_mem() argument
8334 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { bnx2x_alloc_mem()
8336 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); bnx2x_alloc_mem()
8337 if (!bp->t2) bnx2x_alloc_mem()
8341 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, bnx2x_alloc_mem()
8343 if (!bp->def_status_blk) bnx2x_alloc_mem()
8346 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, bnx2x_alloc_mem()
8348 if (!bp->slowpath) bnx2x_alloc_mem()
8364 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); bnx2x_alloc_mem()
8367 bp->context[i].size = min(CDU_ILT_PAGE_SZ, bnx2x_alloc_mem()
8369 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, bnx2x_alloc_mem()
8370 bp->context[i].size); bnx2x_alloc_mem()
8371 if (!bp->context[i].vcxt) bnx2x_alloc_mem()
8373 allocated += bp->context[i].size; bnx2x_alloc_mem()
8375 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), bnx2x_alloc_mem()
8377 if (!bp->ilt->lines) bnx2x_alloc_mem()
8380 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) bnx2x_alloc_mem()
8383 if (bnx2x_iov_alloc_mem(bp)) bnx2x_alloc_mem()
8387 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); bnx2x_alloc_mem()
8388 if (!bp->spq) bnx2x_alloc_mem()
8392 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, bnx2x_alloc_mem()
8394 if (!bp->eq_ring) bnx2x_alloc_mem()
8400 bnx2x_free_mem(bp); bnx2x_alloc_mem()
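BNX2X_PCI_ALLOC()/BNX2X_PCI_FREE(), used throughout the two routines above, are assumed to be thin wrappers over the coherent DMA API that hand back the virtual address and fill in the DMA mapping:

/* Assumed sketch of the helpers, not the verbatim driver macros. */
#define BNX2X_PCI_ALLOC(mapping, size)					\
	dma_zalloc_coherent(&bp->pdev->dev, (size), (mapping), GFP_KERNEL)

#define BNX2X_PCI_FREE(vaddr, mapping, size)				\
	do {								\
		if (vaddr)						\
			dma_free_coherent(&bp->pdev->dev, (size),	\
					  (vaddr), (mapping));		\
	} while (0)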
8409 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, bnx2x_set_mac_one() argument
8435 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); bnx2x_set_mac_one()
8447 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan, bnx2x_set_vlan_one() argument
8470 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); bnx2x_set_vlan_one()
8483 int bnx2x_del_all_macs(struct bnx2x *bp, bnx2x_del_all_macs() argument
8497 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); bnx2x_del_all_macs()
8504 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) bnx2x_set_eth_mac() argument
8506 if (IS_PF(bp)) { bnx2x_set_eth_mac()
8511 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, bnx2x_set_eth_mac()
8512 &bp->sp_objs->mac_obj, set, bnx2x_set_eth_mac()
8515 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bnx2x_set_eth_mac()
8516 bp->fp->index, set); bnx2x_set_eth_mac()
8520 int bnx2x_setup_leading(struct bnx2x *bp) bnx2x_setup_leading() argument
8522 if (IS_PF(bp)) bnx2x_setup_leading()
8523 return bnx2x_setup_queue(bp, &bp->fp[0], true); bnx2x_setup_leading()
8525 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); bnx2x_setup_leading()
8531 * @bp: driver handle
8535 int bnx2x_set_int_mode(struct bnx2x *bp) bnx2x_set_int_mode() argument
8539 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { bnx2x_set_int_mode()
8547 rc = bnx2x_enable_msix(bp); bnx2x_set_int_mode()
8554 if (rc && IS_VF(bp)) bnx2x_set_int_mode()
8559 bp->num_queues, bnx2x_set_int_mode()
8560 1 + bp->num_cnic_queues); bnx2x_set_int_mode()
8564 bnx2x_enable_msi(bp); bnx2x_set_int_mode()
8568 bp->num_ethernet_queues = 1; bnx2x_set_int_mode()
8569 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_set_int_mode()
8580 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) bnx2x_cid_ilt_lines() argument
8582 if (IS_SRIOV(bp)) bnx2x_cid_ilt_lines()
8584 return L2_ILT_LINES(bp); bnx2x_cid_ilt_lines()
8587 void bnx2x_ilt_set_info(struct bnx2x *bp) bnx2x_ilt_set_info() argument
8590 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_set_info()
8593 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); bnx2x_ilt_set_info()
8602 line += bnx2x_cid_ilt_lines(bp); bnx2x_ilt_set_info()
8604 if (CNIC_SUPPORT(bp)) bnx2x_ilt_set_info()
8616 if (QM_INIT(bp->qm_cid_count)) { bnx2x_ilt_set_info()
8624 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, bnx2x_ilt_set_info()
8638 if (CNIC_SUPPORT(bp)) { bnx2x_ilt_set_info()
8680 * @bp: driver handle
8688 static void bnx2x_pf_q_prep_init(struct bnx2x *bp, bnx2x_pf_q_prep_init() argument
8706 init_params->rx.hc_rate = bp->rx_ticks ? bnx2x_pf_q_prep_init()
8707 (1000000 / bp->rx_ticks) : 0; bnx2x_pf_q_prep_init()
8708 init_params->tx.hc_rate = bp->tx_ticks ? bnx2x_pf_q_prep_init()
8709 (1000000 / bp->tx_ticks) : 0; bnx2x_pf_q_prep_init()
8735 &bp->context[cxt_index].vcxt[cxt_offset].eth; bnx2x_pf_q_prep_init()
8739 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_setup_tx_only() argument
8750 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); bnx2x_setup_tx_only()
8756 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); bnx2x_setup_tx_only()
8759 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); bnx2x_setup_tx_only()
8768 return bnx2x_queue_state_change(bp, q_params); bnx2x_setup_tx_only()
8774 * @bp: driver handle
8782 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_setup_queue() argument
8797 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, bnx2x_setup_queue()
8800 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_setup_queue()
8805 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); bnx2x_setup_queue()
8811 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_setup_queue()
8823 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); bnx2x_setup_queue()
8826 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, bnx2x_setup_queue()
8829 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, bnx2x_setup_queue()
8832 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, bnx2x_setup_queue()
8839 bp->fcoe_init = true; bnx2x_setup_queue()
8842 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_setup_queue()
8854 rc = bnx2x_setup_tx_only(bp, fp, &q_params, bnx2x_setup_queue()
8866 static int bnx2x_stop_queue(struct bnx2x *bp, int index) bnx2x_stop_queue() argument
8868 struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_stop_queue()
8875 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; bnx2x_stop_queue()
8896 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8905 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8912 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8921 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
8929 return bnx2x_queue_state_change(bp, &q_params); bnx2x_stop_queue()
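Read together, the repeated bnx2x_queue_state_change() calls walk the queue state machine through a fixed stop sequence; the assumed order is:

/* Assumed command order for a clean queue stop:
 *   BNX2X_Q_CMD_HALT      - quiesce the queue (no new work accepted)
 *   BNX2X_Q_CMD_TERMINATE - terminate the remaining Tx-only connections
 *   BNX2X_Q_CMD_CFC_DEL   - release the CFC connection context last
 * each step issued via bnx2x_queue_state_change(bp, &q_params). */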
8932 static void bnx2x_reset_func(struct bnx2x *bp) bnx2x_reset_func() argument
8934 int port = BP_PORT(bp); bnx2x_reset_func()
8935 int func = BP_FUNC(bp); bnx2x_reset_func()
8939 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8940 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8941 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8942 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); bnx2x_reset_func()
8945 for_each_eth_queue(bp, i) { for_each_eth_queue()
8946 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
8947 REG_WR8(bp, BAR_CSTRORM_INTMEM + for_each_eth_queue()
8952 if (CNIC_LOADED(bp))
8954 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8956 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8959 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8964 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8968 if (bp->common.int_block == INT_BLOCK_HC) {
8969 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8970 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8972 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8973 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8976 if (CNIC_LOADED(bp)) {
8978 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8985 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8990 bnx2x_clear_func_ilt(bp, func);
8995 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9003 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9007 if (!CHIP_IS_E1x(bp))
9008 bnx2x_pf_disable(bp);
9010 bp->dmae_ready = 0;
9013 static void bnx2x_reset_port(struct bnx2x *bp) bnx2x_reset_port() argument
9015 int port = BP_PORT(bp); bnx2x_reset_port()
9019 bnx2x__link_reset(bp); bnx2x_reset_port()
9021 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); bnx2x_reset_port()
9024 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); bnx2x_reset_port()
9026 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : bnx2x_reset_port()
9030 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); bnx2x_reset_port()
9034 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); bnx2x_reset_port()
9042 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) bnx2x_reset_hw() argument
9049 func_params.f_obj = &bp->func_obj; bnx2x_reset_hw()
9054 return bnx2x_func_state_change(bp, &func_params); bnx2x_reset_hw()
9057 static int bnx2x_func_stop(struct bnx2x *bp) bnx2x_func_stop() argument
9064 func_params.f_obj = &bp->func_obj; bnx2x_func_stop()
9073 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_func_stop()
9080 return bnx2x_func_state_change(bp, &func_params); bnx2x_func_stop()
9090 * @bp: driver handle
9095 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) bnx2x_send_unload_req() argument
9098 int port = BP_PORT(bp); bnx2x_send_unload_req()
9104 else if (bp->flags & NO_WOL_FLAG) bnx2x_send_unload_req()
9107 else if (bp->wol) { bnx2x_send_unload_req()
9109 u8 *mac_addr = bp->dev->dev_addr; bnx2x_send_unload_req()
9110 struct pci_dev *pdev = bp->pdev; bnx2x_send_unload_req()
9117 u8 entry = (BP_VN(bp) + 1)*8; bnx2x_send_unload_req()
9120 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); bnx2x_send_unload_req()
9124 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); bnx2x_send_unload_req()
9137 if (!BP_NOMCP(bp)) bnx2x_send_unload_req()
9138 reset_code = bnx2x_fw_command(bp, reset_code, 0); bnx2x_send_unload_req()
9140 int path = BP_PATH(bp); bnx2x_send_unload_req()
9164 * @bp: driver handle
9167 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) bnx2x_send_unload_done() argument
9172 if (!BP_NOMCP(bp)) bnx2x_send_unload_done()
9173 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); bnx2x_send_unload_done()
9176 static int bnx2x_func_wait_started(struct bnx2x *bp) bnx2x_func_wait_started() argument
9179 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; bnx2x_func_wait_started()
9181 if (!bp->port.pmf) bnx2x_func_wait_started()
9200 synchronize_irq(bp->msix_table[0].vector); bnx2x_func_wait_started()
9202 synchronize_irq(bp->pdev->irq); bnx2x_func_wait_started()
9207 while (bnx2x_func_get_state(bp, &bp->func_obj) != bnx2x_func_wait_started()
9211 if (bnx2x_func_get_state(bp, &bp->func_obj) != bnx2x_func_wait_started()
9226 func_params.f_obj = &bp->func_obj; bnx2x_func_wait_started()
9232 bnx2x_func_state_change(bp, &func_params); bnx2x_func_wait_started()
9236 return bnx2x_func_state_change(bp, &func_params); bnx2x_func_wait_started()
9243 static void bnx2x_disable_ptp(struct bnx2x *bp) bnx2x_disable_ptp() argument
9245 int port = BP_PORT(bp); bnx2x_disable_ptp()
9248 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_disable_ptp()
9252 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_disable_ptp()
9254 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_disable_ptp()
9256 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_disable_ptp()
9258 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_disable_ptp()
9262 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : bnx2x_disable_ptp()
9267 static void bnx2x_stop_ptp(struct bnx2x *bp) bnx2x_stop_ptp() argument
9272 cancel_work_sync(&bp->ptp_task); bnx2x_stop_ptp()
9274 if (bp->ptp_tx_skb) { bnx2x_stop_ptp()
9275 dev_kfree_skb_any(bp->ptp_tx_skb); bnx2x_stop_ptp()
9276 bp->ptp_tx_skb = NULL; bnx2x_stop_ptp()
9280 bnx2x_disable_ptp(bp); bnx2x_stop_ptp()
9285 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_chip_cleanup() argument
9287 int port = BP_PORT(bp); bnx2x_chip_cleanup()
9294 for_each_tx_queue(bp, i) { for_each_tx_queue()
9295 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_tx_queue()
9298 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); for_each_tx_queue()
9309 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9315 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9322 if (!CHIP_IS_E1(bp))
9323 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9329 netif_addr_lock_bh(bp->dev);
9331 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9332 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9334 bnx2x_set_storm_rx_mode(bp);
9337 rparam.mcast_obj = &bp->mcast_obj;
9338 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9342 netif_addr_unlock_bh(bp->dev);
9344 bnx2x_iov_chip_cleanup(bp);
9351 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9357 rc = bnx2x_func_wait_started(bp);
9368 for_each_eth_queue(bp, i) for_each_eth_queue()
9369 if (bnx2x_stop_queue(bp, i)) for_each_eth_queue()
9376 if (CNIC_LOADED(bp)) { for_each_eth_queue()
9377 for_each_cnic_queue(bp, i) for_each_eth_queue()
9378 if (bnx2x_stop_queue(bp, i)) for_each_eth_queue()
9389 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9395 rc = bnx2x_func_stop(bp);
9408 if (bp->flags & PTP_SUPPORTED)
9409 bnx2x_stop_ptp(bp);
9412 bnx2x_netif_stop(bp, 1);
9414 bnx2x_del_all_napi(bp);
9415 if (CNIC_LOADED(bp))
9416 bnx2x_del_all_napi_cnic(bp);
9419 bnx2x_free_irq(bp);
9422 rc = bnx2x_reset_hw(bp, reset_code);
9427 bnx2x_send_unload_done(bp, keep_link);
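Pulling the fragments together, bnx2x_chip_cleanup() is assumed to tear the device down in roughly this order:

/* 1. drain every Tx queue (bnx2x_clean_tx_queue per cos)
 * 2. delete the ETH MAC, then the unicast list (bnx2x_del_all_macs)
 * 3. close the NIG LLH door and flush any pending Rx-mode work
 * 4. delete the multicast configuration (BNX2X_MCAST_CMD_DEL)
 * 5. send UNLOAD_REQ to the MCP and wait for the function to stop
 * 6. stop all ETH (and, if loaded, CNIC) queues; wait for slowpath ops
 * 7. issue the FUNC_STOP ramrod, stop PTP, NAPI and IRQs
 * 8. reset the HW and report UNLOAD_DONE back to the MCP */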
9430 void bnx2x_disable_close_the_gate(struct bnx2x *bp) bnx2x_disable_close_the_gate() argument
9436 if (CHIP_IS_E1(bp)) { bnx2x_disable_close_the_gate()
9437 int port = BP_PORT(bp); bnx2x_disable_close_the_gate()
9441 val = REG_RD(bp, addr); bnx2x_disable_close_the_gate()
9443 REG_WR(bp, addr, val); bnx2x_disable_close_the_gate()
9445 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); bnx2x_disable_close_the_gate()
9448 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); bnx2x_disable_close_the_gate()
9453 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) bnx2x_set_234_gates() argument
9458 if (!CHIP_IS_E1(bp)) { bnx2x_set_234_gates()
9460 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); bnx2x_set_234_gates()
9462 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); bnx2x_set_234_gates()
9466 if (CHIP_IS_E1x(bp)) { bnx2x_set_234_gates()
9468 val = REG_RD(bp, HC_REG_CONFIG_1); bnx2x_set_234_gates()
9469 REG_WR(bp, HC_REG_CONFIG_1, bnx2x_set_234_gates()
9473 val = REG_RD(bp, HC_REG_CONFIG_0); bnx2x_set_234_gates()
9474 REG_WR(bp, HC_REG_CONFIG_0, bnx2x_set_234_gates()
9479 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); bnx2x_set_234_gates()
9481 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, bnx2x_set_234_gates()
9494 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) bnx2x_clp_reset_prep() argument
9497 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); bnx2x_clp_reset_prep()
9499 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); bnx2x_clp_reset_prep()
9505 * @bp: driver handle
9508 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) bnx2x_clp_reset_done() argument
9511 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); bnx2x_clp_reset_done()
9512 MF_CFG_WR(bp, shared_mf_config.clp_mb, bnx2x_clp_reset_done()
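The prep/done pair brackets an MCP reset: prep saves the current state of the magic bit and forces it on, done clears it and merges the saved value back. In sketch form, consistent with the reads and writes shown:

/* prep: remember the magic bit, then set it */
*magic_val = val & SHARED_MF_CLP_MAGIC;
MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);

/* done: clear the bit and restore whatever was saved */
MF_CFG_WR(bp, shared_mf_config.clp_mb,
	  (val & ~SHARED_MF_CLP_MAGIC) | magic_val);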
9519 * @bp: driver handle
9524 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) bnx2x_reset_mcp_prep() argument
9532 if (!CHIP_IS_E1(bp)) bnx2x_reset_mcp_prep()
9533 bnx2x_clp_reset_prep(bp, magic_val); bnx2x_reset_mcp_prep()
9536 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); bnx2x_reset_mcp_prep()
9538 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); bnx2x_reset_mcp_prep()
9542 REG_WR(bp, shmem + validity_offset, 0); bnx2x_reset_mcp_prep()
9551 * @bp: driver handle
9553 static void bnx2x_mcp_wait_one(struct bnx2x *bp) bnx2x_mcp_wait_one() argument
9557 if (CHIP_REV_IS_SLOW(bp)) bnx2x_mcp_wait_one()
9564 * initializes bp->common.shmem_base and waits for validity signature to appear
9566 static int bnx2x_init_shmem(struct bnx2x *bp) bnx2x_init_shmem() argument
9572 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); bnx2x_init_shmem()
9573 if (bp->common.shmem_base) { bnx2x_init_shmem()
9574 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); bnx2x_init_shmem()
9579 bnx2x_mcp_wait_one(bp); bnx2x_init_shmem()
9588 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) bnx2x_reset_mcp_comp() argument
9590 int rc = bnx2x_init_shmem(bp); bnx2x_reset_mcp_comp()
9593 if (!CHIP_IS_E1(bp)) bnx2x_reset_mcp_comp()
9594 bnx2x_clp_reset_done(bp, magic_val); bnx2x_reset_mcp_comp()
9599 static void bnx2x_pxp_prep(struct bnx2x *bp) bnx2x_pxp_prep() argument
9601 if (!CHIP_IS_E1(bp)) { bnx2x_pxp_prep()
9602 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); bnx2x_pxp_prep()
9603 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); bnx2x_pxp_prep()
9618 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) bnx2x_process_kill_chip_reset() argument
9670 if (CHIP_IS_E1(bp)) bnx2x_process_kill_chip_reset()
9672 else if (CHIP_IS_E1H(bp)) bnx2x_process_kill_chip_reset()
9674 else if (CHIP_IS_E2(bp)) bnx2x_process_kill_chip_reset()
9697 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_process_kill_chip_reset()
9700 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, bnx2x_process_kill_chip_reset()
9706 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_process_kill_chip_reset()
9712 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); bnx2x_process_kill_chip_reset()
9720 * @bp: driver handle
9725 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) bnx2x_er_poll_igu_vq() argument
9731 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); bnx2x_er_poll_igu_vq()
9748 static int bnx2x_process_kill(struct bnx2x *bp, bool global) bnx2x_process_kill() argument
9757 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); bnx2x_process_kill()
9758 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); bnx2x_process_kill()
9759 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); bnx2x_process_kill()
9760 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); bnx2x_process_kill()
9761 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); bnx2x_process_kill()
9762 if (CHIP_IS_E3(bp)) bnx2x_process_kill()
9763 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); bnx2x_process_kill()
9769 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) bnx2x_process_kill()
9785 bnx2x_set_234_gates(bp, true); bnx2x_process_kill()
9788 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) bnx2x_process_kill()
9794 REG_WR(bp, MISC_REG_UNPREPARED, 0); bnx2x_process_kill()
9808 bnx2x_reset_mcp_prep(bp, &val); bnx2x_process_kill()
9811 bnx2x_pxp_prep(bp); bnx2x_process_kill()
9815 bnx2x_process_kill_chip_reset(bp, global); bnx2x_process_kill()
9819 if (!CHIP_IS_E1x(bp)) bnx2x_process_kill()
9820 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); bnx2x_process_kill()
9824 if (global && bnx2x_reset_mcp_comp(bp, val)) bnx2x_process_kill()
9830 bnx2x_set_234_gates(bp, false); bnx2x_process_kill()
9838 static int bnx2x_leader_reset(struct bnx2x *bp) bnx2x_leader_reset() argument
9841 bool global = bnx2x_reset_is_global(bp); bnx2x_leader_reset()
9847 if (!global && !BP_NOMCP(bp)) { bnx2x_leader_reset()
9848 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, bnx2x_leader_reset()
9861 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_leader_reset()
9870 if (bnx2x_process_kill(bp, global)) { bnx2x_leader_reset()
9872 BP_PATH(bp)); bnx2x_leader_reset()
9881 bnx2x_set_reset_done(bp); bnx2x_leader_reset()
9883 bnx2x_clear_reset_global(bp); bnx2x_leader_reset()
9887 if (!global && !BP_NOMCP(bp)) { bnx2x_leader_reset()
9888 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bnx2x_leader_reset()
9889 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); bnx2x_leader_reset()
9892 bp->is_leader = 0; bnx2x_leader_reset()
9893 bnx2x_release_leader_lock(bp); bnx2x_leader_reset()
9898 static void bnx2x_recovery_failed(struct bnx2x *bp) bnx2x_recovery_failed() argument
9900 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); bnx2x_recovery_failed()
9903 netif_device_detach(bp->dev); bnx2x_recovery_failed()
9909 bnx2x_set_reset_in_progress(bp); bnx2x_recovery_failed()
9912 bnx2x_set_power_state(bp, PCI_D3hot); bnx2x_recovery_failed()
9914 bp->recovery_state = BNX2X_RECOVERY_FAILED; bnx2x_recovery_failed()
9922 * will never be called when netif_running(bp->dev) is false.
9924 static void bnx2x_parity_recover(struct bnx2x *bp) bnx2x_parity_recover() argument
9932 switch (bp->recovery_state) { bnx2x_parity_recover()
9935 is_parity = bnx2x_chk_parity_attn(bp, &global, false); bnx2x_parity_recover()
9939 if (bnx2x_trylock_leader_lock(bp)) { bnx2x_parity_recover()
9940 bnx2x_set_reset_in_progress(bp); bnx2x_parity_recover()
9948 bnx2x_set_reset_global(bp); bnx2x_parity_recover()
9950 bp->is_leader = 1; bnx2x_parity_recover()
9955 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) bnx2x_parity_recover()
9958 bp->recovery_state = BNX2X_RECOVERY_WAIT; bnx2x_parity_recover()
9969 if (bp->is_leader) { bnx2x_parity_recover()
9970 int other_engine = BP_PATH(bp) ? 0 : 1; bnx2x_parity_recover()
9972 bnx2x_get_load_status(bp, other_engine); bnx2x_parity_recover()
9974 bnx2x_get_load_status(bp, BP_PATH(bp)); bnx2x_parity_recover()
9975 global = bnx2x_reset_is_global(bp); bnx2x_parity_recover()
9990 schedule_delayed_work(&bp->sp_rtnl_task, bnx2x_parity_recover()
9999 if (bnx2x_leader_reset(bp)) { bnx2x_parity_recover()
10000 bnx2x_recovery_failed(bp); bnx2x_parity_recover()
10012 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { bnx2x_parity_recover()
10019 if (bnx2x_trylock_leader_lock(bp)) { bnx2x_parity_recover()
10023 bp->is_leader = 1; bnx2x_parity_recover()
10027 schedule_delayed_work(&bp->sp_rtnl_task, bnx2x_parity_recover()
10036 if (bnx2x_reset_is_global(bp)) { bnx2x_parity_recover()
10038 &bp->sp_rtnl_task, bnx2x_parity_recover()
10044 bp->eth_stats.recoverable_error; bnx2x_parity_recover()
10046 bp->eth_stats.unrecoverable_error; bnx2x_parity_recover()
10047 bp->recovery_state = bnx2x_parity_recover()
10049 if (bnx2x_nic_load(bp, LOAD_NORMAL)) { bnx2x_parity_recover()
10051 netdev_err(bp->dev, bnx2x_parity_recover()
10054 netif_device_detach(bp->dev); bnx2x_parity_recover()
10057 bp, PCI_D3hot); bnx2x_parity_recover()
10060 bp->recovery_state = bnx2x_parity_recover()
10065 bp->eth_stats.recoverable_error = bnx2x_parity_recover()
10067 bp->eth_stats.unrecoverable_error = bnx2x_parity_recover()
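
bnx2x_parity_recover() above is a small state machine driven by bp->recovery_state: the first pass unloads the NIC and moves to a wait state, the leader then performs the reset, and the outcome lands in a done or failed state. A compilable toy version of that transition shape, heavily simplified (the states and transitions are modeled on, not copied from, the driver):

#include <stdio.h>

/* Recovery states loosely modeled on the driver's BNX2X_RECOVERY_* flow. */
enum recovery_state { REC_INIT, REC_WAIT, REC_DONE, REC_FAILED };

static enum recovery_state step(enum recovery_state s, int is_leader,
                                int reset_ok)
{
        switch (s) {
        case REC_INIT:                  /* unload NIC, start waiting */
                return REC_WAIT;
        case REC_WAIT:                  /* only the leader drives the reset */
                if (is_leader)
                        return reset_ok ? REC_DONE : REC_FAILED;
                return REC_WAIT;        /* non-leader keeps polling */
        default:
                return s;               /* DONE/FAILED are terminal */
        }
}

int main(void)
{
        enum recovery_state s = REC_INIT;
        s = step(s, 1, 1);
        s = step(s, 1, 1);
        printf("final state: %d\n", s); /* 2 == REC_DONE */
        return 0;
}
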
10080 static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) bnx2x_vxlan_port_update() argument
10092 func_params.f_obj = &bp->func_obj; bnx2x_vxlan_port_update()
10099 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_vxlan_port_update()
10106 static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) __bnx2x_add_vxlan_port() argument
10108 if (!netif_running(bp->dev)) __bnx2x_add_vxlan_port()
10111 if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { __bnx2x_add_vxlan_port()
10112 bp->vxlan_dst_port_count++; __bnx2x_add_vxlan_port()
10116 if (bp->vxlan_dst_port_count || !IS_PF(bp)) { __bnx2x_add_vxlan_port()
10121 bp->vxlan_dst_port = port; __bnx2x_add_vxlan_port()
10122 bp->vxlan_dst_port_count = 1; __bnx2x_add_vxlan_port()
10123 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); __bnx2x_add_vxlan_port()
10129 struct bnx2x *bp = netdev_priv(netdev); bnx2x_add_vxlan_port() local
10132 __bnx2x_add_vxlan_port(bp, t_port); bnx2x_add_vxlan_port()
10135 static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) __bnx2x_del_vxlan_port() argument
10137 if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || __bnx2x_del_vxlan_port()
10138 !IS_PF(bp)) { __bnx2x_del_vxlan_port()
10142 bp->vxlan_dst_port_count--; __bnx2x_del_vxlan_port()
10143 if (bp->vxlan_dst_port_count) __bnx2x_del_vxlan_port()
10146 if (netif_running(bp->dev)) { __bnx2x_del_vxlan_port()
10147 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); __bnx2x_del_vxlan_port()
10149 bp->vxlan_dst_port = 0; __bnx2x_del_vxlan_port()
10150 netdev_info(bp->dev, "Deleted vxlan dest port %d", port); __bnx2x_del_vxlan_port()
10157 struct bnx2x *bp = netdev_priv(netdev); bnx2x_del_vxlan_port() local
10160 __bnx2x_del_vxlan_port(bp, t_port); bnx2x_del_vxlan_port()
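
__bnx2x_add_vxlan_port()/__bnx2x_del_vxlan_port() implement single-slot, reference-counted port registration: the first add programs the port and takes a reference, later adds of the same port only bump the count, and the entry is cleared when the last reference drops. A self-contained sketch of that accounting (locking and the deferred hardware update are omitted):

#include <stdint.h>
#include <stdio.h>

/* Single registered port plus its reference count, mirroring the
 * vxlan_dst_port / vxlan_dst_port_count pair above. */
static uint16_t dst_port;
static int dst_port_count;

static void add_port(uint16_t port)
{
        if (dst_port_count && dst_port == port) {
                dst_port_count++;       /* same port: just take a ref */
                return;
        }
        if (dst_port_count)
                return;                 /* a different port is active */
        dst_port = port;
        dst_port_count = 1;             /* first user programs the port */
}

static void del_port(uint16_t port)
{
        if (!dst_port_count || dst_port != port)
                return;                 /* not ours to drop */
        if (--dst_port_count == 0)
                dst_port = 0;           /* last user clears the entry */
}

int main(void)
{
        add_port(4789);
        add_port(4789);
        del_port(4789);
        printf("port %u refs %d\n", dst_port, dst_port_count);
        return 0;
}
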
10171 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); bnx2x_sp_rtnl_task() local
10178 if (!netif_running(bp->dev)) { bnx2x_sp_rtnl_task()
10183 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { bnx2x_sp_rtnl_task()
10193 bp->sp_rtnl_state = 0; bnx2x_sp_rtnl_task()
10196 bnx2x_parity_recover(bp); bnx2x_sp_rtnl_task()
10202 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10213 bp->sp_rtnl_state = 0; bnx2x_sp_rtnl_task()
10216 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_sp_rtnl_task()
10217 bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_sp_rtnl_task()
10225 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10226 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); bnx2x_sp_rtnl_task()
10227 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10228 bnx2x_after_function_update(bp); bnx2x_sp_rtnl_task()
10234 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10236 netif_device_detach(bp->dev); bnx2x_sp_rtnl_task()
10237 bnx2x_close(bp->dev); bnx2x_sp_rtnl_task()
10242 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10245 bnx2x_vfpf_set_mcast(bp->dev); bnx2x_sp_rtnl_task()
10248 &bp->sp_rtnl_state)){ bnx2x_sp_rtnl_task()
10249 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { bnx2x_sp_rtnl_task()
10250 bnx2x_tx_disable(bp); bnx2x_sp_rtnl_task()
10255 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10257 bnx2x_set_rx_mode_inner(bp); bnx2x_sp_rtnl_task()
10261 &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10262 bnx2x_pf_set_vfs_vlan(bp); bnx2x_sp_rtnl_task()
10264 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10265 bnx2x_dcbx_stop_hw_tx(bp); bnx2x_sp_rtnl_task()
10266 bnx2x_dcbx_resume_hw_tx(bp); bnx2x_sp_rtnl_task()
10270 &bp->sp_rtnl_state)) bnx2x_sp_rtnl_task()
10271 bnx2x_update_mng_version(bp); bnx2x_sp_rtnl_task()
10274 port = bp->vxlan_dst_port; bnx2x_sp_rtnl_task()
10276 &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10277 if (!bnx2x_vxlan_port_update(bp, port)) bnx2x_sp_rtnl_task()
10278 netdev_info(bp->dev, "Added vxlan dest port %d", port); bnx2x_sp_rtnl_task()
10280 bp->vxlan_dst_port = 0; bnx2x_sp_rtnl_task()
10284 &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10285 if (!bnx2x_vxlan_port_update(bp, 0)) { bnx2x_sp_rtnl_task()
10286 netdev_info(bp->dev, bnx2x_sp_rtnl_task()
10288 bp->vxlan_dst_port = 0; bnx2x_sp_rtnl_task()
10289 vxlan_get_rx_port(bp->dev); bnx2x_sp_rtnl_task()
10300 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, bnx2x_sp_rtnl_task()
10301 &bp->sp_rtnl_state)) { bnx2x_sp_rtnl_task()
10302 bnx2x_disable_sriov(bp); bnx2x_sp_rtnl_task()
10303 bnx2x_enable_sriov(bp); bnx2x_sp_rtnl_task()
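
bnx2x_sp_rtnl_task() dispatches deferred events by atomically testing and clearing one bit per pending task in bp->sp_rtnl_state. The sketch below mirrors that dispatch shape with plain (non-atomic) operations; the bit names are illustrative:

#include <stdio.h>

#define SP_TX_TIMEOUT  (1u << 0)
#define SP_SETUP_TC    (1u << 1)
#define SP_RX_MODE     (1u << 2)

static unsigned long sp_state = SP_TX_TIMEOUT | SP_RX_MODE;

/* Non-atomic stand-in for the kernel's test_and_clear_bit(). */
static int test_and_clear(unsigned long *state, unsigned long bit)
{
        int was_set = !!(*state & bit);
        *state &= ~bit;
        return was_set;
}

int main(void)
{
        if (test_and_clear(&sp_state, SP_TX_TIMEOUT))
                printf("handle tx timeout (unload + reload)\n");
        if (test_and_clear(&sp_state, SP_SETUP_TC))
                printf("reconfigure traffic classes\n");
        if (test_and_clear(&sp_state, SP_RX_MODE))
                printf("apply deferred rx mode\n");
        return 0;
}
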
10309 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); bnx2x_period_task() local
10311 if (!netif_running(bp->dev)) bnx2x_period_task()
10314 if (CHIP_REV_IS_SLOW(bp)) { bnx2x_period_task()
10319 bnx2x_acquire_phy_lock(bp); bnx2x_period_task()
10322 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and bnx2x_period_task()
10326 if (bp->port.pmf) { bnx2x_period_task()
10327 bnx2x_period_func(&bp->link_params, &bp->link_vars); bnx2x_period_task()
10330 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); bnx2x_period_task()
10333 bnx2x_release_phy_lock(bp); bnx2x_period_task()
10342 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) bnx2x_get_pretend_reg() argument
10346 return base + (BP_ABS_FUNC(bp)) * stride; bnx2x_get_pretend_reg()
10349 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, bnx2x_prev_unload_close_umac() argument
10362 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); bnx2x_prev_unload_close_umac()
10363 REG_WR(bp, vals->umac_addr[port], 0); bnx2x_prev_unload_close_umac()
10368 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, bnx2x_prev_unload_close_mac() argument
10373 u8 port = BP_PORT(bp); bnx2x_prev_unload_close_mac()
10378 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); bnx2x_prev_unload_close_mac()
10380 if (!CHIP_IS_E3(bp)) { bnx2x_prev_unload_close_mac()
10381 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); bnx2x_prev_unload_close_mac()
10386 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM bnx2x_prev_unload_close_mac()
10388 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL bnx2x_prev_unload_close_mac()
10397 wb_data[0] = REG_RD(bp, base_addr + offset); bnx2x_prev_unload_close_mac()
10398 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); bnx2x_prev_unload_close_mac()
10403 REG_WR(bp, vals->bmac_addr, wb_data[0]); bnx2x_prev_unload_close_mac()
10404 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); bnx2x_prev_unload_close_mac()
10407 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; bnx2x_prev_unload_close_mac()
10408 vals->emac_val = REG_RD(bp, vals->emac_addr); bnx2x_prev_unload_close_mac()
10409 REG_WR(bp, vals->emac_addr, 0); bnx2x_prev_unload_close_mac()
10414 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; bnx2x_prev_unload_close_mac()
10415 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); bnx2x_prev_unload_close_mac()
10416 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, bnx2x_prev_unload_close_mac()
10418 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, bnx2x_prev_unload_close_mac()
10421 vals->xmac_val = REG_RD(bp, vals->xmac_addr); bnx2x_prev_unload_close_mac()
10422 REG_WR(bp, vals->xmac_addr, 0); bnx2x_prev_unload_close_mac()
10426 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, bnx2x_prev_unload_close_mac()
10428 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, bnx2x_prev_unload_close_mac()
10447 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) bnx2x_prev_is_after_undi() argument
10452 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & bnx2x_prev_is_after_undi()
10456 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { bnx2x_prev_is_after_undi()
10464 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) bnx2x_prev_unload_undi_inc() argument
10469 if (BP_FUNC(bp) < 2) bnx2x_prev_unload_undi_inc()
10470 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); bnx2x_prev_unload_undi_inc()
10472 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); bnx2x_prev_unload_undi_inc()
10474 tmp_reg = REG_RD(bp, addr); bnx2x_prev_unload_undi_inc()
10479 REG_WR(bp, addr, tmp_reg); bnx2x_prev_unload_undi_inc()
10482 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); bnx2x_prev_unload_undi_inc()
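
bnx2x_prev_unload_undi_inc() bumps two 16-bit UNDI producers (BD and RCQ) that are packed into a single 32-bit register, so the update is a read-modify-write of both halves. A sketch of the packing arithmetic (BD in the low half and RCQ in the high half is an assumption here):

#include <stdint.h>
#include <stdio.h>

/* Increment both 16-bit producers packed into one 32-bit register. */
static uint32_t bump_producers(uint32_t reg, uint8_t inc)
{
        uint16_t bd  = (uint16_t)(reg & 0xffff) + inc;  /* low half  */
        uint16_t rcq = (uint16_t)(reg >> 16) + inc;     /* high half */

        return ((uint32_t)rcq << 16) | bd;
}

int main(void)
{
        uint32_t reg = 0x00100020;              /* rcq=0x10, bd=0x20 */
        reg = bump_producers(reg, 1);
        printf("reg now 0x%08x\n", (unsigned)reg);      /* 0x00110021 */
        return 0;
}
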
10485 static int bnx2x_prev_mcp_done(struct bnx2x *bp) bnx2x_prev_mcp_done() argument
10487 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, bnx2x_prev_mcp_done()
10498 bnx2x_prev_path_get_entry(struct bnx2x *bp) bnx2x_prev_path_get_entry() argument
10503 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && bnx2x_prev_path_get_entry()
10504 bp->pdev->bus->number == tmp_list->bus && bnx2x_prev_path_get_entry()
10505 BP_PATH(bp) == tmp_list->path) bnx2x_prev_path_get_entry()
10511 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) bnx2x_prev_path_mark_eeh() argument
10522 tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_path_mark_eeh()
10528 BP_PATH(bp)); bnx2x_prev_path_mark_eeh()
10536 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) bnx2x_prev_is_path_marked() argument
10544 tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_is_path_marked()
10548 BP_PATH(bp)); bnx2x_prev_is_path_marked()
10552 BP_PATH(bp)); bnx2x_prev_is_path_marked()
10561 bool bnx2x_port_after_undi(struct bnx2x *bp) bnx2x_port_after_undi() argument
10568 entry = bnx2x_prev_path_get_entry(bp); bnx2x_port_after_undi()
10569 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); bnx2x_port_after_undi()
10576 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) bnx2x_prev_mark_path() argument
10588 tmp_list = bnx2x_prev_path_get_entry(bp); bnx2x_prev_mark_path()
10594 BP_PATH(bp)); bnx2x_prev_mark_path()
10609 tmp_list->bus = bp->pdev->bus->number; bnx2x_prev_mark_path()
10610 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); bnx2x_prev_mark_path()
10611 tmp_list->path = BP_PATH(bp); bnx2x_prev_mark_path()
10613 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; bnx2x_prev_mark_path()
10621 BP_PATH(bp)); bnx2x_prev_mark_path()
10629 static int bnx2x_do_flr(struct bnx2x *bp) bnx2x_do_flr() argument
10631 struct pci_dev *dev = bp->pdev; bnx2x_do_flr()
10633 if (CHIP_IS_E1x(bp)) { bnx2x_do_flr()
10639 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { bnx2x_do_flr()
10641 bp->common.bc_ver); bnx2x_do_flr()
10649 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); bnx2x_do_flr()
10654 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) bnx2x_prev_unload_uncommon() argument
10661 if (bnx2x_prev_is_path_marked(bp)) bnx2x_prev_unload_uncommon()
10662 return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_uncommon()
10667 if (bnx2x_prev_is_after_undi(bp)) bnx2x_prev_unload_uncommon()
10674 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); bnx2x_prev_unload_uncommon()
10679 rc = bnx2x_do_flr(bp); bnx2x_prev_unload_uncommon()
10692 rc = bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_uncommon()
10699 static int bnx2x_prev_unload_common(struct bnx2x *bp) bnx2x_prev_unload_common() argument
10713 if (bnx2x_prev_is_path_marked(bp)) bnx2x_prev_unload_common()
10714 return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
10716 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); bnx2x_prev_unload_common()
10723 bnx2x_prev_unload_close_mac(bp, &mac_vals); bnx2x_prev_unload_common()
10726 bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_prev_unload_common()
10727 bp->link_params.port ^= 1; bnx2x_prev_unload_common()
10728 bnx2x_set_rx_filter(&bp->link_params, 0); bnx2x_prev_unload_common()
10729 bp->link_params.port ^= 1; bnx2x_prev_unload_common()
10732 if (bnx2x_prev_is_after_undi(bp)) { bnx2x_prev_unload_common()
10735 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); bnx2x_prev_unload_common()
10737 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); bnx2x_prev_unload_common()
10739 if (!CHIP_IS_E1x(bp)) bnx2x_prev_unload_common()
10741 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); bnx2x_prev_unload_common()
10744 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); bnx2x_prev_unload_common()
10748 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); bnx2x_prev_unload_common()
10762 bnx2x_prev_unload_undi_inc(bp, 1); bnx2x_prev_unload_common()
10772 bnx2x_reset_common(bp); bnx2x_prev_unload_common()
10775 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); bnx2x_prev_unload_common()
10777 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); bnx2x_prev_unload_common()
10779 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); bnx2x_prev_unload_common()
10781 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); bnx2x_prev_unload_common()
10783 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); bnx2x_prev_unload_common()
10784 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); bnx2x_prev_unload_common()
10787 rc = bnx2x_prev_mark_path(bp, prev_undi); bnx2x_prev_unload_common()
10789 bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
10793 return bnx2x_prev_mcp_done(bp); bnx2x_prev_unload_common()
10796 static int bnx2x_prev_unload(struct bnx2x *bp) bnx2x_prev_unload() argument
10805 bnx2x_clean_pglue_errors(bp); bnx2x_prev_unload()
10808 hw_lock_reg = (BP_FUNC(bp) <= 5) ? bnx2x_prev_unload()
10809 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : bnx2x_prev_unload()
10810 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); bnx2x_prev_unload()
10812 hw_lock_val = REG_RD(bp, hw_lock_reg); bnx2x_prev_unload()
10816 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_prev_unload()
10817 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); bnx2x_prev_unload()
10821 REG_WR(bp, hw_lock_reg, 0xffffffff); bnx2x_prev_unload()
10825 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { bnx2x_prev_unload()
10827 bnx2x_release_alr(bp); bnx2x_prev_unload()
10833 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); bnx2x_prev_unload()
10846 aer = !!(bnx2x_prev_path_get_entry(bp) && bnx2x_prev_unload()
10847 bnx2x_prev_path_get_entry(bp)->aer); bnx2x_prev_unload()
10852 rc = bnx2x_prev_unload_common(bp); bnx2x_prev_unload()
10857 rc = bnx2x_prev_unload_uncommon(bp); bnx2x_prev_unload()
10870 if (bnx2x_port_after_undi(bp)) bnx2x_prev_unload()
10871 bp->link_params.feature_config_flags |= bnx2x_prev_unload()
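
bnx2x_prev_unload() picks its per-function lock register from two banks: functions 0-5 index off MISC_REG_DRIVER_CONTROL_1 and functions 6-7 off MISC_REG_DRIVER_CONTROL_7, each with an 8-byte stride. A sketch of just that indexing (the base addresses below are placeholders, not the real register offsets):

#include <stdint.h>
#include <stdio.h>

#define CTRL_1_BASE 0xa000u     /* placeholder for MISC_REG_DRIVER_CONTROL_1 */
#define CTRL_7_BASE 0xb000u     /* placeholder for MISC_REG_DRIVER_CONTROL_7 */

/* Functions 0-5 live in the first bank, 6-7 in the second. */
static uint32_t hw_lock_reg(int func)
{
        return (func <= 5) ? CTRL_1_BASE + func * 8
                           : CTRL_7_BASE + (func - 6) * 8;
}

int main(void)
{
        for (int f = 0; f < 8; f++)
                printf("func %d -> 0x%x\n", f, (unsigned)hw_lock_reg(f));
        return 0;
}
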
10879 static void bnx2x_get_common_hwinfo(struct bnx2x *bp) bnx2x_get_common_hwinfo() argument
10886 val = REG_RD(bp, MISC_REG_CHIP_NUM); bnx2x_get_common_hwinfo()
10888 val = REG_RD(bp, MISC_REG_CHIP_REV); bnx2x_get_common_hwinfo()
10894 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); bnx2x_get_common_hwinfo()
10896 val = REG_RD(bp, MISC_REG_BOND_ID); bnx2x_get_common_hwinfo()
10898 bp->common.chip_id = id; bnx2x_get_common_hwinfo()
10901 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { bnx2x_get_common_hwinfo()
10902 if (CHIP_IS_57810(bp)) bnx2x_get_common_hwinfo()
10903 bp->common.chip_id = (CHIP_NUM_57811 << 16) | bnx2x_get_common_hwinfo()
10904 (bp->common.chip_id & 0x0000FFFF); bnx2x_get_common_hwinfo()
10905 else if (CHIP_IS_57810_MF(bp)) bnx2x_get_common_hwinfo()
10906 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | bnx2x_get_common_hwinfo()
10907 (bp->common.chip_id & 0x0000FFFF); bnx2x_get_common_hwinfo()
10908 bp->common.chip_id |= 0x1; bnx2x_get_common_hwinfo()
10912 bp->db_size = (1 << BNX2X_DB_SHIFT); bnx2x_get_common_hwinfo()
10914 if (!CHIP_IS_E1x(bp)) { bnx2x_get_common_hwinfo()
10915 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); bnx2x_get_common_hwinfo()
10917 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); bnx2x_get_common_hwinfo()
10922 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE : bnx2x_get_common_hwinfo()
10925 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_get_common_hwinfo()
10926 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ bnx2x_get_common_hwinfo()
10928 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ bnx2x_get_common_hwinfo()
10930 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ bnx2x_get_common_hwinfo()
10931 bp->pfid = bp->pf_num; /* 0..7 */ bnx2x_get_common_hwinfo()
10934 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); bnx2x_get_common_hwinfo()
10936 bp->link_params.chip_id = bp->common.chip_id; bnx2x_get_common_hwinfo()
10939 val = (REG_RD(bp, 0x2874) & 0x55); bnx2x_get_common_hwinfo()
10940 if ((bp->common.chip_id & 0x1) || bnx2x_get_common_hwinfo()
10941 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { bnx2x_get_common_hwinfo()
10942 bp->flags |= ONE_PORT_FLAG; bnx2x_get_common_hwinfo()
10946 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); bnx2x_get_common_hwinfo()
10947 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << bnx2x_get_common_hwinfo()
10950 bp->common.flash_size, bp->common.flash_size); bnx2x_get_common_hwinfo()
10952 bnx2x_init_shmem(bp); bnx2x_get_common_hwinfo()
10954 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? bnx2x_get_common_hwinfo()
10958 bp->link_params.shmem_base = bp->common.shmem_base; bnx2x_get_common_hwinfo()
10959 bp->link_params.shmem2_base = bp->common.shmem2_base; bnx2x_get_common_hwinfo()
10960 if (SHMEM2_RD(bp, size) > bnx2x_get_common_hwinfo()
10961 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) bnx2x_get_common_hwinfo()
10962 bp->link_params.lfa_base = bnx2x_get_common_hwinfo()
10963 REG_RD(bp, bp->common.shmem2_base + bnx2x_get_common_hwinfo()
10965 lfa_host_addr[BP_PORT(bp)])); bnx2x_get_common_hwinfo()
10967 bp->link_params.lfa_base = 0; bnx2x_get_common_hwinfo()
10969 bp->common.shmem_base, bp->common.shmem2_base); bnx2x_get_common_hwinfo()
10971 if (!bp->common.shmem_base) { bnx2x_get_common_hwinfo()
10973 bp->flags |= NO_MCP_FLAG; bnx2x_get_common_hwinfo()
10977 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); bnx2x_get_common_hwinfo()
10978 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); bnx2x_get_common_hwinfo()
10980 bp->link_params.hw_led_mode = ((bp->common.hw_config & bnx2x_get_common_hwinfo()
10984 bp->link_params.feature_config_flags = 0; bnx2x_get_common_hwinfo()
10985 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); bnx2x_get_common_hwinfo()
10987 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
10990 bp->link_params.feature_config_flags &= bnx2x_get_common_hwinfo()
10993 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; bnx2x_get_common_hwinfo()
10994 bp->common.bc_ver = val; bnx2x_get_common_hwinfo()
11002 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
11006 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
11009 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
11012 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
11016 bp->link_params.feature_config_flags |= bnx2x_get_common_hwinfo()
11020 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? bnx2x_get_common_hwinfo()
11023 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? bnx2x_get_common_hwinfo()
11026 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? bnx2x_get_common_hwinfo()
11029 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? bnx2x_get_common_hwinfo()
11032 boot_mode = SHMEM_RD(bp, bnx2x_get_common_hwinfo()
11033 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & bnx2x_get_common_hwinfo()
11037 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; bnx2x_get_common_hwinfo()
11040 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; bnx2x_get_common_hwinfo()
11043 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; bnx2x_get_common_hwinfo()
11046 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; bnx2x_get_common_hwinfo()
11050 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); bnx2x_get_common_hwinfo()
11051 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; bnx2x_get_common_hwinfo()
11054 (bp->flags & NO_WOL_FLAG) ? "not " : ""); bnx2x_get_common_hwinfo()
11056 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); bnx2x_get_common_hwinfo()
11057 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); bnx2x_get_common_hwinfo()
11058 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); bnx2x_get_common_hwinfo()
11059 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); bnx2x_get_common_hwinfo()
11061 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", bnx2x_get_common_hwinfo()
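
bnx2x_get_common_hwinfo() composes bp->common.chip_id by packing fields read from separate ID registers (chip number, revision, metal, bond) into one 32-bit word. The sketch below reproduces that packing; the field widths used (num:16, rev:4, metal:8, bond:4) are an assumption about the driver's layout:

#include <stdint.h>
#include <stdio.h>

/* Pack the per-register ID fields into a single 32-bit chip id. */
static uint32_t make_chip_id(uint16_t num, uint8_t rev, uint8_t metal,
                             uint8_t bond)
{
        return ((uint32_t)num << 16) |
               ((uint32_t)(rev & 0xf) << 12) |
               ((uint32_t)metal << 4) |
               (bond & 0xf);
}

int main(void)
{
        printf("chip id 0x%08x\n", (unsigned)make_chip_id(0x168e, 1, 0, 0));
        return 0;
}
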
11068 static int bnx2x_get_igu_cam_info(struct bnx2x *bp) bnx2x_get_igu_cam_info() argument
11070 int pfid = BP_FUNC(bp); bnx2x_get_igu_cam_info()
11075 bp->igu_base_sb = 0xff; bnx2x_get_igu_cam_info()
11076 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_get_igu_cam_info()
11077 int vn = BP_VN(bp); bnx2x_get_igu_cam_info()
11078 igu_sb_cnt = bp->igu_sb_cnt; bnx2x_get_igu_cam_info()
11079 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * bnx2x_get_igu_cam_info()
11082 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + bnx2x_get_igu_cam_info()
11083 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); bnx2x_get_igu_cam_info()
11091 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); bnx2x_get_igu_cam_info()
11100 bp->igu_dsb_id = igu_sb_id; bnx2x_get_igu_cam_info()
11102 if (bp->igu_base_sb == 0xff) bnx2x_get_igu_cam_info()
11103 bp->igu_base_sb = igu_sb_id; bnx2x_get_igu_cam_info()
11116 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); bnx2x_get_igu_cam_info()
11127 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) bnx2x_link_settings_supported() argument
11129 int cfg_size = 0, idx, port = BP_PORT(bp); bnx2x_link_settings_supported()
11132 bp->port.supported[0] = 0; bnx2x_link_settings_supported()
11133 bp->port.supported[1] = 0; bnx2x_link_settings_supported()
11134 switch (bp->link_params.num_phys) { bnx2x_link_settings_supported()
11136 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; bnx2x_link_settings_supported()
11140 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported()
11144 if (bp->link_params.multi_phy_config & bnx2x_link_settings_supported()
11146 bp->port.supported[1] = bnx2x_link_settings_supported()
11147 bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported()
11148 bp->port.supported[0] = bnx2x_link_settings_supported()
11149 bp->link_params.phy[EXT_PHY2].supported; bnx2x_link_settings_supported()
11151 bp->port.supported[0] = bnx2x_link_settings_supported()
11152 bp->link_params.phy[EXT_PHY1].supported; bnx2x_link_settings_supported()
11153 bp->port.supported[1] = bnx2x_link_settings_supported()
11154 bp->link_params.phy[EXT_PHY2].supported; bnx2x_link_settings_supported()
11160 if (!(bp->port.supported[0] || bp->port.supported[1])) { bnx2x_link_settings_supported()
11162 SHMEM_RD(bp, bnx2x_link_settings_supported()
11164 SHMEM_RD(bp, bnx2x_link_settings_supported()
11169 if (CHIP_IS_E3(bp)) bnx2x_link_settings_supported()
11170 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); bnx2x_link_settings_supported()
11174 bp->port.phy_addr = REG_RD( bnx2x_link_settings_supported()
11175 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); bnx2x_link_settings_supported()
11178 bp->port.phy_addr = REG_RD( bnx2x_link_settings_supported()
11179 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); bnx2x_link_settings_supported()
11183 bp->port.link_config[0]); bnx2x_link_settings_supported()
11187 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); bnx2x_link_settings_supported()
11190 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11192 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; bnx2x_link_settings_supported()
11194 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11196 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; bnx2x_link_settings_supported()
11198 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11200 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; bnx2x_link_settings_supported()
11202 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11204 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; bnx2x_link_settings_supported()
11206 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11208 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | bnx2x_link_settings_supported()
11211 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11213 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; bnx2x_link_settings_supported()
11215 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11217 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; bnx2x_link_settings_supported()
11219 if (!(bp->link_params.speed_cap_mask[idx] & bnx2x_link_settings_supported()
11221 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; bnx2x_link_settings_supported()
11224 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], bnx2x_link_settings_supported()
11225 bp->port.supported[1]); bnx2x_link_settings_supported()
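
The long run of speed_cap_mask tests above all do the same thing: clear each SUPPORTED_* advertisement whose matching capability bit is absent. A compact sketch of that trimming (the bit values are illustrative, not the real SUPPORTED_*/speed-capability encodings):

#include <stdint.h>
#include <stdio.h>

#define SUP_10_FULL    (1u << 0)
#define SUP_100_FULL   (1u << 1)
#define SUP_1000_FULL  (1u << 2)

#define CAP_10_FULL    (1u << 0)
#define CAP_100_FULL   (1u << 1)
#define CAP_1000_FULL  (1u << 2)

/* Drop every supported mode whose capability bit is not set. */
static uint32_t trim_supported(uint32_t supported, uint32_t cap_mask)
{
        if (!(cap_mask & CAP_10_FULL))
                supported &= ~SUP_10_FULL;
        if (!(cap_mask & CAP_100_FULL))
                supported &= ~SUP_100_FULL;
        if (!(cap_mask & CAP_1000_FULL))
                supported &= ~SUP_1000_FULL;
        return supported;
}

int main(void)
{
        uint32_t sup = trim_supported(SUP_10_FULL | SUP_100_FULL |
                                      SUP_1000_FULL,
                                      CAP_100_FULL | CAP_1000_FULL);
        printf("supported after trim: 0x%x\n", (unsigned)sup);  /* 0x6 */
        return 0;
}
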
11228 static void bnx2x_link_settings_requested(struct bnx2x *bp) bnx2x_link_settings_requested() argument
11231 bp->port.advertising[0] = 0; bnx2x_link_settings_requested()
11232 bp->port.advertising[1] = 0; bnx2x_link_settings_requested()
11233 switch (bp->link_params.num_phys) { bnx2x_link_settings_requested()
11243 bp->link_params.req_duplex[idx] = DUPLEX_FULL; bnx2x_link_settings_requested()
11244 link_config = bp->port.link_config[idx]; bnx2x_link_settings_requested()
11247 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { bnx2x_link_settings_requested()
11248 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11250 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11251 bp->port.supported[idx]; bnx2x_link_settings_requested()
11252 if (bp->link_params.phy[EXT_PHY1].type == bnx2x_link_settings_requested()
11254 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11259 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11261 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11269 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { bnx2x_link_settings_requested()
11270 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11272 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11278 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11284 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { bnx2x_link_settings_requested()
11285 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11287 bp->link_params.req_duplex[idx] = bnx2x_link_settings_requested()
11289 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11295 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11301 if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11303 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11305 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11311 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11317 if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11319 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11321 bp->link_params.req_duplex[idx] = bnx2x_link_settings_requested()
11323 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11329 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11335 if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11337 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11339 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11342 } else if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11344 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11346 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11351 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11357 if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11359 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11361 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11367 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11373 if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11375 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11377 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11380 } else if (bp->port.supported[idx] & bnx2x_link_settings_requested()
11382 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11384 bp->port.advertising[idx] |= bnx2x_link_settings_requested()
11390 bp->link_params.speed_cap_mask[idx]); bnx2x_link_settings_requested()
11395 bp->link_params.req_line_speed[idx] = SPEED_20000; bnx2x_link_settings_requested()
11401 bp->link_params.req_line_speed[idx] = bnx2x_link_settings_requested()
11403 bp->port.advertising[idx] = bnx2x_link_settings_requested()
11404 bp->port.supported[idx]; bnx2x_link_settings_requested()
11408 bp->link_params.req_flow_ctrl[idx] = (link_config & bnx2x_link_settings_requested()
11410 if (bp->link_params.req_flow_ctrl[idx] == bnx2x_link_settings_requested()
11412 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) bnx2x_link_settings_requested()
11413 bp->link_params.req_flow_ctrl[idx] = bnx2x_link_settings_requested()
11416 bnx2x_set_requested_fc(bp); bnx2x_link_settings_requested()
11420 bp->link_params.req_line_speed[idx], bnx2x_link_settings_requested()
11421 bp->link_params.req_duplex[idx], bnx2x_link_settings_requested()
11422 bp->link_params.req_flow_ctrl[idx], bnx2x_link_settings_requested()
11423 bp->port.advertising[idx]); bnx2x_link_settings_requested()
11435 static void bnx2x_get_port_hwinfo(struct bnx2x *bp) bnx2x_get_port_hwinfo() argument
11437 int port = BP_PORT(bp); bnx2x_get_port_hwinfo()
11441 bp->link_params.bp = bp; bnx2x_get_port_hwinfo()
11442 bp->link_params.port = port; bnx2x_get_port_hwinfo()
11444 bp->link_params.lane_config = bnx2x_get_port_hwinfo()
11445 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); bnx2x_get_port_hwinfo()
11447 bp->link_params.speed_cap_mask[0] = bnx2x_get_port_hwinfo()
11448 SHMEM_RD(bp, bnx2x_get_port_hwinfo()
11451 bp->link_params.speed_cap_mask[1] = bnx2x_get_port_hwinfo()
11452 SHMEM_RD(bp, bnx2x_get_port_hwinfo()
11455 bp->port.link_config[0] = bnx2x_get_port_hwinfo()
11456 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); bnx2x_get_port_hwinfo()
11458 bp->port.link_config[1] = bnx2x_get_port_hwinfo()
11459 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); bnx2x_get_port_hwinfo()
11461 bp->link_params.multi_phy_config = bnx2x_get_port_hwinfo()
11462 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); bnx2x_get_port_hwinfo()
11466 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); bnx2x_get_port_hwinfo()
11467 bp->wol = (!(bp->flags & NO_WOL_FLAG) && bnx2x_get_port_hwinfo()
11471 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) bnx2x_get_port_hwinfo()
11472 bp->flags |= NO_ISCSI_FLAG; bnx2x_get_port_hwinfo()
11474 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) bnx2x_get_port_hwinfo()
11475 bp->flags |= NO_FCOE_FLAG; bnx2x_get_port_hwinfo()
11478 bp->link_params.lane_config, bnx2x_get_port_hwinfo()
11479 bp->link_params.speed_cap_mask[0], bnx2x_get_port_hwinfo()
11480 bp->port.link_config[0]); bnx2x_get_port_hwinfo()
11482 bp->link_params.switch_cfg = (bp->port.link_config[0] & bnx2x_get_port_hwinfo()
11484 bnx2x_phy_probe(&bp->link_params); bnx2x_get_port_hwinfo()
11485 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); bnx2x_get_port_hwinfo()
11487 bnx2x_link_settings_requested(bp); bnx2x_get_port_hwinfo()
11494 SHMEM_RD(bp, bnx2x_get_port_hwinfo()
11498 bp->mdio.prtad = bp->port.phy_addr; bnx2x_get_port_hwinfo()
11502 bp->mdio.prtad = bnx2x_get_port_hwinfo()
11506 eee_mode = (((SHMEM_RD(bp, dev_info. bnx2x_get_port_hwinfo()
11511 bp->link_params.eee_mode = EEE_MODE_ADV_LPI | bnx2x_get_port_hwinfo()
11515 bp->link_params.eee_mode = 0; bnx2x_get_port_hwinfo()
11519 void bnx2x_get_iscsi_info(struct bnx2x *bp) bnx2x_get_iscsi_info() argument
11522 int port = BP_PORT(bp); bnx2x_get_iscsi_info()
11523 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, bnx2x_get_iscsi_info()
11526 if (!CNIC_SUPPORT(bp)) { bnx2x_get_iscsi_info()
11527 bp->flags |= no_flags; bnx2x_get_iscsi_info()
11532 bp->cnic_eth_dev.max_iscsi_conn = bnx2x_get_iscsi_info()
11537 bp->cnic_eth_dev.max_iscsi_conn); bnx2x_get_iscsi_info()
11543 if (!bp->cnic_eth_dev.max_iscsi_conn) bnx2x_get_iscsi_info()
11544 bp->flags |= no_flags; bnx2x_get_iscsi_info()
11547 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) bnx2x_get_ext_wwn_info() argument
11550 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = bnx2x_get_ext_wwn_info()
11551 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); bnx2x_get_ext_wwn_info()
11552 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = bnx2x_get_ext_wwn_info()
11553 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); bnx2x_get_ext_wwn_info()
11556 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = bnx2x_get_ext_wwn_info()
11557 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); bnx2x_get_ext_wwn_info()
11558 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = bnx2x_get_ext_wwn_info()
11559 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); bnx2x_get_ext_wwn_info()
11562 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) bnx2x_shared_fcoe_funcs() argument
11566 if (IS_MF(bp)) { bnx2x_shared_fcoe_funcs()
11570 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { bnx2x_shared_fcoe_funcs()
11571 if (IS_MF_SD(bp)) { bnx2x_shared_fcoe_funcs()
11572 u32 cfg = MF_CFG_RD(bp, bnx2x_shared_fcoe_funcs()
11580 u32 cfg = MF_CFG_RD(bp, bnx2x_shared_fcoe_funcs()
11590 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1; bnx2x_shared_fcoe_funcs()
11593 u32 lic = SHMEM_RD(bp, bnx2x_shared_fcoe_funcs()
11604 static void bnx2x_get_fcoe_info(struct bnx2x *bp) bnx2x_get_fcoe_info() argument
11606 int port = BP_PORT(bp); bnx2x_get_fcoe_info()
11607 int func = BP_ABS_FUNC(bp); bnx2x_get_fcoe_info()
11608 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, bnx2x_get_fcoe_info()
11610 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); bnx2x_get_fcoe_info()
11612 if (!CNIC_SUPPORT(bp)) { bnx2x_get_fcoe_info()
11613 bp->flags |= NO_FCOE_FLAG; bnx2x_get_fcoe_info()
11618 bp->cnic_eth_dev.max_fcoe_conn = bnx2x_get_fcoe_info()
11623 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; bnx2x_get_fcoe_info()
11627 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; bnx2x_get_fcoe_info()
11630 if (!IS_MF(bp)) { bnx2x_get_fcoe_info()
11632 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = bnx2x_get_fcoe_info()
11633 SHMEM_RD(bp, bnx2x_get_fcoe_info()
11636 bp->cnic_eth_dev.fcoe_wwn_port_name_lo = bnx2x_get_fcoe_info()
11637 SHMEM_RD(bp, bnx2x_get_fcoe_info()
11642 bp->cnic_eth_dev.fcoe_wwn_node_name_hi = bnx2x_get_fcoe_info()
11643 SHMEM_RD(bp, bnx2x_get_fcoe_info()
11646 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = bnx2x_get_fcoe_info()
11647 SHMEM_RD(bp, bnx2x_get_fcoe_info()
11650 } else if (!IS_MF_SD(bp)) { bnx2x_get_fcoe_info()
11654 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) bnx2x_get_fcoe_info()
11655 bnx2x_get_ext_wwn_info(bp, func); bnx2x_get_fcoe_info()
11657 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_fcoe_info()
11658 bnx2x_get_ext_wwn_info(bp, func); bnx2x_get_fcoe_info()
11661 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); bnx2x_get_fcoe_info()
11667 if (!bp->cnic_eth_dev.max_fcoe_conn) bnx2x_get_fcoe_info()
11668 bp->flags |= NO_FCOE_FLAG; bnx2x_get_fcoe_info()
11671 static void bnx2x_get_cnic_info(struct bnx2x *bp) bnx2x_get_cnic_info() argument
11678 bnx2x_get_iscsi_info(bp); bnx2x_get_cnic_info()
11679 bnx2x_get_fcoe_info(bp); bnx2x_get_cnic_info()
11682 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) bnx2x_get_cnic_mac_hwinfo() argument
11685 int func = BP_ABS_FUNC(bp); bnx2x_get_cnic_mac_hwinfo()
11686 int port = BP_PORT(bp); bnx2x_get_cnic_mac_hwinfo()
11687 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; bnx2x_get_cnic_mac_hwinfo()
11688 u8 *fip_mac = bp->fip_mac; bnx2x_get_cnic_mac_hwinfo()
11690 if (IS_MF(bp)) { bnx2x_get_cnic_mac_hwinfo()
11696 if (!IS_MF_SD(bp)) { bnx2x_get_cnic_mac_hwinfo()
11697 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); bnx2x_get_cnic_mac_hwinfo()
11699 val2 = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo()
11701 val = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo()
11707 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; bnx2x_get_cnic_mac_hwinfo()
11711 val2 = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo()
11713 val = MF_CFG_RD(bp, func_ext_config[func]. bnx2x_get_cnic_mac_hwinfo()
11719 bp->flags |= NO_FCOE_FLAG; bnx2x_get_cnic_mac_hwinfo()
11722 bp->mf_ext_config = cfg; bnx2x_get_cnic_mac_hwinfo()
11725 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { bnx2x_get_cnic_mac_hwinfo()
11727 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo()
11732 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { bnx2x_get_cnic_mac_hwinfo()
11734 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo()
11745 if (IS_MF_FCOE_AFEX(bp)) bnx2x_get_cnic_mac_hwinfo()
11746 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); bnx2x_get_cnic_mac_hwinfo()
11748 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo()
11750 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo()
11754 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo()
11756 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. bnx2x_get_cnic_mac_hwinfo()
11763 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; bnx2x_get_cnic_mac_hwinfo()
11769 bp->flags |= NO_FCOE_FLAG; bnx2x_get_cnic_mac_hwinfo()
11770 eth_zero_addr(bp->fip_mac); bnx2x_get_cnic_mac_hwinfo()
11774 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_get_mac_hwinfo() argument
11777 int func = BP_ABS_FUNC(bp); bnx2x_get_mac_hwinfo()
11778 int port = BP_PORT(bp); bnx2x_get_mac_hwinfo()
11781 eth_zero_addr(bp->dev->dev_addr); bnx2x_get_mac_hwinfo()
11783 if (BP_NOMCP(bp)) { bnx2x_get_mac_hwinfo()
11785 eth_hw_addr_random(bp->dev); bnx2x_get_mac_hwinfo()
11786 } else if (IS_MF(bp)) { bnx2x_get_mac_hwinfo()
11787 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); bnx2x_get_mac_hwinfo()
11788 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); bnx2x_get_mac_hwinfo()
11791 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); bnx2x_get_mac_hwinfo()
11793 if (CNIC_SUPPORT(bp)) bnx2x_get_mac_hwinfo()
11794 bnx2x_get_cnic_mac_hwinfo(bp); bnx2x_get_mac_hwinfo()
11797 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); bnx2x_get_mac_hwinfo()
11798 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); bnx2x_get_mac_hwinfo()
11799 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); bnx2x_get_mac_hwinfo()
11801 if (CNIC_SUPPORT(bp)) bnx2x_get_mac_hwinfo()
11802 bnx2x_get_cnic_mac_hwinfo(bp); bnx2x_get_mac_hwinfo()
11805 if (!BP_NOMCP(bp)) { bnx2x_get_mac_hwinfo()
11807 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); bnx2x_get_mac_hwinfo()
11808 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); bnx2x_get_mac_hwinfo()
11809 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); bnx2x_get_mac_hwinfo()
11810 bp->flags |= HAS_PHYS_PORT_ID; bnx2x_get_mac_hwinfo()
11813 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); bnx2x_get_mac_hwinfo()
11815 if (!is_valid_ether_addr(bp->dev->dev_addr)) bnx2x_get_mac_hwinfo()
11816 dev_err(&bp->pdev->dev, bnx2x_get_mac_hwinfo()
11819 bp->dev->dev_addr); bnx2x_get_mac_hwinfo()
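
bnx2x_set_mac_buf(), used above, assembles a 6-byte MAC address from two shmem words: mac_upper supplies the top two bytes and mac_lower the bottom four. A standalone sketch of that assembly (big-endian byte order within each word is assumed):

#include <stdint.h>
#include <stdio.h>

/* Build mac[] from the upper 16 bits and lower 32 bits read from shmem. */
static void set_mac_buf(uint8_t *mac, uint32_t lo, uint16_t hi)
{
        mac[0] = (uint8_t)(hi >> 8);
        mac[1] = (uint8_t)hi;
        mac[2] = (uint8_t)(lo >> 24);
        mac[3] = (uint8_t)(lo >> 16);
        mac[4] = (uint8_t)(lo >> 8);
        mac[5] = (uint8_t)lo;
}

int main(void)
{
        uint8_t mac[6];

        set_mac_buf(mac, 0x33445566, 0x1122);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
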
11822 static bool bnx2x_get_dropless_info(struct bnx2x *bp) bnx2x_get_dropless_info() argument
11827 if (IS_VF(bp)) bnx2x_get_dropless_info()
11830 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { bnx2x_get_dropless_info()
11832 tmp = BP_ABS_FUNC(bp); bnx2x_get_dropless_info()
11833 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); bnx2x_get_dropless_info()
11837 tmp = BP_PORT(bp); bnx2x_get_dropless_info()
11838 cfg = SHMEM_RD(bp, bnx2x_get_dropless_info()
11845 static void validate_set_si_mode(struct bnx2x *bp) validate_set_si_mode() argument
11847 u8 func = BP_ABS_FUNC(bp); validate_set_si_mode()
11850 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); validate_set_si_mode()
11854 bp->mf_mode = MULTI_FUNCTION_SI; validate_set_si_mode()
11855 bp->mf_config[BP_VN(bp)] = validate_set_si_mode()
11856 MF_CFG_RD(bp, func_mf_config[func].config); validate_set_si_mode()
11861 static int bnx2x_get_hwinfo(struct bnx2x *bp) bnx2x_get_hwinfo() argument
11863 int /*abs*/func = BP_ABS_FUNC(bp); bnx2x_get_hwinfo()
11869 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { bnx2x_get_hwinfo()
11870 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo()
11875 bnx2x_get_common_hwinfo(bp); bnx2x_get_hwinfo()
11880 if (CHIP_IS_E1x(bp)) { bnx2x_get_hwinfo()
11881 bp->common.int_block = INT_BLOCK_HC; bnx2x_get_hwinfo()
11883 bp->igu_dsb_id = DEF_SB_IGU_ID; bnx2x_get_hwinfo()
11884 bp->igu_base_sb = 0; bnx2x_get_hwinfo()
11886 bp->common.int_block = INT_BLOCK_IGU; bnx2x_get_hwinfo()
11889 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_get_hwinfo()
11891 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); bnx2x_get_hwinfo()
11899 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); bnx2x_get_hwinfo()
11900 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); bnx2x_get_hwinfo()
11902 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { bnx2x_get_hwinfo()
11907 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { bnx2x_get_hwinfo()
11908 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo()
11910 bnx2x_release_hw_lock(bp, bnx2x_get_hwinfo()
11918 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; bnx2x_get_hwinfo()
11922 rc = bnx2x_get_igu_cam_info(bp); bnx2x_get_hwinfo()
11923 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); bnx2x_get_hwinfo()
11933 if (CHIP_IS_E1x(bp)) bnx2x_get_hwinfo()
11934 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); bnx2x_get_hwinfo()
11940 bp->base_fw_ndsb = bp->igu_base_sb; bnx2x_get_hwinfo()
11943 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, bnx2x_get_hwinfo()
11944 bp->igu_sb_cnt, bp->base_fw_ndsb); bnx2x_get_hwinfo()
11950 bp->mf_ov = 0; bnx2x_get_hwinfo()
11951 bp->mf_mode = 0; bnx2x_get_hwinfo()
11952 bp->mf_sub_mode = 0; bnx2x_get_hwinfo()
11953 vn = BP_VN(bp); bnx2x_get_hwinfo()
11954 mfw_vn = BP_FW_MB_IDX(bp); bnx2x_get_hwinfo()
11956 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { bnx2x_get_hwinfo()
11958 bp->common.shmem2_base, SHMEM2_RD(bp, size), bnx2x_get_hwinfo()
11961 if (SHMEM2_HAS(bp, mf_cfg_addr)) bnx2x_get_hwinfo()
11962 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); bnx2x_get_hwinfo()
11964 bp->common.mf_cfg_base = bp->common.shmem_base + bnx2x_get_hwinfo()
11975 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { bnx2x_get_hwinfo()
11977 val = SHMEM_RD(bp, bnx2x_get_hwinfo()
11983 validate_set_si_mode(bp); bnx2x_get_hwinfo()
11986 if ((!CHIP_IS_E1x(bp)) && bnx2x_get_hwinfo()
11987 (MF_CFG_RD(bp, func_mf_config[func]. bnx2x_get_hwinfo()
11989 (SHMEM2_HAS(bp, bnx2x_get_hwinfo()
11991 bp->mf_mode = MULTI_FUNCTION_AFEX; bnx2x_get_hwinfo()
11992 bp->mf_config[vn] = MF_CFG_RD(bp, bnx2x_get_hwinfo()
12000 val = MF_CFG_RD(bp, bnx2x_get_hwinfo()
12005 bp->mf_mode = MULTI_FUNCTION_SD; bnx2x_get_hwinfo()
12006 bp->mf_config[vn] = MF_CFG_RD(bp, bnx2x_get_hwinfo()
12012 bp->mf_mode = MULTI_FUNCTION_SD; bnx2x_get_hwinfo()
12013 bp->mf_sub_mode = SUB_MF_MODE_BD; bnx2x_get_hwinfo()
12014 bp->mf_config[vn] = bnx2x_get_hwinfo()
12015 MF_CFG_RD(bp, bnx2x_get_hwinfo()
12018 if (SHMEM2_HAS(bp, mtu_size)) { bnx2x_get_hwinfo()
12019 int mtu_idx = BP_FW_MB_IDX(bp); bnx2x_get_hwinfo()
12023 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); bnx2x_get_hwinfo()
12033 bp->dev->mtu = mtu_size; bnx2x_get_hwinfo()
12037 bp->mf_mode = MULTI_FUNCTION_SD; bnx2x_get_hwinfo()
12038 bp->mf_sub_mode = SUB_MF_MODE_UFP; bnx2x_get_hwinfo()
12039 bp->mf_config[vn] = bnx2x_get_hwinfo()
12040 MF_CFG_RD(bp, bnx2x_get_hwinfo()
12044 bp->mf_config[vn] = 0; bnx2x_get_hwinfo()
12047 val2 = SHMEM_RD(bp, bnx2x_get_hwinfo()
12052 validate_set_si_mode(bp); bnx2x_get_hwinfo()
12053 bp->mf_sub_mode = bnx2x_get_hwinfo()
12058 bp->mf_config[vn] = 0; bnx2x_get_hwinfo()
12065 bp->mf_config[vn] = 0; bnx2x_get_hwinfo()
12071 IS_MF(bp) ? "multi" : "single"); bnx2x_get_hwinfo()
12073 switch (bp->mf_mode) { bnx2x_get_hwinfo()
12075 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & bnx2x_get_hwinfo()
12078 bp->mf_ov = val; bnx2x_get_hwinfo()
12079 bp->path_has_ovlan = true; bnx2x_get_hwinfo()
12082 func, bp->mf_ov, bp->mf_ov); bnx2x_get_hwinfo()
12083 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || bnx2x_get_hwinfo()
12084 (bp->mf_sub_mode == SUB_MF_MODE_BD)) { bnx2x_get_hwinfo()
12085 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo()
12088 bp->path_has_ovlan = true; bnx2x_get_hwinfo()
12090 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo()
12105 dev_err(&bp->pdev->dev, bnx2x_get_hwinfo()
12118 if (CHIP_MODE_IS_4_PORT(bp) && bnx2x_get_hwinfo()
12119 !bp->path_has_ovlan && bnx2x_get_hwinfo()
12120 !IS_MF(bp) && bnx2x_get_hwinfo()
12121 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { bnx2x_get_hwinfo()
12122 u8 other_port = !BP_PORT(bp); bnx2x_get_hwinfo()
12123 u8 other_func = BP_PATH(bp) + 2*other_port; bnx2x_get_hwinfo()
12124 val = MF_CFG_RD(bp, bnx2x_get_hwinfo()
12127 bp->path_has_ovlan = true; bnx2x_get_hwinfo()
12132 if (CHIP_IS_E1H(bp) && IS_MF(bp)) bnx2x_get_hwinfo()
12133 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); bnx2x_get_hwinfo()
12136 bnx2x_get_port_hwinfo(bp); bnx2x_get_hwinfo()
12139 bnx2x_get_mac_hwinfo(bp); bnx2x_get_hwinfo()
12141 bnx2x_get_cnic_info(bp); bnx2x_get_hwinfo()
12146 static void bnx2x_read_fwinfo(struct bnx2x *bp) bnx2x_read_fwinfo() argument
12156 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); bnx2x_read_fwinfo()
12157 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); bnx2x_read_fwinfo()
12182 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, bnx2x_read_fwinfo()
12219 memcpy(bp->fw_ver, &vpd_data[rodi], len); bnx2x_read_fwinfo()
12220 bp->fw_ver[len] = ' '; bnx2x_read_fwinfo()
12231 static void bnx2x_set_modes_bitmap(struct bnx2x *bp) bnx2x_set_modes_bitmap() argument
12235 if (CHIP_REV_IS_FPGA(bp)) bnx2x_set_modes_bitmap()
12237 else if (CHIP_REV_IS_EMUL(bp)) bnx2x_set_modes_bitmap()
12242 if (CHIP_MODE_IS_4_PORT(bp)) bnx2x_set_modes_bitmap()
12247 if (CHIP_IS_E2(bp)) bnx2x_set_modes_bitmap()
12249 else if (CHIP_IS_E3(bp)) { bnx2x_set_modes_bitmap()
12251 if (CHIP_REV(bp) == CHIP_REV_Ax) bnx2x_set_modes_bitmap()
12253 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ bnx2x_set_modes_bitmap()
12257 if (IS_MF(bp)) { bnx2x_set_modes_bitmap()
12259 switch (bp->mf_mode) { bnx2x_set_modes_bitmap()
12278 INIT_MODE_FLAGS(bp) = flags; bnx2x_set_modes_bitmap()
12281 static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_init_bp() argument
12286 mutex_init(&bp->port.phy_mutex); bnx2x_init_bp()
12287 mutex_init(&bp->fw_mb_mutex); bnx2x_init_bp()
12288 mutex_init(&bp->drv_info_mutex); bnx2x_init_bp()
12289 sema_init(&bp->stats_lock, 1); bnx2x_init_bp()
12290 bp->drv_info_mng_owner = false; bnx2x_init_bp()
12291 INIT_LIST_HEAD(&bp->vlan_reg); bnx2x_init_bp()
12293 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); bnx2x_init_bp()
12294 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); bnx2x_init_bp()
12295 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); bnx2x_init_bp()
12296 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); bnx2x_init_bp()
12297 if (IS_PF(bp)) { bnx2x_init_bp()
12298 rc = bnx2x_get_hwinfo(bp); bnx2x_init_bp()
12302 eth_zero_addr(bp->dev->dev_addr); bnx2x_init_bp()
12305 bnx2x_set_modes_bitmap(bp); bnx2x_init_bp()
12307 rc = bnx2x_alloc_mem_bp(bp); bnx2x_init_bp()
12311 bnx2x_read_fwinfo(bp); bnx2x_init_bp()
12313 func = BP_FUNC(bp); bnx2x_init_bp()
12316 if (IS_PF(bp) && !BP_NOMCP(bp)) { bnx2x_init_bp()
12318 bp->fw_seq = bnx2x_init_bp()
12319 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_init_bp()
12321 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); bnx2x_init_bp()
12323 rc = bnx2x_prev_unload(bp); bnx2x_init_bp()
12325 bnx2x_free_mem_bp(bp); bnx2x_init_bp()
12330 if (CHIP_REV_IS_FPGA(bp)) bnx2x_init_bp()
12331 dev_err(&bp->pdev->dev, "FPGA detected\n"); bnx2x_init_bp()
12333 if (BP_NOMCP(bp) && (func == 0)) bnx2x_init_bp()
12334 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bnx2x_init_bp()
12336 bp->disable_tpa = disable_tpa; bnx2x_init_bp()
12337 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); bnx2x_init_bp()
12339 bp->disable_tpa |= is_kdump_kernel(); bnx2x_init_bp()
12342 if (bp->disable_tpa) { bnx2x_init_bp()
12343 bp->dev->hw_features &= ~NETIF_F_LRO; bnx2x_init_bp()
12344 bp->dev->features &= ~NETIF_F_LRO; bnx2x_init_bp()
12347 if (CHIP_IS_E1(bp)) bnx2x_init_bp()
12348 bp->dropless_fc = 0; bnx2x_init_bp()
12350 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); bnx2x_init_bp()
12352 bp->mrrs = mrrs; bnx2x_init_bp()
12354 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; bnx2x_init_bp()
12355 if (IS_VF(bp)) bnx2x_init_bp()
12356 bp->rx_ring_size = MAX_RX_AVAIL; bnx2x_init_bp()
12359 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; bnx2x_init_bp()
12360 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; bnx2x_init_bp()
12362 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; bnx2x_init_bp()
12364 init_timer(&bp->timer); bnx2x_init_bp()
12365 bp->timer.expires = jiffies + bp->current_interval; bnx2x_init_bp()
12366 bp->timer.data = (unsigned long) bp; bnx2x_init_bp()
12367 bp->timer.function = bnx2x_timer; bnx2x_init_bp()
12369 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && bnx2x_init_bp()
12370 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && bnx2x_init_bp()
12371 SHMEM2_RD(bp, dcbx_lldp_params_offset) && bnx2x_init_bp()
12372 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { bnx2x_init_bp()
12373 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_init_bp()
12374 bnx2x_dcbx_init_params(bp); bnx2x_init_bp()
12376 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); bnx2x_init_bp()
12379 if (CHIP_IS_E1x(bp)) bnx2x_init_bp()
12380 bp->cnic_base_cl_id = FP_SB_MAX_E1x; bnx2x_init_bp()
12382 bp->cnic_base_cl_id = FP_SB_MAX_E2; bnx2x_init_bp()
12385 if (IS_VF(bp)) bnx2x_init_bp()
12386 bp->max_cos = 1; bnx2x_init_bp()
12387 else if (CHIP_IS_E1x(bp)) bnx2x_init_bp()
12388 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; bnx2x_init_bp()
12389 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) bnx2x_init_bp()
12390 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; bnx2x_init_bp()
12391 else if (CHIP_IS_E3B0(bp)) bnx2x_init_bp()
12392 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; bnx2x_init_bp()
12395 CHIP_NUM(bp), CHIP_REV(bp)); bnx2x_init_bp()
12396 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); bnx2x_init_bp()
12402 if (IS_VF(bp)) bnx2x_init_bp()
12403 bp->min_msix_vec_cnt = 1; bnx2x_init_bp()
12404 else if (CNIC_SUPPORT(bp)) bnx2x_init_bp()
12405 bp->min_msix_vec_cnt = 3; bnx2x_init_bp()
12407 bp->min_msix_vec_cnt = 2; bnx2x_init_bp()
12408 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); bnx2x_init_bp()
12410 bp->dump_preset_idx = 1; bnx2x_init_bp()
12412 if (CHIP_IS_E3B0(bp)) bnx2x_init_bp()
12413 bp->flags |= PTP_SUPPORTED; bnx2x_init_bp()
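
The coalescing defaults set in bnx2x_init_bp() (tx_ticks, rx_ticks) round the requested microsecond values down to a multiple of the BNX2X_BTR tick via (usec / BNX2X_BTR) * BNX2X_BTR. A sketch of that rounding with an assumed tick of 4 (the driver defines its own value):

#include <stdio.h>

#define BTR 4   /* assumed stand-in for BNX2X_BTR */

/* Integer division truncates, so this rounds down to a tick multiple. */
static int round_to_tick(int usec)
{
        return (usec / BTR) * BTR;
}

int main(void)
{
        printf("tx %d rx %d\n", round_to_tick(50), round_to_tick(25));
        /* prints "tx 48 rx 24" */
        return 0;
}
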
12429 struct bnx2x *bp = netdev_priv(dev); bnx2x_open() local
12432 bp->stats_init = true; bnx2x_open()
12436 bnx2x_set_power_state(bp, PCI_D0); bnx2x_open()
12444 if (IS_PF(bp)) { bnx2x_open()
12445 int other_engine = BP_PATH(bp) ? 0 : 1; bnx2x_open()
12449 other_load_status = bnx2x_get_load_status(bp, other_engine); bnx2x_open()
12450 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); bnx2x_open()
12451 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || bnx2x_open()
12452 bnx2x_chk_parity_attn(bp, &global, true)) { bnx2x_open()
12460 bnx2x_set_reset_global(bp); bnx2x_open()
12469 bnx2x_trylock_leader_lock(bp) && bnx2x_open()
12470 !bnx2x_leader_reset(bp)) { bnx2x_open()
12471 netdev_info(bp->dev, bnx2x_open()
12477 bnx2x_set_power_state(bp, PCI_D3hot); bnx2x_open()
12478 bp->recovery_state = BNX2X_RECOVERY_FAILED; bnx2x_open()
12488 bp->recovery_state = BNX2X_RECOVERY_DONE; bnx2x_open()
12489 rc = bnx2x_nic_load(bp, LOAD_OPEN); bnx2x_open()
12494 if (IS_PF(bp)) bnx2x_open()
12504 struct bnx2x *bp = netdev_priv(dev); bnx2x_close() local
12507 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_close()
12512 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, bnx2x_init_mcast_macs_list() argument
12515 int mc_count = netdev_mc_count(bp->dev); bnx2x_init_mcast_macs_list()
12525 netdev_for_each_mc_addr(ha, bp->dev) { bnx2x_init_mcast_macs_list()
12550 * @bp: driver handle
12554 static int bnx2x_set_uc_list(struct bnx2x *bp) bnx2x_set_uc_list() argument
12557 struct net_device *dev = bp->dev; bnx2x_set_uc_list()
12559 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; bnx2x_set_uc_list()
12563 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); bnx2x_set_uc_list()
12570 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, netdev_for_each_uc_addr()
12588 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12592 static int bnx2x_set_mc_list(struct bnx2x *bp) bnx2x_set_mc_list() argument
12594 struct net_device *dev = bp->dev; bnx2x_set_mc_list()
12598 rparam.mcast_obj = &bp->mcast_obj; bnx2x_set_mc_list()
12601 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); bnx2x_set_mc_list()
12609 rc = bnx2x_init_mcast_macs_list(bp, &rparam); bnx2x_set_mc_list()
12617 rc = bnx2x_config_mcast(bp, &rparam, bnx2x_set_mc_list()
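
bnx2x_set_mc_list() reconfigures multicast filters with a "delete everything, then re-add the current list" strategy (CMD_DEL followed by a fresh config) rather than diffing old and new sets. A toy version of that flow, with a fixed-size table standing in for the driver's mcast object:

#include <stdio.h>
#include <string.h>

#define MAX_MC 8

static char mc_tbl[MAX_MC][18];
static int mc_cnt;

/* Drop all old filters, then program the new set from scratch. */
static void mc_config(const char *const *list, int n)
{
        mc_cnt = 0;                             /* DEL: clear old filters */
        for (int i = 0; i < n && i < MAX_MC; i++)
                strcpy(mc_tbl[mc_cnt++], list[i]);  /* ADD: new entries */
}

int main(void)
{
        const char *macs[] = { "01:00:5e:00:00:01", "01:00:5e:00:00:fb" };

        mc_config(macs, 2);
        printf("programmed %d mc filters\n", mc_cnt);
        return 0;
}
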
12629 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ bnx2x_set_rx_mode()
12632 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rx_mode() local
12634 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_set_rx_mode()
12635 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); bnx2x_set_rx_mode()
12639 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, bnx2x_set_rx_mode()
12644 void bnx2x_set_rx_mode_inner(struct bnx2x *bp) bnx2x_set_rx_mode_inner() argument
12648 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); bnx2x_set_rx_mode_inner()
12650 netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12652 if (bp->dev->flags & IFF_PROMISC) { bnx2x_set_rx_mode_inner()
12654 } else if ((bp->dev->flags & IFF_ALLMULTI) || bnx2x_set_rx_mode_inner()
12655 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && bnx2x_set_rx_mode_inner()
12656 CHIP_IS_E1(bp))) { bnx2x_set_rx_mode_inner()
12659 if (IS_PF(bp)) { bnx2x_set_rx_mode_inner()
12661 if (bnx2x_set_mc_list(bp) < 0) bnx2x_set_rx_mode_inner()
12665 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12666 if (bnx2x_set_uc_list(bp) < 0) bnx2x_set_rx_mode_inner()
12668 netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12673 bnx2x_schedule_sp_rtnl(bp, bnx2x_set_rx_mode_inner()
12678 bp->rx_mode = rx_mode; bnx2x_set_rx_mode_inner()
12680 if (IS_MF_ISCSI_ONLY(bp)) bnx2x_set_rx_mode_inner()
12681 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_set_rx_mode_inner()
12684 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { bnx2x_set_rx_mode_inner()
12685 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); bnx2x_set_rx_mode_inner()
12686 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12690 if (IS_PF(bp)) { bnx2x_set_rx_mode_inner()
12691 bnx2x_set_storm_rx_mode(bp); bnx2x_set_rx_mode_inner()
12692 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12698 netif_addr_unlock_bh(bp->dev); bnx2x_set_rx_mode_inner()
12699 bnx2x_vfpf_storm_rx_mode(bp); bnx2x_set_rx_mode_inner()
12707 struct bnx2x *bp = netdev_priv(netdev); bnx2x_mdio_read() local
12717 bnx2x_acquire_phy_lock(bp); bnx2x_mdio_read()
12718 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); bnx2x_mdio_read()
12719 bnx2x_release_phy_lock(bp); bnx2x_mdio_read()
12731 struct bnx2x *bp = netdev_priv(netdev); bnx2x_mdio_write() local
12741 bnx2x_acquire_phy_lock(bp); bnx2x_mdio_write()
12742 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); bnx2x_mdio_write()
12743 bnx2x_release_phy_lock(bp); bnx2x_mdio_write()
12750 struct bnx2x *bp = netdev_priv(dev); bnx2x_ioctl() local
12758 return bnx2x_hwtstamp_ioctl(bp, ifr); bnx2x_ioctl()
12762 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); bnx2x_ioctl()
12769 struct bnx2x *bp = netdev_priv(dev); poll_bnx2x() local
12772 for_each_eth_queue(bp, i) { for_each_eth_queue()
12773 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
12774 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); for_each_eth_queue()
12781 struct bnx2x *bp = netdev_priv(dev); bnx2x_validate_addr() local
12784 if (IS_VF(bp)) bnx2x_validate_addr()
12785 bnx2x_sample_bulletin(bp); bnx2x_validate_addr()
12797 struct bnx2x *bp = netdev_priv(netdev); bnx2x_get_phys_port_id() local
12799 if (!(bp->flags & HAS_PHYS_PORT_ID)) bnx2x_get_phys_port_id()
12802 ppid->id_len = sizeof(bp->phys_port_id); bnx2x_get_phys_port_id()
12803 memcpy(ppid->id, bp->phys_port_id, ppid->id_len); bnx2x_get_phys_port_id()
12816 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) __bnx2x_vlan_configure_vid() argument
12820 if (IS_PF(bp)) { __bnx2x_vlan_configure_vid()
12824 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj, __bnx2x_vlan_configure_vid()
12827 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add); __bnx2x_vlan_configure_vid()
12833 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) bnx2x_vlan_reconfigure_vid() argument
12838 if (!bp->vlan_cnt) { bnx2x_vlan_reconfigure_vid()
12843 list_for_each_entry(vlan, &bp->vlan_reg, link) { bnx2x_vlan_reconfigure_vid()
12855 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); bnx2x_vlan_reconfigure_vid()
12869 struct bnx2x *bp = netdev_priv(dev); bnx2x_vlan_rx_add_vid() local
12874 if (!netif_running(bp->dev)) { bnx2x_vlan_rx_add_vid()
12886 bp->vlan_cnt++; bnx2x_vlan_rx_add_vid()
12887 if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { bnx2x_vlan_rx_add_vid()
12889 bp->accept_any_vlan = true; bnx2x_vlan_rx_add_vid()
12890 if (IS_PF(bp)) bnx2x_vlan_rx_add_vid()
12891 bnx2x_set_rx_mode_inner(bp); bnx2x_vlan_rx_add_vid()
12893 bnx2x_vfpf_storm_rx_mode(bp); bnx2x_vlan_rx_add_vid()
12894 } else if (bp->vlan_cnt <= bp->vlan_credit) { bnx2x_vlan_rx_add_vid()
12895 rc = __bnx2x_vlan_configure_vid(bp, vid, true); bnx2x_vlan_rx_add_vid()
12903 list_add(&vlan->link, &bp->vlan_reg); bnx2x_vlan_rx_add_vid()
12905 bp->vlan_cnt--; bnx2x_vlan_rx_add_vid()
12916 struct bnx2x *bp = netdev_priv(dev); bnx2x_vlan_rx_kill_vid() local
12920 if (!netif_running(bp->dev)) { bnx2x_vlan_rx_kill_vid()
12928 if (!bp->vlan_cnt) { bnx2x_vlan_rx_kill_vid()
12933 list_for_each_entry(vlan, &bp->vlan_reg, link) bnx2x_vlan_rx_kill_vid()
12943 rc = __bnx2x_vlan_configure_vid(bp, vid, false); bnx2x_vlan_rx_kill_vid()
12948 bp->vlan_cnt--; bnx2x_vlan_rx_kill_vid()
12950 if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { bnx2x_vlan_rx_kill_vid()
12952 list_for_each_entry(vlan, &bp->vlan_reg, link) { bnx2x_vlan_rx_kill_vid()
12956 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); bnx2x_vlan_rx_kill_vid()
12967 bp->accept_any_vlan = false; bnx2x_vlan_rx_kill_vid()
12968 if (IS_PF(bp)) bnx2x_vlan_rx_kill_vid()
12969 bnx2x_set_rx_mode_inner(bp); bnx2x_vlan_rx_kill_vid()
12971 bnx2x_vfpf_storm_rx_mode(bp); bnx2x_vlan_rx_kill_vid()
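The add/kill paths above implement credit-based VLAN filtering: while the registered VID count fits within bp->vlan_credit each VID is programmed into hardware, and once it overflows the driver falls back to accept_any_vlan until the count drops again. A self-contained, runnable model of that fallback logic (all names here are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	struct vlan_state {
		int cnt, credit;
		bool accept_any_vlan;
	};

	static void add_vid(struct vlan_state *s)
	{
		s->cnt++;
		if (s->cnt > s->credit && !s->accept_any_vlan) {
			s->accept_any_vlan = true;	/* filter table full */
			printf("fallback: accept any VLAN\n");
		}
	}

	static void kill_vid(struct vlan_state *s)
	{
		s->cnt--;
		if (s->cnt <= s->credit && s->accept_any_vlan) {
			s->accept_any_vlan = false;	/* re-program per-VID filters */
			printf("restore per-VID filtering\n");
		}
	}

	int main(void)
	{
		struct vlan_state s = { .credit = 2 };

		for (int i = 0; i < 4; i++)
			add_vid(&s);	/* 3rd add overflows the credit */
		for (int i = 0; i < 4; i++)
			kill_vid(&s);	/* dropping back restores filtering */
		return 0;
	}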
13019 static int bnx2x_set_coherency_mask(struct bnx2x *bp) bnx2x_set_coherency_mask() argument
13021 struct device *dev = &bp->pdev->dev; bnx2x_set_coherency_mask()
13032 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) bnx2x_disable_pcie_error_reporting() argument
13034 if (bp->flags & AER_ENABLED) { bnx2x_disable_pcie_error_reporting()
13035 pci_disable_pcie_error_reporting(bp->pdev); bnx2x_disable_pcie_error_reporting()
13036 bp->flags &= ~AER_ENABLED; bnx2x_disable_pcie_error_reporting()
13040 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, bnx2x_init_dev() argument
13051 bp->dev = dev; bnx2x_init_dev()
13052 bp->pdev = pdev; bnx2x_init_dev()
13056 dev_err(&bp->pdev->dev, bnx2x_init_dev()
13062 dev_err(&bp->pdev->dev, bnx2x_init_dev()
13068 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { bnx2x_init_dev()
13069 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); bnx2x_init_dev()
13085 dev_err(&bp->pdev->dev, bnx2x_init_dev()
13094 if (IS_PF(bp)) { bnx2x_init_dev()
13096 dev_err(&bp->pdev->dev, bnx2x_init_dev()
13104 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); bnx2x_init_dev()
13109 rc = bnx2x_set_coherency_mask(bp); bnx2x_init_dev()
13119 bp->regview = pci_ioremap_bar(pdev, 0); bnx2x_init_dev()
13120 if (!bp->regview) { bnx2x_init_dev()
13121 dev_err(&bp->pdev->dev, bnx2x_init_dev()
13133 bp->pf_num = PCI_FUNC(pdev->devfn); bnx2x_init_dev()
13136 pci_read_config_dword(bp->pdev, bnx2x_init_dev()
13138 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> bnx2x_init_dev()
13141 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); bnx2x_init_dev()
13144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, bnx2x_init_dev()
13153 bp->flags |= AER_ENABLED; bnx2x_init_dev()
13161 if (IS_PF(bp)) { bnx2x_init_dev()
13162 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); bnx2x_init_dev()
13163 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); bnx2x_init_dev()
13164 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); bnx2x_init_dev()
13165 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); bnx2x_init_dev()
13168 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); bnx2x_init_dev()
13169 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); bnx2x_init_dev()
13170 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); bnx2x_init_dev()
13171 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); bnx2x_init_dev()
13179 REG_WR(bp, bnx2x_init_dev()
13186 bnx2x_set_ethtool_ops(bp, dev); bnx2x_init_dev()
13209 if (IS_PF(bp)) { bnx2x_init_dev()
13211 bp->accept_any_vlan = true; bnx2x_init_dev()
13215 } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) { bnx2x_init_dev()
13231 bp->mdio.prtad = MDIO_PRTAD_NONE; bnx2x_init_dev()
13232 bp->mdio.mmds = 0; bnx2x_init_dev()
13233 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; bnx2x_init_dev()
13234 bp->mdio.dev = dev; bnx2x_init_dev()
13235 bp->mdio.mdio_read = bnx2x_mdio_read; bnx2x_init_dev()
13236 bp->mdio.mdio_write = bnx2x_mdio_write; bnx2x_init_dev()
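The mdio fields above wire bnx2x into the generic MDIO ioctl layer: mdio_mii_ioctl() (used by bnx2x_ioctl earlier) calls back through mdio_read/mdio_write. A hedged sketch of the read callback contract, with an illustrative name:

	#include <linux/types.h>
	#include <linux/netdevice.h>
	#include <linux/mdio.h>

	/* Sketch of the mdio_if_info read callback contract: return the
	 * 16-bit register value on success or a negative errno. */
	static int example_mdio_read(struct net_device *dev, int prtad,
				     int devad, u16 addr)
	{
		/* acquire the PHY lock, issue the clause-45 read, release */
		return 0;
	}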
13251 static int bnx2x_check_firmware(struct bnx2x *bp) bnx2x_check_firmware() argument
13253 const struct firmware *firmware = bp->firmware; bnx2x_check_firmware()
13375 bp->arr = kmalloc(len, GFP_KERNEL); \
13376 if (!bp->arr) \
13378 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13379 (u8 *)bp->arr, len); \
13382 static int bnx2x_init_firmware(struct bnx2x *bp) bnx2x_init_firmware() argument
13388 if (bp->firmware) bnx2x_init_firmware()
13391 if (CHIP_IS_E1(bp)) bnx2x_init_firmware()
13393 else if (CHIP_IS_E1H(bp)) bnx2x_init_firmware()
13395 else if (!CHIP_IS_E1x(bp)) bnx2x_init_firmware()
13403 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); bnx2x_init_firmware()
13410 rc = bnx2x_check_firmware(bp); bnx2x_init_firmware()
13416 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; bnx2x_init_firmware()
13430 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13432 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13434 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13436 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13438 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13440 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13442 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13444 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + bnx2x_init_firmware()
13452 kfree(bp->init_ops_offsets); bnx2x_init_firmware()
13454 kfree(bp->init_ops); bnx2x_init_firmware()
13456 kfree(bp->init_data); bnx2x_init_firmware()
13458 release_firmware(bp->firmware); bnx2x_init_firmware()
13459 bp->firmware = NULL; bnx2x_init_firmware()
13464 static void bnx2x_release_firmware(struct bnx2x *bp) bnx2x_release_firmware() argument
13466 kfree(bp->init_ops_offsets); bnx2x_release_firmware()
13467 kfree(bp->init_ops); bnx2x_release_firmware()
13468 kfree(bp->init_data); bnx2x_release_firmware()
13469 release_firmware(bp->firmware); bnx2x_release_firmware()
13470 bp->firmware = NULL; bnx2x_release_firmware()
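bnx2x_init_firmware()/bnx2x_release_firmware() above follow the usual request_firmware() lifecycle: request, validate, copy out whatever must outlive the blob, then release. A minimal sketch under an assumed placeholder firmware name ("example.fw" is not a real file):

	#include <linux/firmware.h>
	#include <linux/device.h>

	static int example_load_fw(struct device *dev)
	{
		const struct firmware *fw;
		int rc;

		rc = request_firmware(&fw, "example.fw", dev);
		if (rc)
			return rc;	/* missing file or helper failure */

		/* validate fw->data / fw->size and copy out what must
		 * outlive the blob before releasing it */

		release_firmware(fw);	/* drop the kernel's copy */
		return 0;
	}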
13490 void bnx2x__init_func_obj(struct bnx2x *bp) bnx2x__init_func_obj() argument
13493 bnx2x_setup_dmae(bp); bnx2x__init_func_obj()
13495 bnx2x_init_func_obj(bp, &bp->func_obj, bnx2x__init_func_obj()
13496 bnx2x_sp(bp, func_rdata), bnx2x__init_func_obj()
13497 bnx2x_sp_mapping(bp, func_rdata), bnx2x__init_func_obj()
13498 bnx2x_sp(bp, func_afex_rdata), bnx2x__init_func_obj()
13499 bnx2x_sp_mapping(bp, func_afex_rdata), bnx2x__init_func_obj()
13504 static int bnx2x_set_qm_cid_count(struct bnx2x *bp) bnx2x_set_qm_cid_count() argument
13506 int cid_count = BNX2X_L2_MAX_CID(bp); bnx2x_set_qm_cid_count()
13508 if (IS_SRIOV(bp)) bnx2x_set_qm_cid_count()
13511 if (CNIC_SUPPORT(bp)) bnx2x_set_qm_cid_count()
13611 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, bnx2x_send_update_drift_ramrod() argument
13622 func_params.f_obj = &bp->func_obj; bnx2x_send_update_drift_ramrod()
13633 return bnx2x_func_state_change(bp, &func_params); bnx2x_send_update_drift_ramrod()
13638 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_adjfreq() local
13646 if (!netif_running(bp->dev)) { bnx2x_ptp_adjfreq()
13691 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, bnx2x_ptp_adjfreq()
13706 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_adjtime() local
13710 timecounter_adjtime(&bp->timecounter, delta); bnx2x_ptp_adjtime()
13717 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_gettime() local
13720 ns = timecounter_read(&bp->timecounter); bnx2x_ptp_gettime()
13732 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_settime() local
13740 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); bnx2x_ptp_settime()
13749 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); bnx2x_ptp_enable() local
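The PHC callbacks above delegate to the kernel timecounter: adjtime shifts the software clock's nanosecond offset, gettime reads accumulated nanoseconds. A minimal sketch of that pairing, with an assumed static timecounter standing in for bp->timecounter:

	#include <linux/timecounter.h>

	static struct timecounter example_tc;	/* stands in for bp->timecounter */

	static int example_adjtime(s64 delta)
	{
		timecounter_adjtime(&example_tc, delta);	/* shift ns offset */
		return 0;
	}

	static u64 example_gettime_ns(void)
	{
		return timecounter_read(&example_tc);	/* ns since init */
	}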
13755 static void bnx2x_register_phc(struct bnx2x *bp) bnx2x_register_phc() argument
13758 bp->ptp_clock_info.owner = THIS_MODULE; bnx2x_register_phc()
13759 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); bnx2x_register_phc()
13760 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ bnx2x_register_phc()
13761 bp->ptp_clock_info.n_alarm = 0; bnx2x_register_phc()
13762 bp->ptp_clock_info.n_ext_ts = 0; bnx2x_register_phc()
13763 bp->ptp_clock_info.n_per_out = 0; bnx2x_register_phc()
13764 bp->ptp_clock_info.pps = 0; bnx2x_register_phc()
13765 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; bnx2x_register_phc()
13766 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; bnx2x_register_phc()
13767 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; bnx2x_register_phc()
13768 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; bnx2x_register_phc()
13769 bp->ptp_clock_info.enable = bnx2x_ptp_enable; bnx2x_register_phc()
13771 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); bnx2x_register_phc()
13772 if (IS_ERR(bp->ptp_clock)) { bnx2x_register_phc()
13773 bp->ptp_clock = NULL; bnx2x_register_phc()
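bnx2x_register_phc() above fills a ptp_clock_info and registers it; note that ptp_clock_register() returns an ERR_PTR() on failure, which the driver normalizes to NULL. A hedged sketch of that error handling:

	#include <linux/err.h>
	#include <linux/ptp_clock_kernel.h>

	static struct ptp_clock *example_register(struct ptp_clock_info *info,
						  struct device *dev)
	{
		struct ptp_clock *clk = ptp_clock_register(info, dev);

		if (IS_ERR(clk))
			return NULL;	/* keep a NULL sentinel, as bnx2x does */
		return clk;
	}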
13782 struct bnx2x *bp; bnx2x_init_one() local
13807 * initialization of bp->max_cos based on the chip versions AND chip bnx2x_init_one()
13836 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); bnx2x_init_one()
13840 bp = netdev_priv(dev); bnx2x_init_one()
13842 bp->flags = 0; bnx2x_init_one()
13844 bp->flags |= IS_VF_FLAG; bnx2x_init_one()
13846 bp->igu_sb_cnt = max_non_def_sbs; bnx2x_init_one()
13847 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; bnx2x_init_one()
13848 bp->msg_enable = debug; bnx2x_init_one()
13849 bp->cnic_support = cnic_cnt; bnx2x_init_one()
13850 bp->cnic_probe = bnx2x_cnic_probe; bnx2x_init_one()
13854 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); bnx2x_init_one()
13861 IS_PF(bp) ? "physical" : "virtual"); bnx2x_init_one()
13862 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); bnx2x_init_one()
13867 rc = bnx2x_init_bp(bp); bnx2x_init_one()
13871 /* Map doorbells here as we need the real value of bp->max_cos which bnx2x_init_one()
13875 if (IS_VF(bp)) { bnx2x_init_one()
13876 bp->doorbells = bnx2x_vf_doorbells(bp); bnx2x_init_one()
13877 rc = bnx2x_vf_pci_alloc(bp); bnx2x_init_one()
13881 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); bnx2x_init_one()
13883 dev_err(&bp->pdev->dev, bnx2x_init_one()
13888 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), bnx2x_init_one()
13891 if (!bp->doorbells) { bnx2x_init_one()
13892 dev_err(&bp->pdev->dev, bnx2x_init_one()
13898 if (IS_VF(bp)) { bnx2x_init_one()
13899 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); bnx2x_init_one()
13905 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); bnx2x_init_one()
13910 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); bnx2x_init_one()
13911 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); bnx2x_init_one()
13914 if (CHIP_IS_E1x(bp)) bnx2x_init_one()
13915 bp->flags |= NO_FCOE_FLAG; bnx2x_init_one()
13917 /* Set bp->num_queues for MSI-X mode */ bnx2x_init_one()
13918 bnx2x_set_num_queues(bp); bnx2x_init_one()
13923 rc = bnx2x_set_int_mode(bp); bnx2x_init_one()
13938 if (!NO_FCOE(bp)) { bnx2x_init_one()
13941 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); bnx2x_init_one()
13944 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || bnx2x_init_one()
13952 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), bnx2x_init_one()
13958 dev->base_addr, bp->pdev->irq, dev->dev_addr); bnx2x_init_one()
13960 bnx2x_register_phc(bp); bnx2x_init_one()
13962 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) bnx2x_init_one()
13963 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); bnx2x_init_one()
13968 bnx2x_disable_pcie_error_reporting(bp); bnx2x_init_one()
13970 if (bp->regview) bnx2x_init_one()
13971 iounmap(bp->regview); bnx2x_init_one()
13973 if (IS_PF(bp) && bp->doorbells) bnx2x_init_one()
13974 iounmap(bp->doorbells); bnx2x_init_one()
13988 struct bnx2x *bp, __bnx2x_remove()
13991 if (bp->ptp_clock) { __bnx2x_remove()
13992 ptp_clock_unregister(bp->ptp_clock); __bnx2x_remove()
13993 bp->ptp_clock = NULL; __bnx2x_remove()
13997 if (!NO_FCOE(bp)) { __bnx2x_remove()
13999 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); __bnx2x_remove()
14005 bnx2x_dcbnl_update_applist(bp, true); __bnx2x_remove()
14008 if (IS_PF(bp) && __bnx2x_remove()
14009 !BP_NOMCP(bp) && __bnx2x_remove()
14010 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) __bnx2x_remove()
14011 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); __bnx2x_remove()
14022 bnx2x_iov_remove_one(bp); __bnx2x_remove()
14025 if (IS_PF(bp)) { __bnx2x_remove()
14026 bnx2x_set_power_state(bp, PCI_D0); __bnx2x_remove()
14027 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); __bnx2x_remove()
14032 bnx2x_reset_endianity(bp); __bnx2x_remove()
14036 bnx2x_disable_msi(bp); __bnx2x_remove()
14039 if (IS_PF(bp)) __bnx2x_remove()
14040 bnx2x_set_power_state(bp, PCI_D3hot); __bnx2x_remove()
14043 cancel_delayed_work_sync(&bp->sp_rtnl_task); __bnx2x_remove()
14046 if (IS_VF(bp)) __bnx2x_remove()
14047 bnx2x_vfpf_release(bp); __bnx2x_remove()
14051 pci_wake_from_d3(pdev, bp->wol); __bnx2x_remove()
14055 bnx2x_disable_pcie_error_reporting(bp); __bnx2x_remove()
14057 if (bp->regview) __bnx2x_remove()
14058 iounmap(bp->regview); __bnx2x_remove()
14063 if (IS_PF(bp)) { __bnx2x_remove()
14064 if (bp->doorbells) __bnx2x_remove()
14065 iounmap(bp->doorbells); __bnx2x_remove()
14067 bnx2x_release_firmware(bp); __bnx2x_remove()
14069 bnx2x_vf_pci_dealloc(bp); __bnx2x_remove()
14071 bnx2x_free_mem_bp(bp); __bnx2x_remove()
14085 struct bnx2x *bp; bnx2x_remove_one() local
14091 bp = netdev_priv(dev); bnx2x_remove_one()
14093 __bnx2x_remove(pdev, dev, bp, true); bnx2x_remove_one()
14096 static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_eeh_nic_unload() argument
14098 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bnx2x_eeh_nic_unload()
14100 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_eeh_nic_unload()
14102 if (CNIC_LOADED(bp)) bnx2x_eeh_nic_unload()
14103 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); bnx2x_eeh_nic_unload()
14106 bnx2x_tx_disable(bp); bnx2x_eeh_nic_unload()
14108 bnx2x_del_all_napi(bp); bnx2x_eeh_nic_unload()
14109 if (CNIC_LOADED(bp)) bnx2x_eeh_nic_unload()
14110 bnx2x_del_all_napi_cnic(bp); bnx2x_eeh_nic_unload()
14111 netdev_reset_tc(bp->dev); bnx2x_eeh_nic_unload()
14113 del_timer_sync(&bp->timer); bnx2x_eeh_nic_unload()
14114 cancel_delayed_work_sync(&bp->sp_task); bnx2x_eeh_nic_unload()
14115 cancel_delayed_work_sync(&bp->period_task); bnx2x_eeh_nic_unload()
14117 if (!down_timeout(&bp->stats_lock, HZ / 10)) { bnx2x_eeh_nic_unload()
14118 bp->stats_state = STATS_STATE_DISABLED; bnx2x_eeh_nic_unload()
14119 up(&bp->stats_lock); bnx2x_eeh_nic_unload()
14122 bnx2x_save_statistics(bp); bnx2x_eeh_nic_unload()
14124 netif_carrier_off(bp->dev); bnx2x_eeh_nic_unload()
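The down_timeout()/up() pair above bounds how long the EEH unload path waits for bp->stats_lock (HZ / 10, about 100 ms) rather than blocking error recovery indefinitely. A minimal sketch of the pattern with an illustrative semaphore:

	#include <linux/semaphore.h>
	#include <linux/jiffies.h>

	static DEFINE_SEMAPHORE(example_sem);

	static void example_bounded_lock(void)
	{
		/* down_timeout() returns 0 on success; give up after ~100 ms
		 * instead of stalling the error handler forever */
		if (!down_timeout(&example_sem, HZ / 10)) {
			/* critical section */
			up(&example_sem);
		}
	}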
14141 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_error_detected() local
14155 bnx2x_eeh_nic_unload(bp); bnx2x_io_error_detected()
14157 bnx2x_prev_path_mark_eeh(bp); bnx2x_io_error_detected()
14176 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_slot_reset() local
14193 bnx2x_set_power_state(bp, PCI_D0); bnx2x_io_slot_reset()
14199 bnx2x_init_shmem(bp); bnx2x_io_slot_reset()
14201 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { bnx2x_io_slot_reset()
14204 v = SHMEM2_RD(bp, bnx2x_io_slot_reset()
14205 drv_capabilities_flag[BP_FW_MB_IDX(bp)]); bnx2x_io_slot_reset()
14206 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], bnx2x_io_slot_reset()
14209 bnx2x_drain_tx_queues(bp); bnx2x_io_slot_reset()
14210 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); bnx2x_io_slot_reset()
14211 bnx2x_netif_stop(bp, 1); bnx2x_io_slot_reset()
14212 bnx2x_free_irq(bp); bnx2x_io_slot_reset()
14215 bnx2x_send_unload_done(bp, true); bnx2x_io_slot_reset()
14217 bp->sp_state = 0; bnx2x_io_slot_reset()
14218 bp->port.pmf = 0; bnx2x_io_slot_reset()
14220 bnx2x_prev_unload(bp); bnx2x_io_slot_reset()
14225 bnx2x_squeeze_objects(bp); bnx2x_io_slot_reset()
14226 bnx2x_free_skbs(bp); bnx2x_io_slot_reset()
14227 for_each_rx_queue(bp, i) bnx2x_io_slot_reset()
14228 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); bnx2x_io_slot_reset()
14229 bnx2x_free_fp_mem(bp); bnx2x_io_slot_reset()
14230 bnx2x_free_mem(bp); bnx2x_io_slot_reset()
14232 bp->state = BNX2X_STATE_CLOSED; bnx2x_io_slot_reset()
14238 if (bp->flags & AER_ENABLED) { bnx2x_io_slot_reset()
14258 struct bnx2x *bp = netdev_priv(dev); bnx2x_io_resume() local
14260 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_io_resume()
14261 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); bnx2x_io_resume()
14267 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_io_resume()
14271 bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_io_resume()
14287 struct bnx2x *bp; bnx2x_shutdown() local
14292 bp = netdev_priv(dev); bnx2x_shutdown()
14293 if (!bp) bnx2x_shutdown()
14304 __bnx2x_remove(pdev, dev, bp, false); bnx2x_shutdown()
14366 void bnx2x_notify_link_changed(struct bnx2x *bp) bnx2x_notify_link_changed() argument
14368 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); bnx2x_notify_link_changed()
14377 * @bp: driver handle
14383 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) bnx2x_set_iscsi_eth_mac_addr() argument
14388 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, bnx2x_set_iscsi_eth_mac_addr()
14389 &bp->iscsi_l2_mac_obj, true, bnx2x_set_iscsi_eth_mac_addr()
14394 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) bnx2x_cnic_sp_post() argument
14400 if (unlikely(bp->panic)) bnx2x_cnic_sp_post()
14404 spin_lock_bh(&bp->spq_lock); bnx2x_cnic_sp_post()
14405 BUG_ON(bp->cnic_spq_pending < count); bnx2x_cnic_sp_post()
14406 bp->cnic_spq_pending -= count; bnx2x_cnic_sp_post()
14408 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { bnx2x_cnic_sp_post()
14409 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) bnx2x_cnic_sp_post()
14412 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) bnx2x_cnic_sp_post()
14420 cxt_index = BNX2X_ISCSI_ETH_CID(bp) / bnx2x_cnic_sp_post()
14422 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - bnx2x_cnic_sp_post()
14424 bnx2x_set_ctx_validation(bp, bnx2x_cnic_sp_post()
14425 &bp->context[cxt_index]. bnx2x_cnic_sp_post()
14427 BNX2X_ISCSI_ETH_CID(bp)); bnx2x_cnic_sp_post()
14438 if (!atomic_read(&bp->cq_spq_left)) bnx2x_cnic_sp_post()
14441 atomic_dec(&bp->cq_spq_left); bnx2x_cnic_sp_post()
14443 if (!atomic_read(&bp->eq_spq_left)) bnx2x_cnic_sp_post()
14446 atomic_dec(&bp->eq_spq_left); bnx2x_cnic_sp_post()
14449 if (bp->cnic_spq_pending >= bnx2x_cnic_sp_post()
14450 bp->cnic_eth_dev.max_kwqe_pending) bnx2x_cnic_sp_post()
14453 bp->cnic_spq_pending++; bnx2x_cnic_sp_post()
14460 spe = bnx2x_sp_get_next(bp); bnx2x_cnic_sp_post()
14461 *spe = *bp->cnic_kwq_cons; bnx2x_cnic_sp_post()
14464 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); bnx2x_cnic_sp_post()
14466 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) bnx2x_cnic_sp_post()
14467 bp->cnic_kwq_cons = bp->cnic_kwq; bnx2x_cnic_sp_post()
14469 bp->cnic_kwq_cons++; bnx2x_cnic_sp_post()
14471 bnx2x_sp_prod_update(bp); bnx2x_cnic_sp_post()
14472 spin_unlock_bh(&bp->spq_lock); bnx2x_cnic_sp_post()
14478 struct bnx2x *bp = netdev_priv(dev); bnx2x_cnic_sp_queue() local
14482 if (unlikely(bp->panic)) { bnx2x_cnic_sp_queue()
14488 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && bnx2x_cnic_sp_queue()
14489 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { bnx2x_cnic_sp_queue()
14494 spin_lock_bh(&bp->spq_lock); bnx2x_cnic_sp_queue()
14499 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) bnx2x_cnic_sp_queue()
14502 *bp->cnic_kwq_prod = *spe; bnx2x_cnic_sp_queue()
14504 bp->cnic_kwq_pending++; bnx2x_cnic_sp_queue()
14510 bp->cnic_kwq_pending); bnx2x_cnic_sp_queue()
14512 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) bnx2x_cnic_sp_queue()
14513 bp->cnic_kwq_prod = bp->cnic_kwq; bnx2x_cnic_sp_queue()
14515 bp->cnic_kwq_prod++; bnx2x_cnic_sp_queue()
14518 spin_unlock_bh(&bp->spq_lock); bnx2x_cnic_sp_queue()
14520 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) bnx2x_cnic_sp_queue()
14521 bnx2x_cnic_sp_post(bp, 0); bnx2x_cnic_sp_queue()
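The kwq producer/consumer above walks a fixed array of SPE slots, wrapping the prod/cons pointers from the last slot back to the base. A self-contained, runnable model of that ring arithmetic (names and sizes are illustrative):

	#include <stdio.h>

	#define NSLOTS 4

	static int ring[NSLOTS];
	static int *prod = ring, *cons = ring;
	static int *last = ring + NSLOTS - 1;
	static int pending;

	static void produce(int v)
	{
		*prod = v;
		pending++;
		prod = (prod == last) ? ring : prod + 1;	/* wrap */
	}

	static int consume(void)
	{
		int v = *cons;

		pending--;
		cons = (cons == last) ? ring : cons + 1;	/* wrap */
		return v;
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++) {	/* crosses the wrap point */
			produce(i);
			printf("%d\n", consume());
		}
		return 0;
	}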
14526 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) bnx2x_cnic_ctl_send() argument
14531 mutex_lock(&bp->cnic_mutex); bnx2x_cnic_ctl_send()
14532 c_ops = rcu_dereference_protected(bp->cnic_ops, bnx2x_cnic_ctl_send()
14533 lockdep_is_held(&bp->cnic_mutex)); bnx2x_cnic_ctl_send()
14535 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); bnx2x_cnic_ctl_send()
14536 mutex_unlock(&bp->cnic_mutex); bnx2x_cnic_ctl_send()
14541 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) bnx2x_cnic_ctl_send_bh() argument
14547 c_ops = rcu_dereference(bp->cnic_ops); bnx2x_cnic_ctl_send_bh()
14549 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); bnx2x_cnic_ctl_send_bh()
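cnic_ops above is an RCU-protected ops pointer: writers publish it with rcu_assign_pointer() under cnic_mutex (readable via rcu_dereference_protected() plus lockdep_is_held(), as in bnx2x_cnic_ctl_send), while softirq-context readers use rcu_dereference(). A minimal sketch of the pattern with illustrative names:

	#include <linux/rcupdate.h>
	#include <linux/mutex.h>

	struct ops { void (*ctl)(void *data); };

	static struct ops __rcu *example_ops;
	static DEFINE_MUTEX(example_lock);

	static void publish(struct ops *new_ops)
	{
		mutex_lock(&example_lock);
		rcu_assign_pointer(example_ops, new_ops);	/* pairs with readers */
		mutex_unlock(&example_lock);
	}

	static void call_from_bh(void *data)
	{
		struct ops *o;

		rcu_read_lock();
		o = rcu_dereference(example_ops);
		if (o)
			o->ctl(data);
		rcu_read_unlock();
	}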
14558 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) bnx2x_cnic_notify() argument
14564 return bnx2x_cnic_ctl_send(bp, &ctl); bnx2x_cnic_notify()
14567 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) bnx2x_cnic_cfc_comp() argument
14576 bnx2x_cnic_ctl_send_bh(bp, &ctl); bnx2x_cnic_cfc_comp()
14577 bnx2x_cnic_sp_post(bp, 0); bnx2x_cnic_cfc_comp()
14585 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) bnx2x_set_iscsi_eth_rx_mode() argument
14588 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); bnx2x_set_iscsi_eth_rx_mode()
14604 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode()
14609 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode()
14611 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) bnx2x_set_iscsi_eth_rx_mode()
14612 set_bit(sched_state, &bp->sp_state); bnx2x_set_iscsi_eth_rx_mode()
14615 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, bnx2x_set_iscsi_eth_rx_mode()
14622 struct bnx2x *bp = netdev_priv(dev); bnx2x_drv_ctl() local
14630 bnx2x_ilt_wr(bp, index, addr); bnx2x_drv_ctl()
14637 bnx2x_cnic_sp_post(bp, count); bnx2x_drv_ctl()
14643 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_drv_ctl()
14647 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, bnx2x_drv_ctl()
14649 cp->iscsi_l2_cid, BP_FUNC(bp), bnx2x_drv_ctl()
14650 bnx2x_sp(bp, mac_rdata), bnx2x_drv_ctl()
14651 bnx2x_sp_mapping(bp, mac_rdata), bnx2x_drv_ctl()
14653 &bp->sp_state, BNX2X_OBJ_TYPE_RX, bnx2x_drv_ctl()
14654 &bp->macs_pool); bnx2x_drv_ctl()
14657 rc = bnx2x_set_iscsi_eth_mac_addr(bp); bnx2x_drv_ctl()
14667 bnx2x_set_iscsi_eth_rx_mode(bp, true); bnx2x_drv_ctl()
14674 if (!bnx2x_wait_sp_comp(bp, sp_bits)) bnx2x_drv_ctl()
14686 bnx2x_set_iscsi_eth_rx_mode(bp, false); bnx2x_drv_ctl()
14693 if (!bnx2x_wait_sp_comp(bp, sp_bits)) bnx2x_drv_ctl()
14700 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, bnx2x_drv_ctl()
14708 atomic_add(count, &bp->cq_spq_left); bnx2x_drv_ctl()
14715 if (CHIP_IS_E3(bp)) { bnx2x_drv_ctl()
14716 int idx = BP_FW_MB_IDX(bp); bnx2x_drv_ctl()
14717 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); bnx2x_drv_ctl()
14718 int path = BP_PATH(bp); bnx2x_drv_ctl()
14719 int port = BP_PORT(bp); bnx2x_drv_ctl()
14729 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); bnx2x_drv_ctl()
14732 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || bnx2x_drv_ctl()
14733 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) bnx2x_drv_ctl()
14737 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); bnx2x_drv_ctl()
14746 REG_WR(bp, scratch_offset + i, bnx2x_drv_ctl()
14749 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_drv_ctl()
14756 if (CHIP_IS_E3(bp)) { bnx2x_drv_ctl()
14757 int idx = BP_FW_MB_IDX(bp); bnx2x_drv_ctl()
14760 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); bnx2x_drv_ctl()
14765 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); bnx2x_drv_ctl()
14767 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_drv_ctl()
14777 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) { bnx2x_drv_ctl()
14782 bnx2x_set_os_driver_state(bp, bnx2x_drv_ctl()
14786 bnx2x_set_os_driver_state(bp, bnx2x_drv_ctl()
14790 bnx2x_set_os_driver_state(bp, bnx2x_drv_ctl()
14804 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_fc_npiv() local
14810 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0])) bnx2x_get_fc_npiv()
14821 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); bnx2x_get_fc_npiv()
14825 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) { bnx2x_get_fc_npiv()
14863 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) bnx2x_setup_cnic_irq_info() argument
14865 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_setup_cnic_irq_info()
14867 if (bp->flags & USING_MSIX_FLAG) { bnx2x_setup_cnic_irq_info()
14870 cp->irq_arr[0].vector = bp->msix_table[1].vector; bnx2x_setup_cnic_irq_info()
14875 if (!CHIP_IS_E1x(bp)) bnx2x_setup_cnic_irq_info()
14876 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; bnx2x_setup_cnic_irq_info()
14878 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; bnx2x_setup_cnic_irq_info()
14880 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); bnx2x_setup_cnic_irq_info()
14881 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); bnx2x_setup_cnic_irq_info()
14882 cp->irq_arr[1].status_blk = bp->def_status_blk; bnx2x_setup_cnic_irq_info()
14889 void bnx2x_setup_cnic_info(struct bnx2x *bp) bnx2x_setup_cnic_info() argument
14891 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_setup_cnic_info()
14893 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_setup_cnic_info()
14894 bnx2x_cid_ilt_lines(bp); bnx2x_setup_cnic_info()
14895 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; bnx2x_setup_cnic_info()
14896 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); bnx2x_setup_cnic_info()
14897 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); bnx2x_setup_cnic_info()
14899 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", bnx2x_setup_cnic_info()
14900 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, bnx2x_setup_cnic_info()
14903 if (NO_ISCSI_OOO(bp)) bnx2x_setup_cnic_info()
14910 struct bnx2x *bp = netdev_priv(dev); bnx2x_register_cnic() local
14911 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_register_cnic()
14921 if (!CNIC_SUPPORT(bp)) { bnx2x_register_cnic()
14926 if (!CNIC_LOADED(bp)) { bnx2x_register_cnic()
14927 rc = bnx2x_load_cnic(bp); bnx2x_register_cnic()
14934 bp->cnic_enabled = true; bnx2x_register_cnic()
14936 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); bnx2x_register_cnic()
14937 if (!bp->cnic_kwq) bnx2x_register_cnic()
14940 bp->cnic_kwq_cons = bp->cnic_kwq; bnx2x_register_cnic()
14941 bp->cnic_kwq_prod = bp->cnic_kwq; bnx2x_register_cnic()
14942 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; bnx2x_register_cnic()
14944 bp->cnic_spq_pending = 0; bnx2x_register_cnic()
14945 bp->cnic_kwq_pending = 0; bnx2x_register_cnic()
14947 bp->cnic_data = data; bnx2x_register_cnic()
14951 cp->iro_arr = bp->iro_arr; bnx2x_register_cnic()
14953 bnx2x_setup_cnic_irq_info(bp); bnx2x_register_cnic()
14955 rcu_assign_pointer(bp->cnic_ops, ops); bnx2x_register_cnic()
14958 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); bnx2x_register_cnic()
14965 struct bnx2x *bp = netdev_priv(dev); bnx2x_unregister_cnic() local
14966 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_unregister_cnic()
14968 mutex_lock(&bp->cnic_mutex); bnx2x_unregister_cnic()
14970 RCU_INIT_POINTER(bp->cnic_ops, NULL); bnx2x_unregister_cnic()
14971 mutex_unlock(&bp->cnic_mutex); bnx2x_unregister_cnic()
14973 bp->cnic_enabled = false; bnx2x_unregister_cnic()
14974 kfree(bp->cnic_kwq); bnx2x_unregister_cnic()
14975 bp->cnic_kwq = NULL; bnx2x_unregister_cnic()
14982 struct bnx2x *bp = netdev_priv(dev); bnx2x_cnic_probe() local
14983 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_cnic_probe()
14989 if (NO_ISCSI(bp) && NO_FCOE(bp)) bnx2x_cnic_probe()
14993 cp->chip_id = CHIP_ID(bp); bnx2x_cnic_probe()
14994 cp->pdev = bp->pdev; bnx2x_cnic_probe()
14995 cp->io_base = bp->regview; bnx2x_cnic_probe()
14996 cp->io_base2 = bp->doorbells; bnx2x_cnic_probe()
14999 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + bnx2x_cnic_probe()
15000 bnx2x_cid_ilt_lines(bp); bnx2x_cnic_probe()
15002 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; bnx2x_cnic_probe()
15008 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); bnx2x_cnic_probe()
15010 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); bnx2x_cnic_probe()
15011 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); bnx2x_cnic_probe()
15013 if (NO_ISCSI_OOO(bp)) bnx2x_cnic_probe()
15016 if (NO_ISCSI(bp)) bnx2x_cnic_probe()
15019 if (NO_FCOE(bp)) bnx2x_cnic_probe()
15033 struct bnx2x *bp = fp->bp; bnx2x_rx_ustorm_prods_offset() local
15036 if (IS_VF(bp)) bnx2x_rx_ustorm_prods_offset()
15037 return bnx2x_vf_ustorm_prods_offset(bp, fp); bnx2x_rx_ustorm_prods_offset()
15038 else if (!CHIP_IS_E1x(bp)) bnx2x_rx_ustorm_prods_offset()
15041 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); bnx2x_rx_ustorm_prods_offset()
15051 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) bnx2x_pretend_func() argument
15055 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) bnx2x_pretend_func()
15059 pretend_reg = bnx2x_get_pretend_reg(bp); bnx2x_pretend_func()
15060 REG_WR(bp, pretend_reg, pretend_func_val); bnx2x_pretend_func()
15061 REG_RD(bp, pretend_reg); bnx2x_pretend_func()
15067 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); bnx2x_ptp_task() local
15068 int port = BP_PORT(bp); bnx2x_ptp_task()
15074 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_ptp_task()
15078 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : bnx2x_ptp_task()
15081 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : bnx2x_ptp_task()
15084 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_ptp_task()
15086 ns = timecounter_cyc2time(&bp->timecounter, timestamp); bnx2x_ptp_task()
15090 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); bnx2x_ptp_task()
15091 dev_kfree_skb_any(bp->ptp_tx_skb); bnx2x_ptp_task()
15092 bp->ptp_tx_skb = NULL; bnx2x_ptp_task()
15099 schedule_work(&bp->ptp_task); bnx2x_ptp_task()
15103 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) bnx2x_set_rx_ts() argument
15105 int port = BP_PORT(bp); bnx2x_set_rx_ts()
15108 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : bnx2x_set_rx_ts()
15111 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : bnx2x_set_rx_ts()
15115 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : bnx2x_set_rx_ts()
15118 ns = timecounter_cyc2time(&bp->timecounter, timestamp); bnx2x_set_rx_ts()
15129 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); bnx2x_cyclecounter_read() local
15130 int port = BP_PORT(bp); bnx2x_cyclecounter_read()
15134 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : bnx2x_cyclecounter_read()
15144 static void bnx2x_init_cyclecounter(struct bnx2x *bp) bnx2x_init_cyclecounter() argument
15146 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); bnx2x_init_cyclecounter()
15147 bp->cyclecounter.read = bnx2x_cyclecounter_read; bnx2x_init_cyclecounter()
15148 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); bnx2x_init_cyclecounter()
15149 bp->cyclecounter.shift = 1; bnx2x_init_cyclecounter()
15150 bp->cyclecounter.mult = 1; bnx2x_init_cyclecounter()
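bnx2x_init_cyclecounter() above binds a hardware free-running counter to the kernel timecounter; with mult = 1 and shift = 1 the conversion is ns = cycles >> 1. A hedged sketch of the wiring, with an illustrative read() stub:

	#include <linux/timecounter.h>

	static u64 example_read(const struct cyclecounter *cc)
	{
		return 0;	/* read the hardware free-running counter here */
	}

	static void example_init(struct cyclecounter *cc, struct timecounter *tc,
				 u64 start_ns)
	{
		cc->read = example_read;
		cc->mask = CYCLECOUNTER_MASK(64);
		cc->mult = 1;
		cc->shift = 1;		/* ns = (cycles * mult) >> shift */
		timecounter_init(tc, cc, start_ns);	/* anchor to an epoch */
	}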
15153 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) bnx2x_send_reset_timesync_ramrod() argument
15163 func_params.f_obj = &bp->func_obj; bnx2x_send_reset_timesync_ramrod()
15170 return bnx2x_func_state_change(bp, &func_params); bnx2x_send_reset_timesync_ramrod()
15173 static int bnx2x_enable_ptp_packets(struct bnx2x *bp) bnx2x_enable_ptp_packets() argument
15188 for_each_eth_queue(bp, i) { for_each_eth_queue()
15189 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
15192 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; for_each_eth_queue()
15195 rc = bnx2x_queue_state_change(bp, &q_params); for_each_eth_queue()
15205 int bnx2x_configure_ptp_filters(struct bnx2x *bp) bnx2x_configure_ptp_filters() argument
15207 int port = BP_PORT(bp); bnx2x_configure_ptp_filters()
15210 if (!bp->hwtstamp_ioctl_called) bnx2x_configure_ptp_filters()
15213 switch (bp->tx_type) { bnx2x_configure_ptp_filters()
15215 bp->flags |= TX_TIMESTAMPING_EN; bnx2x_configure_ptp_filters()
15216 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters()
15218 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters()
15226 switch (bp->rx_filter) { bnx2x_configure_ptp_filters()
15231 bp->rx_filter = HWTSTAMP_FILTER_NONE; bnx2x_configure_ptp_filters()
15236 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; bnx2x_configure_ptp_filters()
15238 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters()
15240 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters()
15246 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; bnx2x_configure_ptp_filters()
15248 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters()
15250 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters()
15256 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; bnx2x_configure_ptp_filters()
15258 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters()
15260 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters()
15267 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; bnx2x_configure_ptp_filters()
15269 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp_filters()
15271 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp_filters()
15277 rc = bnx2x_enable_ptp_packets(bp); bnx2x_configure_ptp_filters()
15282 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_configure_ptp_filters()
15288 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) bnx2x_hwtstamp_ioctl() argument
15306 bp->hwtstamp_ioctl_called = 1; bnx2x_hwtstamp_ioctl()
15307 bp->tx_type = config.tx_type; bnx2x_hwtstamp_ioctl()
15308 bp->rx_filter = config.rx_filter; bnx2x_hwtstamp_ioctl()
15310 rc = bnx2x_configure_ptp_filters(bp); bnx2x_hwtstamp_ioctl()
15314 config.rx_filter = bp->rx_filter; bnx2x_hwtstamp_ioctl()
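bnx2x_hwtstamp_ioctl() above follows the standard SIOCSHWTSTAMP flow: copy the hwtstamp_config in from userspace, program the hardware (possibly widening rx_filter to the nearest supported class, as the switch above does), and copy the effective config back. A minimal sketch of that flow:

	#include <linux/net_tstamp.h>
	#include <linux/uaccess.h>
	#include <linux/errno.h>
	#include <linux/if.h>

	static int example_hwtstamp(struct ifreq *ifr)
	{
		struct hwtstamp_config config;

		if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
			return -EFAULT;

		/* validate config.tx_type / config.rx_filter, program the
		 * hardware, and rewrite config.rx_filter to what was
		 * actually enabled */

		return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		       -EFAULT : 0;
	}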
15321 static int bnx2x_configure_ptp(struct bnx2x *bp) bnx2x_configure_ptp() argument
15323 int rc, port = BP_PORT(bp); bnx2x_configure_ptp()
15327 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : bnx2x_configure_ptp()
15329 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : bnx2x_configure_ptp()
15331 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : bnx2x_configure_ptp()
15333 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : bnx2x_configure_ptp()
15337 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : bnx2x_configure_ptp()
15341 REG_WR(bp, port ? NIG_REG_P1_PTP_EN : bnx2x_configure_ptp()
15347 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); bnx2x_configure_ptp()
15350 rc = bnx2x_send_reset_timesync_ramrod(bp); bnx2x_configure_ptp()
15357 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : bnx2x_configure_ptp()
15359 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : bnx2x_configure_ptp()
15366 void bnx2x_init_ptp(struct bnx2x *bp) bnx2x_init_ptp() argument
15371 rc = bnx2x_configure_ptp(bp); bnx2x_init_ptp()
15378 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); bnx2x_init_ptp()
15384 if (!bp->timecounter_init_done) { bnx2x_init_ptp()
15385 bnx2x_init_cyclecounter(bp); bnx2x_init_ptp()
15386 timecounter_init(&bp->timecounter, &bp->cyclecounter, bnx2x_init_ptp()
15388 bp->timecounter_init_done = 1; bnx2x_init_ptp()
5426 bnx2x_cid_to_q_obj(struct bnx2x *bp, u32 cid) bnx2x_cid_to_q_obj() argument
13986 __bnx2x_remove(struct pci_dev *pdev, struct net_device *dev, struct bnx2x *bp, bool remove_netdev) __bnx2x_remove() argument
H A Dbnx2x_stats.c44 static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) bnx2x_get_port_stats_dma_len() argument
49 if (SHMEM2_HAS(bp, sizeof_port_stats)) { bnx2x_get_port_stats_dma_len()
50 u32 size = SHMEM2_RD(bp, sizeof_port_stats); bnx2x_get_port_stats_dma_len()
66 if (bp->flags & BC_SUPPORTS_PFC_STATS) { bnx2x_get_port_stats_dma_len()
84 static void bnx2x_dp_stats(struct bnx2x *bp) bnx2x_dp_stats() argument
96 bp->fw_stats_req->hdr.cmd_num, bnx2x_dp_stats()
97 bp->fw_stats_req->hdr.reserved0, bnx2x_dp_stats()
98 bp->fw_stats_req->hdr.drv_stats_counter, bnx2x_dp_stats()
99 bp->fw_stats_req->hdr.reserved1, bnx2x_dp_stats()
100 bp->fw_stats_req->hdr.stats_counters_addrs.hi, bnx2x_dp_stats()
101 bp->fw_stats_req->hdr.stats_counters_addrs.lo); bnx2x_dp_stats()
103 for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { bnx2x_dp_stats()
111 i, bp->fw_stats_req->query[i].kind, bnx2x_dp_stats()
112 bp->fw_stats_req->query[i].index, bnx2x_dp_stats()
113 bp->fw_stats_req->query[i].funcID, bnx2x_dp_stats()
114 bp->fw_stats_req->query[i].reserved, bnx2x_dp_stats()
115 bp->fw_stats_req->query[i].address.hi, bnx2x_dp_stats()
116 bp->fw_stats_req->query[i].address.lo); bnx2x_dp_stats()
123 * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
126 static void bnx2x_storm_stats_post(struct bnx2x *bp) bnx2x_storm_stats_post() argument
130 if (bp->stats_pending) bnx2x_storm_stats_post()
133 bp->fw_stats_req->hdr.drv_stats_counter = bnx2x_storm_stats_post()
134 cpu_to_le16(bp->stats_counter++); bnx2x_storm_stats_post()
137 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); bnx2x_storm_stats_post()
140 bnx2x_iov_adjust_stats_req(bp); bnx2x_storm_stats_post()
141 bnx2x_dp_stats(bp); bnx2x_storm_stats_post()
144 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, bnx2x_storm_stats_post()
145 U64_HI(bp->fw_stats_req_mapping), bnx2x_storm_stats_post()
146 U64_LO(bp->fw_stats_req_mapping), bnx2x_storm_stats_post()
149 bp->stats_pending = 1; bnx2x_storm_stats_post()
152 static void bnx2x_hw_stats_post(struct bnx2x *bp) bnx2x_hw_stats_post() argument
154 struct dmae_command *dmae = &bp->stats_dmae; bnx2x_hw_stats_post()
155 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_hw_stats_post()
158 if (CHIP_REV_IS_SLOW(bp)) bnx2x_hw_stats_post()
162 if (bp->func_stx) bnx2x_hw_stats_post()
163 memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, bnx2x_hw_stats_post()
164 sizeof(bp->func_stats)); bnx2x_hw_stats_post()
167 if (bp->executer_idx) { bnx2x_hw_stats_post()
168 int loader_idx = PMF_DMAE_C(bp); bnx2x_hw_stats_post()
169 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_hw_stats_post()
175 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); bnx2x_hw_stats_post()
176 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); bnx2x_hw_stats_post()
182 if (CHIP_IS_E1(bp)) bnx2x_hw_stats_post()
189 bnx2x_post_dmae(bp, dmae, loader_idx); bnx2x_hw_stats_post()
191 } else if (bp->func_stx) { bnx2x_hw_stats_post()
193 bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); bnx2x_hw_stats_post()
197 static void bnx2x_stats_comp(struct bnx2x *bp) bnx2x_stats_comp() argument
199 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_comp()
218 static void bnx2x_stats_pmf_update(struct bnx2x *bp) bnx2x_stats_pmf_update() argument
222 int loader_idx = PMF_DMAE_C(bp); bnx2x_stats_pmf_update()
223 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_pmf_update()
226 if (!bp->port.pmf || !bp->port.port_stx) { bnx2x_stats_pmf_update()
231 bp->executer_idx = 0; bnx2x_stats_pmf_update()
233 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); bnx2x_stats_pmf_update()
235 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_stats_pmf_update()
237 dmae->src_addr_lo = bp->port.port_stx >> 2; bnx2x_stats_pmf_update()
239 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_stats_pmf_update()
240 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_stats_pmf_update()
246 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_stats_pmf_update()
248 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; bnx2x_stats_pmf_update()
250 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + bnx2x_stats_pmf_update()
252 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + bnx2x_stats_pmf_update()
254 dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; bnx2x_stats_pmf_update()
256 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_stats_pmf_update()
257 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_stats_pmf_update()
261 bnx2x_hw_stats_post(bp); bnx2x_stats_pmf_update()
262 bnx2x_stats_comp(bp); bnx2x_stats_pmf_update()
265 static void bnx2x_port_stats_init(struct bnx2x *bp) bnx2x_port_stats_init() argument
268 int port = BP_PORT(bp); bnx2x_port_stats_init()
270 int loader_idx = PMF_DMAE_C(bp); bnx2x_port_stats_init()
272 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_init()
275 if (!bp->link_vars.link_up || !bp->port.pmf) { bnx2x_port_stats_init()
280 bp->executer_idx = 0; bnx2x_port_stats_init()
283 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_port_stats_init()
286 if (bp->port.port_stx) { bnx2x_port_stats_init()
288 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
290 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_init()
291 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_init()
292 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_init()
294 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_init()
300 if (bp->func_stx) { bnx2x_port_stats_init()
302 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
304 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_init()
305 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_init()
306 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_port_stats_init()
315 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, bnx2x_port_stats_init()
319 if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { bnx2x_port_stats_init()
323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
328 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init()
329 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init()
336 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
341 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init()
343 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init()
351 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
356 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init()
358 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + bnx2x_port_stats_init()
369 switch (bp->link_vars.mac_type) { bnx2x_port_stats_init()
376 if (CHIP_IS_E1x(bp)) { bnx2x_port_stats_init()
405 tx_len = sizeof(bp->slowpath-> bnx2x_port_stats_init()
407 rx_len = sizeof(bp->slowpath-> bnx2x_port_stats_init()
413 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
418 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init()
419 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); bnx2x_port_stats_init()
425 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
430 U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); bnx2x_port_stats_init()
432 U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); bnx2x_port_stats_init()
440 if (!CHIP_IS_E3(bp)) { bnx2x_port_stats_init()
441 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
446 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init()
448 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init()
455 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
460 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init()
462 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + bnx2x_port_stats_init()
470 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_init()
471 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, bnx2x_port_stats_init()
476 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); bnx2x_port_stats_init()
477 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); bnx2x_port_stats_init()
480 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_init()
481 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_init()
487 static void bnx2x_func_stats_init(struct bnx2x *bp) bnx2x_func_stats_init() argument
489 struct dmae_command *dmae = &bp->stats_dmae; bnx2x_func_stats_init()
490 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_func_stats_init()
493 if (!bp->func_stx) { bnx2x_func_stats_init()
498 bp->executer_idx = 0; bnx2x_func_stats_init()
501 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_func_stats_init()
503 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_func_stats_init()
504 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_func_stats_init()
505 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_func_stats_init()
508 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_func_stats_init()
509 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_func_stats_init()
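The DMAE programming above repeatedly splits 64-bit DMA addresses into 32-bit halves with U64_HI()/U64_LO() before writing them into the command block. A self-contained, runnable model of that split (macro definitions assumed to mirror the driver's):

	#include <stdint.h>
	#include <inttypes.h>
	#include <stdio.h>

	#define U64_LO(x) ((uint32_t)((x) & 0xffffffff))
	#define U64_HI(x) ((uint32_t)((x) >> 32))

	int main(void)
	{
		uint64_t dma = 0x0000000123456789ULL;

		/* one 64-bit bus address, programmed as two 32-bit registers */
		printf("hi=%#" PRIx32 " lo=%#" PRIx32 "\n",
		       U64_HI(dma), U64_LO(dma));
		return 0;
	}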
516 static void bnx2x_stats_start(struct bnx2x *bp) bnx2x_stats_start() argument
518 if (IS_PF(bp)) { bnx2x_stats_start()
519 if (bp->port.pmf) bnx2x_stats_start()
520 bnx2x_port_stats_init(bp); bnx2x_stats_start()
522 else if (bp->func_stx) bnx2x_stats_start()
523 bnx2x_func_stats_init(bp); bnx2x_stats_start()
525 bnx2x_hw_stats_post(bp); bnx2x_stats_start()
526 bnx2x_storm_stats_post(bp); bnx2x_stats_start()
530 static void bnx2x_stats_pmf_start(struct bnx2x *bp) bnx2x_stats_pmf_start() argument
532 bnx2x_stats_comp(bp); bnx2x_stats_pmf_start()
533 bnx2x_stats_pmf_update(bp); bnx2x_stats_pmf_start()
534 bnx2x_stats_start(bp); bnx2x_stats_pmf_start()
537 static void bnx2x_stats_restart(struct bnx2x *bp) bnx2x_stats_restart() argument
542 if (IS_VF(bp)) bnx2x_stats_restart()
545 bnx2x_stats_comp(bp); bnx2x_stats_restart()
546 bnx2x_stats_start(bp); bnx2x_stats_restart()
549 static void bnx2x_bmac_stats_update(struct bnx2x *bp) bnx2x_bmac_stats_update() argument
551 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_bmac_stats_update()
552 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_bmac_stats_update()
558 if (CHIP_IS_E1x(bp)) { bnx2x_bmac_stats_update()
559 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); bnx2x_bmac_stats_update()
594 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); bnx2x_bmac_stats_update()
655 static void bnx2x_mstat_stats_update(struct bnx2x *bp) bnx2x_mstat_stats_update() argument
657 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_mstat_stats_update()
658 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_mstat_stats_update()
660 struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); bnx2x_mstat_stats_update()
745 static void bnx2x_emac_stats_update(struct bnx2x *bp) bnx2x_emac_stats_update() argument
747 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); bnx2x_emac_stats_update()
748 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_emac_stats_update()
749 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_emac_stats_update()
802 static int bnx2x_hw_stats_update(struct bnx2x *bp) bnx2x_hw_stats_update() argument
804 struct nig_stats *new = bnx2x_sp(bp, nig_stats); bnx2x_hw_stats_update()
805 struct nig_stats *old = &(bp->port.old_nig_stats); bnx2x_hw_stats_update()
806 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); bnx2x_hw_stats_update()
807 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_hw_stats_update()
813 switch (bp->link_vars.mac_type) { bnx2x_hw_stats_update()
815 bnx2x_bmac_stats_update(bp); bnx2x_hw_stats_update()
819 bnx2x_emac_stats_update(bp); bnx2x_hw_stats_update()
824 bnx2x_mstat_stats_update(bp); bnx2x_hw_stats_update()
841 if (!CHIP_IS_E3(bp)) { bnx2x_hw_stats_update()
857 if (CHIP_IS_E3(bp)) { bnx2x_hw_stats_update()
858 u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 bnx2x_hw_stats_update()
860 estats->eee_tx_lpi += REG_RD(bp, lpi_reg); bnx2x_hw_stats_update()
863 if (!BP_NOMCP(bp)) { bnx2x_hw_stats_update()
865 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); bnx2x_hw_stats_update()
876 static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) bnx2x_storm_stats_validate_counters() argument
878 struct stats_counter *counters = &bp->fw_stats_data->storm_counters; bnx2x_storm_stats_validate_counters()
883 cur_stats_counter = bp->stats_counter - 1; bnx2x_storm_stats_validate_counters()
889 le16_to_cpu(counters->xstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters()
896 le16_to_cpu(counters->ustats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters()
903 le16_to_cpu(counters->cstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters()
910 le16_to_cpu(counters->tstats_counter), bp->stats_counter); bnx2x_storm_stats_validate_counters()
916 static int bnx2x_storm_stats_update(struct bnx2x *bp) bnx2x_storm_stats_update() argument
919 &bp->fw_stats_data->port.tstorm_port_statistics; bnx2x_storm_stats_update()
921 &bp->fw_stats_data->pf.tstorm_pf_statistics; bnx2x_storm_stats_update()
922 struct host_func_stats *fstats = &bp->func_stats; bnx2x_storm_stats_update()
923 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_storm_stats_update()
924 struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; bnx2x_storm_stats_update()
928 if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) bnx2x_storm_stats_update()
934 for_each_eth_queue(bp, i) { for_each_eth_queue()
935 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
937 &bp->fw_stats_data->queue_stats[i]. for_each_eth_queue()
940 &bnx2x_fp_stats(bp, fp)->old_tclient; for_each_eth_queue()
942 &bp->fw_stats_data->queue_stats[i]. for_each_eth_queue()
945 &bnx2x_fp_stats(bp, fp)->old_uclient; for_each_eth_queue()
947 &bp->fw_stats_data->queue_stats[i]. for_each_eth_queue()
950 &bnx2x_fp_stats(bp, fp)->old_xclient; for_each_eth_queue()
952 &bnx2x_fp_stats(bp, fp)->eth_q_stats; for_each_eth_queue()
954 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; for_each_eth_queue()
1099 if (bp->port.pmf) {
1100 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
1109 bp->stats_pending = 0;
1114 static void bnx2x_net_stats_update(struct bnx2x *bp) bnx2x_net_stats_update() argument
1116 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_net_stats_update()
1117 struct net_device_stats *nstats = &bp->dev->stats; bnx2x_net_stats_update()
1136 for_each_rx_queue(bp, i) { for_each_rx_queue()
1138 &bp->fp_stats[i].old_tclient; for_each_rx_queue()
1141 nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
1184 static void bnx2x_drv_stats_update(struct bnx2x *bp) bnx2x_drv_stats_update() argument
1186 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_drv_stats_update()
1189 for_each_queue(bp, i) { for_each_queue()
1190 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; for_each_queue()
1192 &bp->fp_stats[i].eth_q_stats_old; for_each_queue()
1202 static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) bnx2x_edebug_stats_stopped() argument
1206 if (SHMEM2_HAS(bp, edebug_driver_if[1])) { bnx2x_edebug_stats_stopped()
1207 val = SHMEM2_RD(bp, edebug_driver_if[1]); bnx2x_edebug_stats_stopped()
1216 static void bnx2x_stats_update(struct bnx2x *bp) bnx2x_stats_update() argument
1218 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_stats_update()
1220 if (bnx2x_edebug_stats_stopped(bp)) bnx2x_stats_update()
1223 if (IS_PF(bp)) { bnx2x_stats_update()
1227 if (bp->port.pmf) bnx2x_stats_update()
1228 bnx2x_hw_stats_update(bp); bnx2x_stats_update()
1230 if (bnx2x_storm_stats_update(bp)) { bnx2x_stats_update()
1231 if (bp->stats_pending++ == 3) { bnx2x_stats_update()
1241 bnx2x_storm_stats_update(bp); bnx2x_stats_update()
1244 bnx2x_net_stats_update(bp); bnx2x_stats_update()
1245 bnx2x_drv_stats_update(bp); bnx2x_stats_update()
1248 if (IS_VF(bp)) bnx2x_stats_update()
1251 if (netif_msg_timer(bp)) { bnx2x_stats_update()
1252 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_stats_update()
1254 netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", bnx2x_stats_update()
1258 bnx2x_hw_stats_post(bp); bnx2x_stats_update()
1259 bnx2x_storm_stats_post(bp); bnx2x_stats_update()
1262 static void bnx2x_port_stats_stop(struct bnx2x *bp) bnx2x_port_stats_stop() argument
1266 int loader_idx = PMF_DMAE_C(bp); bnx2x_port_stats_stop()
1267 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_stop()
1269 bp->executer_idx = 0; bnx2x_port_stats_stop()
1271 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); bnx2x_port_stats_stop()
1273 if (bp->port.port_stx) { bnx2x_port_stats_stop()
1275 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_stop()
1276 if (bp->func_stx) bnx2x_port_stats_stop()
1283 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_stop()
1284 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_stop()
1285 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_stop()
1287 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_stop()
1288 if (bp->func_stx) { bnx2x_port_stats_stop()
1294 U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop()
1296 U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop()
1303 if (bp->func_stx) { bnx2x_port_stats_stop()
1305 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_stop()
1308 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_stop()
1309 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); bnx2x_port_stats_stop()
1310 dmae->dst_addr_lo = bp->func_stx >> 2; bnx2x_port_stats_stop()
1313 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop()
1314 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_stop()
1321 static void bnx2x_stats_stop(struct bnx2x *bp) bnx2x_stats_stop() argument
1325 bnx2x_stats_comp(bp); bnx2x_stats_stop()
1327 if (bp->port.pmf) bnx2x_stats_stop()
1328 update = (bnx2x_hw_stats_update(bp) == 0); bnx2x_stats_stop()
1330 update |= (bnx2x_storm_stats_update(bp) == 0); bnx2x_stats_stop()
1333 bnx2x_net_stats_update(bp); bnx2x_stats_stop()
1335 if (bp->port.pmf) bnx2x_stats_stop()
1336 bnx2x_port_stats_stop(bp); bnx2x_stats_stop()
1338 bnx2x_hw_stats_post(bp); bnx2x_stats_stop()
1339 bnx2x_stats_comp(bp); bnx2x_stats_stop()
1343 static void bnx2x_stats_do_nothing(struct bnx2x *bp) bnx2x_stats_do_nothing() argument
1348 void (*action)(struct bnx2x *bp);
1366 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) bnx2x_stats_handle() argument
1368 enum bnx2x_stats_state state = bp->stats_state; bnx2x_stats_handle()
1370 if (unlikely(bp->panic)) bnx2x_stats_handle()
1377 if (down_trylock(&bp->stats_lock)) { bnx2x_stats_handle()
1383 if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { bnx2x_stats_handle()
1390 bnx2x_stats_stm[state][event].action(bp); bnx2x_stats_handle()
1391 bp->stats_state = bnx2x_stats_stm[state][event].next_state; bnx2x_stats_handle()
1393 up(&bp->stats_lock); bnx2x_stats_handle()
1395 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) bnx2x_stats_handle()
1397 state, event, bp->stats_state); bnx2x_stats_handle()
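
The bnx2x_stats_handle() excerpt above dispatches all statistics work through a two-dimensional [state][event] table, bnx2x_stats_stm, whose entries pair an action callback (the "void (*action)(struct bnx2x *bp)" member shown earlier) with a next state. A minimal, self-contained sketch of that table-driven state-machine pattern follows; the states, events, and handlers are simplified stand-ins, not the driver's real set.

    #include <stdio.h>

    enum state { ST_DISABLED, ST_ENABLED, ST_MAX };
    enum event { EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

    struct ctx {
        enum state state;
    };

    static void act_none(struct ctx *c)   { (void)c; }
    static void act_update(struct ctx *c) { (void)c; printf("update stats\n"); }
    static void act_stop(struct ctx *c)   { (void)c; printf("stop stats\n"); }

    /* One row per state, one column per event: handler plus next state. */
    static const struct {
        void (*action)(struct ctx *c);
        enum state next_state;
    } stm[ST_MAX][EV_MAX] = {
        [ST_DISABLED] = {
            [EV_LINK_UP] = { act_none,   ST_ENABLED  },
            [EV_UPDATE]  = { act_none,   ST_DISABLED },
            [EV_STOP]    = { act_none,   ST_DISABLED },
        },
        [ST_ENABLED] = {
            [EV_LINK_UP] = { act_none,   ST_ENABLED  },
            [EV_UPDATE]  = { act_update, ST_ENABLED  },
            [EV_STOP]    = { act_stop,   ST_DISABLED },
        },
    };

    static void stats_handle(struct ctx *c, enum event ev)
    {
        enum state s = c->state;

        stm[s][ev].action(c);             /* run the (state, event) handler */
        c->state = stm[s][ev].next_state; /* then take the transition       */
    }

    int main(void)
    {
        struct ctx c = { ST_DISABLED };

        stats_handle(&c, EV_LINK_UP);     /* DISABLED -> ENABLED, no action */
        stats_handle(&c, EV_UPDATE);      /* ENABLED: prints "update stats" */
        stats_handle(&c, EV_STOP);        /* stops and returns to DISABLED  */
        return 0;
    }
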
1400 static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_port_stats_base_init() argument
1403 u32 *stats_comp = bnx2x_sp(bp, stats_comp); bnx2x_port_stats_base_init()
1406 if (!bp->port.pmf || !bp->port.port_stx) { bnx2x_port_stats_base_init()
1411 bp->executer_idx = 0; bnx2x_port_stats_base_init()
1413 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); bnx2x_port_stats_base_init()
1414 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, bnx2x_port_stats_base_init()
1416 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_base_init()
1417 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); bnx2x_port_stats_base_init()
1418 dmae->dst_addr_lo = bp->port.port_stx >> 2; bnx2x_port_stats_base_init()
1420 dmae->len = bnx2x_get_port_stats_dma_len(bp); bnx2x_port_stats_base_init()
1421 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_base_init()
1422 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); bnx2x_port_stats_base_init()
1426 bnx2x_hw_stats_post(bp); bnx2x_port_stats_base_init()
1427 bnx2x_stats_comp(bp); bnx2x_port_stats_base_init()
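
Both bnx2x_port_stats_stop() and bnx2x_port_stats_base_init() above build DMAE command blocks by splitting a 64-bit DMA mapping into 32-bit halves with U64_LO()/U64_HI(), and by expressing the GRC destination (bp->port.port_stx >> 2) in dwords rather than bytes. An illustrative stand-alone rendering of that split, with made-up values and a reduced command layout:

    #include <stdint.h>
    #include <stdio.h>

    #define U64_LO(x) ((uint32_t)((uint64_t)(x) & 0xffffffffu))
    #define U64_HI(x) ((uint32_t)((uint64_t)(x) >> 32))

    struct dmae_cmd {                       /* reduced DMAE command block */
        uint32_t src_addr_lo;
        uint32_t src_addr_hi;
        uint32_t dst_addr_lo;               /* GRC address, in dwords */
        uint32_t len;
    };

    int main(void)
    {
        uint64_t mapping  = 0x123456789abcdef0ULL; /* pretend DMA mapping */
        uint32_t port_stx = 0x8000;                /* pretend GRC address */
        struct dmae_cmd dmae = {
            .src_addr_lo = U64_LO(mapping),
            .src_addr_hi = U64_HI(mapping),
            .dst_addr_lo = port_stx >> 2,          /* bytes -> dwords     */
            .len         = 16,
        };

        printf("src lo=%#x hi=%#x dst=%#x len=%u\n",
               dmae.src_addr_lo, dmae.src_addr_hi,
               dmae.dst_addr_lo, dmae.len);
        return 0;
    }
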
1434 static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) bnx2x_prep_fw_stats_req() argument
1438 struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; bnx2x_prep_fw_stats_req()
1443 stats_hdr->cmd_num = bp->fw_stats_num; bnx2x_prep_fw_stats_req()
1452 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req()
1463 memset(&bp->fw_stats_data->storm_counters, 0xff, bnx2x_prep_fw_stats_req()
1467 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req()
1470 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; bnx2x_prep_fw_stats_req()
1474 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req()
1476 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req()
1481 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req()
1484 cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; bnx2x_prep_fw_stats_req()
1488 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req()
1489 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req()
1494 if (!NO_FCOE(bp)) { bnx2x_prep_fw_stats_req()
1495 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req()
1499 &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; bnx2x_prep_fw_stats_req()
1503 cur_query_entry->index = BP_PORT(bp); bnx2x_prep_fw_stats_req()
1504 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); bnx2x_prep_fw_stats_req()
1512 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_prep_fw_stats_req()
1518 if (!NO_FCOE(bp)) bnx2x_prep_fw_stats_req()
1523 for_each_eth_queue(bp, i) { for_each_eth_queue()
1525 &bp->fw_stats_req-> for_each_eth_queue()
1529 cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); for_each_eth_queue()
1530 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); for_each_eth_queue()
1540 if (!NO_FCOE(bp)) {
1542 &bp->fw_stats_req->
1546 cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
1547 cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
1555 void bnx2x_memset_stats(struct bnx2x *bp) bnx2x_memset_stats() argument
1560 for_each_queue(bp, i) { for_each_queue()
1561 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; for_each_queue()
1569 if (bp->stats_init) { for_each_queue()
1577 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1579 if (bp->stats_init) {
1580 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1581 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1582 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1583 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1584 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1587 bp->stats_state = STATS_STATE_DISABLED;
1589 if (bp->port.pmf && bp->port.port_stx)
1590 bnx2x_port_stats_base_init(bp);
1593 bp->stats_init = false;
1596 void bnx2x_stats_init(struct bnx2x *bp) bnx2x_stats_init() argument
1598 int /*abs*/port = BP_PORT(bp); bnx2x_stats_init()
1599 int mb_idx = BP_FW_MB_IDX(bp); bnx2x_stats_init()
1601 if (IS_VF(bp)) { bnx2x_stats_init()
1602 bnx2x_memset_stats(bp); bnx2x_stats_init()
1606 bp->stats_pending = 0; bnx2x_stats_init()
1607 bp->executer_idx = 0; bnx2x_stats_init()
1608 bp->stats_counter = 0; bnx2x_stats_init()
1611 if (!BP_NOMCP(bp)) { bnx2x_stats_init()
1612 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); bnx2x_stats_init()
1613 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); bnx2x_stats_init()
1616 bp->port.port_stx = 0; bnx2x_stats_init()
1617 bp->func_stx = 0; bnx2x_stats_init()
1620 bp->port.port_stx, bp->func_stx); bnx2x_stats_init()
1623 if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) bnx2x_stats_init()
1624 bnx2x_stats_handle(bp, STATS_EVENT_PMF); bnx2x_stats_init()
1626 port = BP_PORT(bp); bnx2x_stats_init()
1628 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); bnx2x_stats_init()
1629 bp->port.old_nig_stats.brb_discard = bnx2x_stats_init()
1630 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); bnx2x_stats_init()
1631 bp->port.old_nig_stats.brb_truncate = bnx2x_stats_init()
1632 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); bnx2x_stats_init()
1633 if (!CHIP_IS_E3(bp)) { bnx2x_stats_init()
1634 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, bnx2x_stats_init()
1635 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); bnx2x_stats_init()
1636 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, bnx2x_stats_init()
1637 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); bnx2x_stats_init()
1641 bnx2x_prep_fw_stats_req(bp); bnx2x_stats_init()
1644 if (bp->stats_init) { bnx2x_stats_init()
1645 if (bp->func_stx) { bnx2x_stats_init()
1646 memset(bnx2x_sp(bp, func_stats), 0, bnx2x_stats_init()
1648 bnx2x_func_stats_init(bp); bnx2x_stats_init()
1649 bnx2x_hw_stats_post(bp); bnx2x_stats_init()
1650 bnx2x_stats_comp(bp); bnx2x_stats_init()
1654 bnx2x_memset_stats(bp); bnx2x_stats_init()
1657 void bnx2x_save_statistics(struct bnx2x *bp) bnx2x_save_statistics() argument
1660 struct net_device_stats *nstats = &bp->dev->stats; bnx2x_save_statistics()
1663 for_each_eth_queue(bp, i) { for_each_eth_queue()
1664 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
1666 &bnx2x_fp_stats(bp, fp)->eth_q_stats; for_each_eth_queue()
1668 &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; for_each_eth_queue()
1687 bp->net_stats_old.rx_dropped = nstats->rx_dropped;
1690 if (bp->port.pmf && IS_MF(bp)) {
1691 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1692 struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
1700 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, bnx2x_afex_collect_stats() argument
1705 struct bnx2x_eth_stats *estats = &bp->eth_stats; bnx2x_afex_collect_stats()
1707 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; bnx2x_afex_collect_stats()
1719 &bp->fw_stats_data->fcoe; bnx2x_afex_collect_stats()
1723 for_each_eth_queue(bp, i) { for_each_eth_queue()
1724 struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; for_each_eth_queue()
1818 if (!NO_FCOE(bp)) {
1954 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1970 int bnx2x_stats_safe_exec(struct bnx2x *bp, bnx2x_stats_safe_exec() argument
1979 rc = down_timeout(&bp->stats_lock, HZ / 10); bnx2x_stats_safe_exec()
1985 bnx2x_stats_comp(bp); bnx2x_stats_safe_exec()
1986 while (bp->stats_pending && cnt--) bnx2x_stats_safe_exec()
1987 if (bnx2x_storm_stats_update(bp)) bnx2x_stats_safe_exec()
1989 if (bp->stats_pending) { bnx2x_stats_safe_exec()
2001 up(&bp->stats_lock); bnx2x_stats_safe_exec()
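
bnx2x_stats_handle() and bnx2x_stats_safe_exec() above bound their wait on bp->stats_lock with down_timeout(&bp->stats_lock, HZ / 10), roughly a 100 ms budget, and give up instead of blocking forever. A userspace analogue of that timed-semaphore guard, using POSIX sem_timedwait() purely for illustration:

    #include <errno.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static sem_t stats_lock;

    static int stats_update_guarded(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_nsec += 100 * 1000 * 1000;    /* ~HZ/10: 100 ms budget */
        if (ts.tv_nsec >= 1000000000L) {
            ts.tv_sec++;
            ts.tv_nsec -= 1000000000L;
        }

        if (sem_timedwait(&stats_lock, &ts)) {
            if (errno == ETIMEDOUT)
                fprintf(stderr, "stats lock busy, skipping update\n");
            return -1;                      /* bail out, as the driver does */
        }

        /* ... update statistics while holding the lock ... */

        sem_post(&stats_lock);
        return 0;
    }

    int main(void)
    {
        sem_init(&stats_lock, 0, 1);
        stats_update_guarded();
        sem_destroy(&stats_lock);
        return 0;
    }
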
bnx2x_cmn.h
39 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
55 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
64 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
82 * @bp: driver handle
87 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
92 * @bp: driver handle
95 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
100 * @bp: driver handle
106 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
112 * @bp: driver handle
118 void bnx2x__init_func_obj(struct bnx2x *bp);
123 * @bp: driver handle
128 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
134 * @bp: driver handle
136 int bnx2x_setup_leading(struct bnx2x *bp);
141 * @bp: driver handle
147 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
152 * @bp: driver handle
155 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
160 * @bp: driver handle
162 void bnx2x_link_set(struct bnx2x *bp);
168 * @bp: driver handle
170 void bnx2x_force_link_reset(struct bnx2x *bp);
175 * @bp: driver handle
180 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
185 * @bp: driver handle
187 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
190 void bnx2x_drv_pulse(struct bnx2x *bp);
195 * @bp: driver handle
202 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
206 void bnx2x_pf_disable(struct bnx2x *bp);
207 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
212 * @bp: driver handle
214 void bnx2x__link_status_update(struct bnx2x *bp);
219 * @bp: driver handle
221 void bnx2x_link_report(struct bnx2x *bp);
224 void __bnx2x_link_report(struct bnx2x *bp);
229 * @bp: driver handle
233 u16 bnx2x_get_mf_speed(struct bnx2x *bp);
254 * @bp: driver handle
257 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
262 * @bp: driver handle
264 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
269 * @bp: driver handle
271 void bnx2x_setup_cnic_info(struct bnx2x *bp);
276 * @bp: driver handle
278 void bnx2x_int_enable(struct bnx2x *bp);
283 * @bp: driver handle
289 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
294 * @bp: driver handle
302 void bnx2x_nic_init_cnic(struct bnx2x *bp);
307 * @bp: driver handle
314 void bnx2x_pre_irq_nic_init(struct bnx2x *bp);
319 * @bp: driver handle
327 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);
331 * @bp: driver handle
333 int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
337 * @bp: driver handle
339 int bnx2x_alloc_mem(struct bnx2x *bp);
344 * @bp: driver handle
346 void bnx2x_free_mem_cnic(struct bnx2x *bp);
350 * @bp: driver handle
352 void bnx2x_free_mem(struct bnx2x *bp);
357 * @bp: driver handle
359 void bnx2x_set_num_queues(struct bnx2x *bp);
364 * @bp: driver handle
372 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);
377 * @bp: driver handle
380 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
385 * @bp: driver handle
388 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
393 * @bp: driver handle
395 int bnx2x_release_leader_lock(struct bnx2x *bp);
400 * @bp: driver handle
405 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
413 * If bp->state is OPEN, should be called with
416 void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
419 void bnx2x_set_pf_load(struct bnx2x *bp);
420 bool bnx2x_clear_pf_load(struct bnx2x *bp);
421 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
422 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
423 void bnx2x_set_reset_in_progress(struct bnx2x *bp);
424 void bnx2x_set_reset_global(struct bnx2x *bp);
425 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
426 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
439 * @bp: driver handle
441 void bnx2x_ilt_set_info(struct bnx2x *bp);
447 * @bp: driver handle
449 void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
454 * @bp: driver handle
456 void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
461 * @bp: driver handle
466 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
471 * @bp: driver handle
474 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
476 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
479 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);
482 int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
499 static inline void bnx2x_update_rx_prod(struct bnx2x *bp, bnx2x_update_rx_prod() argument
522 REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, bnx2x_update_rx_prod()
538 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
545 void bnx2x_free_irq(struct bnx2x *bp);
547 void bnx2x_free_fp_mem(struct bnx2x *bp);
548 void bnx2x_init_rx_rings(struct bnx2x *bp);
549 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
550 void bnx2x_free_skbs(struct bnx2x *bp);
551 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
552 void bnx2x_netif_start(struct bnx2x *bp);
553 int bnx2x_load_cnic(struct bnx2x *bp);
558 * @bp: driver handle
563 int bnx2x_enable_msix(struct bnx2x *bp);
568 * @bp: driver handle
570 int bnx2x_enable_msi(struct bnx2x *bp);
582 * @bp: driver handle
584 int bnx2x_alloc_mem_bp(struct bnx2x *bp);
589 * @bp: driver handle
591 void bnx2x_free_mem_bp(struct bnx2x *bp);
627 * @bp: driver handle
631 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
641 static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, bnx2x_igu_ack_sb_gen() argument
655 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); bnx2x_igu_ack_sb_gen()
662 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, bnx2x_hc_ack_sb() argument
665 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + bnx2x_hc_ack_sb()
676 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); bnx2x_hc_ack_sb()
683 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, bnx2x_ack_sb() argument
686 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_ack_sb()
687 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); bnx2x_ack_sb()
691 if (CHIP_INT_MODE_IS_BC(bp)) bnx2x_ack_sb()
693 else if (igu_sb_id != bp->igu_dsb_id) bnx2x_ack_sb()
699 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update); bnx2x_ack_sb()
703 static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) bnx2x_hc_ack_int() argument
705 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + bnx2x_hc_ack_int()
707 u32 result = REG_RD(bp, hc_addr); bnx2x_hc_ack_int()
713 static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) bnx2x_igu_ack_int() argument
716 u32 result = REG_RD(bp, igu_addr); bnx2x_igu_ack_int()
725 static inline u16 bnx2x_ack_int(struct bnx2x *bp) bnx2x_ack_int() argument
728 if (bp->common.int_block == INT_BLOCK_HC) bnx2x_ack_int()
729 return bnx2x_hc_ack_int(bp); bnx2x_ack_int()
731 return bnx2x_igu_ack_int(bp); bnx2x_ack_int()
741 static inline u16 bnx2x_tx_avail(struct bnx2x *bp, bnx2x_tx_avail() argument
798 * @bp: driver handle
800 static inline void bnx2x_tx_disable(struct bnx2x *bp) bnx2x_tx_disable() argument
802 netif_tx_disable(bp->dev); bnx2x_tx_disable()
803 netif_carrier_off(bp->dev); bnx2x_tx_disable()
806 static inline void bnx2x_free_rx_sge(struct bnx2x *bp, bnx2x_free_rx_sge() argument
820 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), bnx2x_free_rx_sge()
830 static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) bnx2x_del_all_napi_cnic() argument
834 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic()
835 napi_hash_del(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic()
836 netif_napi_del(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic()
840 static inline void bnx2x_del_all_napi(struct bnx2x *bp) bnx2x_del_all_napi() argument
844 for_each_eth_queue(bp, i) { for_each_eth_queue()
845 napi_hash_del(&bnx2x_fp(bp, i, napi)); for_each_eth_queue()
846 netif_napi_del(&bnx2x_fp(bp, i, napi)); for_each_eth_queue()
850 int bnx2x_set_int_mode(struct bnx2x *bp);
852 static inline void bnx2x_disable_msi(struct bnx2x *bp) bnx2x_disable_msi() argument
854 if (bp->flags & USING_MSIX_FLAG) { bnx2x_disable_msi()
855 pci_disable_msix(bp->pdev); bnx2x_disable_msi()
856 bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG); bnx2x_disable_msi()
857 } else if (bp->flags & USING_MSI_FLAG) { bnx2x_disable_msi()
858 pci_disable_msi(bp->pdev); bnx2x_disable_msi()
859 bp->flags &= ~USING_MSI_FLAG; bnx2x_disable_msi()
911 static inline int func_by_vn(struct bnx2x *bp, int vn) func_by_vn() argument
913 return 2 * vn + BP_PORT(bp); func_by_vn()
916 static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) bnx2x_config_rss_eth() argument
918 return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); bnx2x_config_rss_eth()
924 * @bp: driver handle
928 static inline int bnx2x_func_start(struct bnx2x *bp) bnx2x_func_start() argument
937 func_params.f_obj = &bp->func_obj; bnx2x_func_start()
941 start_params->mf_mode = bp->mf_mode; bnx2x_func_start()
942 start_params->sd_vlan_tag = bp->mf_ov; bnx2x_func_start()
945 if (IS_MF_BD(bp)) { bnx2x_func_start()
948 REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD); bnx2x_func_start()
949 REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD); bnx2x_func_start()
950 REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD); bnx2x_func_start()
952 bnx2x_get_c2s_mapping(bp, start_params->c2s_pri, bnx2x_func_start()
965 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) bnx2x_func_start()
970 start_params->vxlan_dst_port = bp->vxlan_dst_port; bnx2x_func_start()
974 if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { bnx2x_func_start()
980 return bnx2x_func_state_change(bp, &func_params); bnx2x_func_start()
1002 static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, bnx2x_free_rx_mem_pool() argument
1013 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, bnx2x_free_rx_sge_range() argument
1022 bnx2x_free_rx_sge(bp, fp, i); bnx2x_free_rx_sge_range()
1024 bnx2x_free_rx_mem_pool(bp, &fp->page_pool); bnx2x_free_rx_sge_range()
1049 struct bnx2x *bp = fp->bp; bnx2x_stats_id() local
1050 if (!CHIP_IS_E1x(bp)) { bnx2x_stats_id()
1053 return bp->cnic_base_cl_id + (bp->pf_num >> 1); bnx2x_stats_id()
1056 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; bnx2x_stats_id()
1062 struct bnx2x *bp = fp->bp; bnx2x_init_vlan_mac_fp_objs() local
1065 bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, bnx2x_init_vlan_mac_fp_objs()
1066 fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_init_vlan_mac_fp_objs()
1067 bnx2x_sp_mapping(bp, mac_rdata), bnx2x_init_vlan_mac_fp_objs()
1069 &bp->sp_state, obj_type, bnx2x_init_vlan_mac_fp_objs()
1070 &bp->macs_pool); bnx2x_init_vlan_mac_fp_objs()
1072 if (!CHIP_IS_E1x(bp)) bnx2x_init_vlan_mac_fp_objs()
1073 bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj, bnx2x_init_vlan_mac_fp_objs()
1074 fp->cl_id, fp->cid, BP_FUNC(bp), bnx2x_init_vlan_mac_fp_objs()
1075 bnx2x_sp(bp, vlan_rdata), bnx2x_init_vlan_mac_fp_objs()
1076 bnx2x_sp_mapping(bp, vlan_rdata), bnx2x_init_vlan_mac_fp_objs()
1078 &bp->sp_state, obj_type, bnx2x_init_vlan_mac_fp_objs()
1079 &bp->vlans_pool); bnx2x_init_vlan_mac_fp_objs()
1085 * @bp: driver handle
1090 static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) bnx2x_get_path_func_num() argument
1095 if (CHIP_IS_E1(bp)) bnx2x_get_path_func_num()
1101 if (CHIP_REV_IS_SLOW(bp)) { bnx2x_get_path_func_num()
1102 if (IS_MF(bp)) bnx2x_get_path_func_num()
1109 MF_CFG_RD(bp, bnx2x_get_path_func_num()
1110 func_mf_config[BP_PORT(bp) + 2 * i]. bnx2x_get_path_func_num()
1122 static inline void bnx2x_init_bp_objs(struct bnx2x *bp) bnx2x_init_bp_objs() argument
1125 bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); bnx2x_init_bp_objs()
1128 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, bnx2x_init_bp_objs()
1129 BP_FUNC(bp), BP_FUNC(bp), bnx2x_init_bp_objs()
1130 bnx2x_sp(bp, mcast_rdata), bnx2x_init_bp_objs()
1131 bnx2x_sp_mapping(bp, mcast_rdata), bnx2x_init_bp_objs()
1132 BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, bnx2x_init_bp_objs()
1136 bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_init_bp_objs()
1137 bnx2x_get_path_func_num(bp)); bnx2x_init_bp_objs()
1139 bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp), bnx2x_init_bp_objs()
1140 bnx2x_get_path_func_num(bp)); bnx2x_init_bp_objs()
1143 bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, bnx2x_init_bp_objs()
1144 bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), bnx2x_init_bp_objs()
1145 bnx2x_sp(bp, rss_rdata), bnx2x_init_bp_objs()
1146 bnx2x_sp_mapping(bp, rss_rdata), bnx2x_init_bp_objs()
1147 BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, bnx2x_init_bp_objs()
1150 bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp)); bnx2x_init_bp_objs()
1155 if (CHIP_IS_E1x(fp->bp)) bnx2x_fp_qzone_id()
1156 return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; bnx2x_fp_qzone_id()
1161 static inline void bnx2x_init_txdata(struct bnx2x *bp, bnx2x_init_txdata() argument
1170 txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size; bnx2x_init_txdata()
1176 static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) bnx2x_cnic_eth_cl_id() argument
1178 return bp->cnic_base_cl_id + cl_idx + bnx2x_cnic_eth_cl_id()
1179 (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; bnx2x_cnic_eth_cl_id()
1182 static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) bnx2x_cnic_fw_sb_id() argument
1185 return bp->base_fw_ndsb; bnx2x_cnic_fw_sb_id()
1188 static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) bnx2x_cnic_igu_sb_id() argument
1190 return bp->igu_base_sb; bnx2x_cnic_igu_sb_id()
1193 static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, bnx2x_clean_tx_queue() argument
1217 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
1219 static inline void __storm_memset_struct(struct bnx2x *bp, __storm_memset_struct() argument
1224 REG_WR(bp, addr + (i * 4), data[i]); __storm_memset_struct()
1230 * @bp: driver handle
1233 static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) bnx2x_wait_sp_comp() argument
1239 netif_addr_lock_bh(bp->dev); bnx2x_wait_sp_comp()
1240 if (!(bp->sp_state & mask)) { bnx2x_wait_sp_comp()
1241 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp()
1244 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp()
1251 netif_addr_lock_bh(bp->dev); bnx2x_wait_sp_comp()
1252 if (bp->sp_state & mask) { bnx2x_wait_sp_comp()
1254 bp->sp_state, mask); bnx2x_wait_sp_comp()
1255 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp()
1258 netif_addr_unlock_bh(bp->dev); bnx2x_wait_sp_comp()
1266 * @bp: driver handle
1270 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
1273 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
1275 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1276 void bnx2x_release_phy_lock(struct bnx2x *bp);
1281 * @bp: driver handle
1285 static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) bnx2x_extract_max_cfg() argument
1313 * @bp: driver handle
1316 void bnx2x_get_iscsi_info(struct bnx2x *bp);
1321 * @bp: driver handle
1324 static inline void bnx2x_link_sync_notify(struct bnx2x *bp) bnx2x_link_sync_notify() argument
1330 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { bnx2x_link_sync_notify()
1331 if (vn == BP_VN(bp)) bnx2x_link_sync_notify()
1334 func = func_by_vn(bp, vn); bnx2x_link_sync_notify()
1335 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + bnx2x_link_sync_notify()
1343 * @bp: driver handle
1348 static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) bnx2x_update_drv_flags() argument
1350 if (SHMEM2_HAS(bp, drv_flags)) { bnx2x_update_drv_flags()
1352 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); bnx2x_update_drv_flags()
1353 drv_flags = SHMEM2_RD(bp, drv_flags); bnx2x_update_drv_flags()
1360 SHMEM2_WR(bp, drv_flags, drv_flags); bnx2x_update_drv_flags()
1362 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); bnx2x_update_drv_flags()
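
bnx2x_update_drv_flags() above is a read-modify-write of a shared word bracketed by HW_LOCK_RESOURCE_DRV_FLAGS: read drv_flags from shmem, set or clear the requested bits, write the word back, release the lock. An illustrative userspace analogue, with a pthread mutex standing in for the hardware lock and a plain global for the shmem word:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t drv_flags_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t drv_flags;              /* stands in for the shmem word */

    static void update_drv_flags(uint32_t flags, int set)
    {
        pthread_mutex_lock(&drv_flags_lock);   /* acquire_hw_lock(...)  */
        if (set)
            drv_flags |= flags;                /* SET_FLAGS() analogue  */
        else
            drv_flags &= ~flags;               /* RESET_FLAGS() analogue */
        pthread_mutex_unlock(&drv_flags_lock); /* release_hw_lock(...)  */
    }

    int main(void)
    {
        update_drv_flags(1u << 3, 1);          /* set bit 3   */
        update_drv_flags(1u << 3, 0);          /* clear bit 3 */
        printf("drv_flags=%#x\n", drv_flags);
        return 0;
    }
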
1371 * @bp: driver handle
1376 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
1378 int bnx2x_drain_tx_queues(struct bnx2x *bp);
1379 void bnx2x_squeeze_objects(struct bnx2x *bp);
1387 * @bp: driver handle
1390 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
1395 * @bp: driver handle
1400 int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
bnx2x_init_ops.h
22 #define BP_ILT(bp) NULL
26 #define BP_FUNC(bp) 0
30 #define BP_PORT(bp) 0
45 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
46 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
47 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
51 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, bnx2x_init_str_wr() argument
57 REG_WR(bp, addr + i*4, data[i]); bnx2x_init_str_wr()
60 static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, bnx2x_init_ind_wr() argument
66 bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); bnx2x_init_ind_wr()
69 static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, bnx2x_write_big_buf() argument
72 if (bp->dmae_ready) bnx2x_write_big_buf()
73 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); bnx2x_write_big_buf()
76 else if (wb && CHIP_IS_E1(bp)) bnx2x_write_big_buf()
77 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf()
81 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf()
84 static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, bnx2x_init_fill() argument
91 memset(GUNZIP_BUF(bp), (u8)fill, buf_len); bnx2x_init_fill()
96 bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); bnx2x_init_fill()
100 static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) bnx2x_write_big_buf_wb() argument
102 if (bp->dmae_ready) bnx2x_write_big_buf_wb()
103 bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); bnx2x_write_big_buf_wb()
106 else if (CHIP_IS_E1(bp)) bnx2x_write_big_buf_wb()
107 bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf_wb()
111 bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); bnx2x_write_big_buf_wb()
114 static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, bnx2x_init_wr_64() argument
127 u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i; bnx2x_init_wr_64()
135 bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); bnx2x_init_wr_64()
153 static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, bnx2x_sel_blob() argument
157 data = INIT_TSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob()
160 data = INIT_CSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob()
163 data = INIT_USEM_INT_TABLE_DATA(bp); bnx2x_sel_blob()
166 data = INIT_XSEM_INT_TABLE_DATA(bp); bnx2x_sel_blob()
169 data = INIT_TSEM_PRAM_DATA(bp); bnx2x_sel_blob()
172 data = INIT_CSEM_PRAM_DATA(bp); bnx2x_sel_blob()
175 data = INIT_USEM_PRAM_DATA(bp); bnx2x_sel_blob()
178 data = INIT_XSEM_PRAM_DATA(bp); bnx2x_sel_blob()
183 static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, bnx2x_init_wr_wb() argument
186 if (bp->dmae_ready) bnx2x_init_wr_wb()
187 VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); bnx2x_init_wr_wb()
190 else if (CHIP_IS_E1(bp)) bnx2x_init_wr_wb()
191 bnx2x_init_ind_wr(bp, addr, data, len); bnx2x_init_wr_wb()
195 bnx2x_init_str_wr(bp, addr, data, len); bnx2x_init_wr_wb()
198 static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, bnx2x_wr_64() argument
205 REG_WR_DMAE_LEN(bp, reg, wb_write, 2); bnx2x_wr_64()
207 static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, bnx2x_init_wr_zp() argument
214 data = bnx2x_sel_blob(bp, addr, data) + blob_off*4; bnx2x_init_wr_zp()
216 rc = bnx2x_gunzip(bp, data, len); bnx2x_init_wr_zp()
221 len = GUNZIP_OUTLEN(bp); bnx2x_init_wr_zp()
223 ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32) bnx2x_init_wr_zp()
224 cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); bnx2x_init_wr_zp()
226 bnx2x_write_big_buf_wb(bp, addr, len); bnx2x_init_wr_zp()
229 static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) bnx2x_init_block() argument
232 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, bnx2x_init_block()
235 INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, bnx2x_init_block()
245 data_base = INIT_DATA(bp); bnx2x_init_block()
249 op = (const union init_op *)&(INIT_OPS(bp)[op_idx]); bnx2x_init_block()
262 REG_RD(bp, addr); bnx2x_init_block()
265 REG_WR(bp, addr, op->write.val); bnx2x_init_block()
268 bnx2x_init_str_wr(bp, addr, data, len); bnx2x_init_block()
271 bnx2x_init_wr_wb(bp, addr, data, len); bnx2x_init_block()
274 bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); bnx2x_init_block()
277 bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); bnx2x_init_block()
280 bnx2x_init_wr_zp(bp, addr, len, bnx2x_init_block()
284 bnx2x_init_wr_64(bp, addr, data, len); bnx2x_init_block()
290 if ((INIT_MODE_FLAGS(bp) & bnx2x_init_block()
299 if ((INIT_MODE_FLAGS(bp) & bnx2x_init_block()
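
bnx2x_init_block() above is an interpreter: it walks a table of init ops (via INIT_OPS()/INIT_OPS_OFFSETS()) and dispatches on the op type, covering reads, writes, string writes, wide-bus writes, fills, and zipped blobs. The toy interpreter below shows only the shape of that loop over a hypothetical three-op set, not the driver's real op encoding:

    #include <stdint.h>
    #include <stdio.h>

    enum op_type { OP_WR, OP_RD, OP_ZR };

    struct init_op {
        enum op_type type;
        uint32_t addr;
        uint32_t val;                   /* value for OP_WR, length for OP_ZR */
    };

    static uint32_t regs[16];           /* stands in for device register space */

    static void run_init_ops(const struct init_op *ops, int n)
    {
        for (int i = 0; i < n; i++) {
            switch (ops[i].type) {
            case OP_WR:                 /* single register write */
                regs[ops[i].addr] = ops[i].val;
                break;
            case OP_RD:                 /* read, value discarded */
                (void)regs[ops[i].addr];
                break;
            case OP_ZR:                 /* zero-fill a range     */
                for (uint32_t j = 0; j < ops[i].val; j++)
                    regs[ops[i].addr + j] = 0;
                break;
            }
        }
    }

    int main(void)
    {
        static const struct init_op ops[] = {
            { OP_WR, 0, 0xdeadbeef },
            { OP_ZR, 4, 8 },
            { OP_RD, 0, 0 },
        };

        run_init_ops(ops, 3);
        printf("regs[0]=%#x\n", regs[0]);
        return 0;
    }
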
475 static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, bnx2x_init_pxp_arb() argument
490 if (CHIP_REV_IS_FPGA(bp)) { bnx2x_init_pxp_arb()
497 REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); bnx2x_init_pxp_arb()
498 REG_WR(bp, read_arb_addr[i].add, bnx2x_init_pxp_arb()
500 REG_WR(bp, read_arb_addr[i].ubound, bnx2x_init_pxp_arb()
508 REG_WR(bp, write_arb_addr[i].l, bnx2x_init_pxp_arb()
511 REG_WR(bp, write_arb_addr[i].add, bnx2x_init_pxp_arb()
514 REG_WR(bp, write_arb_addr[i].ubound, bnx2x_init_pxp_arb()
518 val = REG_RD(bp, write_arb_addr[i].l); bnx2x_init_pxp_arb()
519 REG_WR(bp, write_arb_addr[i].l, bnx2x_init_pxp_arb()
522 val = REG_RD(bp, write_arb_addr[i].add); bnx2x_init_pxp_arb()
523 REG_WR(bp, write_arb_addr[i].add, bnx2x_init_pxp_arb()
526 val = REG_RD(bp, write_arb_addr[i].ubound); bnx2x_init_pxp_arb()
527 REG_WR(bp, write_arb_addr[i].ubound, bnx2x_init_pxp_arb()
535 REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val); bnx2x_init_pxp_arb()
540 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); bnx2x_init_pxp_arb()
542 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); bnx2x_init_pxp_arb()
543 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order); bnx2x_init_pxp_arb()
544 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); bnx2x_init_pxp_arb()
545 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); bnx2x_init_pxp_arb()
547 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) bnx2x_init_pxp_arb()
548 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); bnx2x_init_pxp_arb()
550 if (CHIP_IS_E3(bp)) bnx2x_init_pxp_arb()
551 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); bnx2x_init_pxp_arb()
552 else if (CHIP_IS_E2(bp)) bnx2x_init_pxp_arb()
553 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); bnx2x_init_pxp_arb()
555 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); bnx2x_init_pxp_arb()
557 if (!CHIP_IS_E1(bp)) { bnx2x_init_pxp_arb()
564 if (!CHIP_IS_E1H(bp)) { bnx2x_init_pxp_arb()
567 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); bnx2x_init_pxp_arb()
570 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); bnx2x_init_pxp_arb()
573 REG_WR(bp, PXP2_REG_WR_HC_MPS, val); bnx2x_init_pxp_arb()
574 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); bnx2x_init_pxp_arb()
575 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); bnx2x_init_pxp_arb()
576 REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val); bnx2x_init_pxp_arb()
577 REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val); bnx2x_init_pxp_arb()
578 REG_WR(bp, PXP2_REG_WR_QM_MPS, val); bnx2x_init_pxp_arb()
579 REG_WR(bp, PXP2_REG_WR_TM_MPS, val); bnx2x_init_pxp_arb()
580 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); bnx2x_init_pxp_arb()
581 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); bnx2x_init_pxp_arb()
582 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); bnx2x_init_pxp_arb()
587 val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST); bnx2x_init_pxp_arb()
590 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20); bnx2x_init_pxp_arb()
618 static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, bnx2x_ilt_line_mem_op() argument
633 static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, bnx2x_ilt_client_mem_op() argument
637 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_mem_op()
647 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i], bnx2x_ilt_client_mem_op()
653 static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop) bnx2x_ilt_mem_op_cnic() argument
657 if (CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_mem_op_cnic()
658 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); bnx2x_ilt_mem_op_cnic()
660 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); bnx2x_ilt_mem_op_cnic()
665 static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) bnx2x_ilt_mem_op() argument
667 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); bnx2x_ilt_mem_op()
669 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); bnx2x_ilt_mem_op()
670 if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_mem_op()
671 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); bnx2x_ilt_mem_op()
676 static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, bnx2x_ilt_line_wr() argument
681 if (CHIP_IS_E1(bp)) bnx2x_ilt_line_wr()
686 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); bnx2x_ilt_line_wr()
689 static void bnx2x_ilt_line_init_op(struct bnx2x *bp, bnx2x_ilt_line_init_op() argument
700 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping); bnx2x_ilt_line_init_op()
704 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping); bnx2x_ilt_line_init_op()
709 static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp, bnx2x_ilt_boundry_init_op() argument
720 if (CHIP_IS_E1(bp)) { bnx2x_ilt_boundry_init_op()
735 REG_WR(bp, start_reg + BP_FUNC(bp)*4, bnx2x_ilt_boundry_init_op()
757 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); bnx2x_ilt_boundry_init_op()
758 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end)); bnx2x_ilt_boundry_init_op()
762 static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, bnx2x_ilt_client_init_op_ilt() argument
773 bnx2x_ilt_line_init_op(bp, ilt, i, initop); bnx2x_ilt_client_init_op_ilt()
776 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop); bnx2x_ilt_client_init_op_ilt()
779 static void bnx2x_ilt_client_init_op(struct bnx2x *bp, bnx2x_ilt_client_init_op() argument
782 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_init_op()
784 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop); bnx2x_ilt_client_init_op()
787 static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, bnx2x_ilt_client_id_init_op() argument
790 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_client_id_init_op()
793 bnx2x_ilt_client_init_op(bp, ilt_cli, initop); bnx2x_ilt_client_id_init_op()
796 static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_op_cnic() argument
798 if (CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_init_op_cnic()
799 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); bnx2x_ilt_init_op_cnic()
800 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); bnx2x_ilt_init_op_cnic()
803 static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_op() argument
805 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); bnx2x_ilt_init_op()
806 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); bnx2x_ilt_init_op()
807 if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) bnx2x_ilt_init_op()
808 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); bnx2x_ilt_init_op()
811 static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, bnx2x_ilt_init_client_psz() argument
814 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_ilt_init_client_psz()
824 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12)); bnx2x_ilt_init_client_psz()
835 static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop) bnx2x_ilt_init_page_size() argument
837 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU, bnx2x_ilt_init_page_size()
839 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM, bnx2x_ilt_init_page_size()
841 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC, bnx2x_ilt_init_page_size()
843 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM, bnx2x_ilt_init_page_size()
855 static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_init_cid_count() argument
858 int port = BP_PORT(bp); bnx2x_qm_init_cid_count()
865 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, bnx2x_qm_init_cid_count()
874 static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_set_ptr_table() argument
880 REG_WR(bp, base_reg + i*4, bnx2x_qm_set_ptr_table()
882 bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2); bnx2x_qm_set_ptr_table()
887 static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, bnx2x_qm_init_ptr_table() argument
897 bnx2x_qm_set_ptr_table(bp, qm_cid_count, bnx2x_qm_init_ptr_table()
899 if (CHIP_IS_E1H(bp)) bnx2x_qm_init_ptr_table()
900 bnx2x_qm_set_ptr_table(bp, qm_cid_count, bnx2x_qm_init_ptr_table()
913 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, bnx2x_src_init_t2() argument
917 int port = BP_PORT(bp); bnx2x_src_init_t2()
925 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); bnx2x_src_init_t2()
927 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16, bnx2x_src_init_t2()
930 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16, bnx2x_src_init_t2()
bnx2x_cmn.c
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) bnx2x_add_all_napi_cnic() argument
46 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic()
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), for_each_rx_queue_cnic()
49 napi_hash_add(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic()
53 static void bnx2x_add_all_napi(struct bnx2x *bp) bnx2x_add_all_napi() argument
58 for_each_eth_queue(bp, i) { for_each_eth_queue()
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), for_each_eth_queue()
61 napi_hash_add(&bnx2x_fp(bp, i, napi)); for_each_eth_queue()
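
The NAPI add/del excerpts iterate queues through macros such as for_each_eth_queue(bp, i) and for_each_rx_queue_cnic(bp, i). A minimal sketch of that iterator-macro style, with a simplified device structure in place of struct bnx2x:

    #include <stdio.h>

    struct dev {
        int num_queues;
    };

    /* Hypothetical reduction of the driver's queue iterator macros. */
    #define for_each_eth_queue(d, i) \
        for ((i) = 0; (i) < (d)->num_queues; (i)++)

    int main(void)
    {
        struct dev d = { 4 };
        int i;

        for_each_eth_queue(&d, i)
            printf("init napi for queue %d\n", i);
        return 0;
    }
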
65 static int bnx2x_calc_num_queues(struct bnx2x *bp) bnx2x_calc_num_queues() argument
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); bnx2x_calc_num_queues()
80 * @bp: driver handle
84 * Makes sure the contents of the bp->fp[to].napi is kept
90 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) bnx2x_move_fp() argument
92 struct bnx2x_fastpath *from_fp = &bp->fp[from]; bnx2x_move_fp()
93 struct bnx2x_fastpath *to_fp = &bp->fp[to]; bnx2x_move_fp()
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; bnx2x_move_fp()
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; bnx2x_move_fp()
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; bnx2x_move_fp()
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; bnx2x_move_fp()
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; bnx2x_move_fp()
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * bnx2x_move_fp()
127 (bp)->max_cos; bnx2x_move_fp()
128 if (from == FCOE_IDX(bp)) { bnx2x_move_fp()
133 memcpy(&bp->bnx2x_txq[new_txdata_index], bnx2x_move_fp()
134 &bp->bnx2x_txq[old_txdata_index], bnx2x_move_fp()
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; bnx2x_move_fp()
142 * @bp: driver handle
147 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) bnx2x_fill_fw_str() argument
149 if (IS_PF(bp)) { bnx2x_fill_fw_str()
153 bnx2x_get_ext_phy_fw_version(&bp->link_params, bnx2x_fill_fw_str()
155 strlcpy(buf, bp->fw_ver, buf_len); bnx2x_fill_fw_str()
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), bnx2x_fill_fw_str()
158 (bp->common.bc_ver & 0xff0000) >> 16, bnx2x_fill_fw_str()
159 (bp->common.bc_ver & 0xff00) >> 8, bnx2x_fill_fw_str()
160 (bp->common.bc_ver & 0xff), bnx2x_fill_fw_str()
163 bnx2x_vf_fill_fw_str(bp, buf, buf_len); bnx2x_fill_fw_str()
170 * @bp: driver handle
173 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) bnx2x_shrink_eth_fp() argument
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_shrink_eth_fp()
180 for (cos = 1; cos < bp->max_cos; cos++) { bnx2x_shrink_eth_fp()
182 struct bnx2x_fastpath *fp = &bp->fp[i]; bnx2x_shrink_eth_fp()
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], bnx2x_shrink_eth_fp()
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; bnx2x_shrink_eth_fp()
197 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, bnx2x_free_tx_pkt() argument
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), bnx2x_free_tx_pkt()
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), bnx2x_free_tx_pkt()
276 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) bnx2x_tx_int() argument
283 if (unlikely(bp->panic)) bnx2x_tx_int()
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); bnx2x_tx_int()
300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, bnx2x_tx_int()
336 (bp->state == BNX2X_STATE_OPEN) && bnx2x_tx_int()
337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) bnx2x_tx_int()
358 struct bnx2x *bp = fp->bp; bnx2x_update_sge_prod() local
410 static u32 bnx2x_get_rxhash(const struct bnx2x *bp, bnx2x_get_rxhash() argument
415 if ((bp->dev->features & NETIF_F_RXHASH) && bnx2x_get_rxhash()
434 struct bnx2x *bp = fp->bp; bnx2x_tpa_start() local
447 mapping = dma_map_single(&bp->pdev->dev, bnx2x_tpa_start()
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_tpa_start()
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); bnx2x_tpa_start()
546 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_alloc_rx_sge() argument
571 mapping = dma_map_page(&bp->pdev->dev, pool->page, bnx2x_alloc_rx_sge()
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_alloc_rx_sge()
592 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_fill_frag_skb() argument
644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); bnx2x_fill_frag_skb()
646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_fill_frag_skb()
650 dma_unmap_page(&bp->pdev->dev, bnx2x_fill_frag_skb()
704 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) bnx2x_gro_ip_csum() argument
716 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) bnx2x_gro_ipv6_csum() argument
728 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_gro_csum() argument
732 gro_func(bp, skb); bnx2x_gro_csum()
737 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_gro_receive() argument
744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); bnx2x_gro_receive()
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); bnx2x_gro_receive()
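
bnx2x_gro_receive() above picks the per-protocol checksum fixer (bnx2x_gro_ip_csum or bnx2x_gro_ipv6_csum) and passes it to bnx2x_gro_csum() as a function pointer, so the common bracketing code lives in one place. A reduced sketch of that callback-parameter shape, with invented packet types:

    #include <stdio.h>

    struct pkt {
        int proto;                      /* 4 or 6 in this toy example */
    };

    static void fix_ip_csum(struct pkt *p)   { (void)p; printf("v4 csum\n"); }
    static void fix_ipv6_csum(struct pkt *p) { (void)p; printf("v6 csum\n"); }

    static void gro_csum(struct pkt *p, void (*fix)(struct pkt *))
    {
        /* common prologue/epilogue would bracket this call */
        fix(p);
    }

    static void gro_receive(struct pkt *p)
    {
        if (p->proto == 4)
            gro_csum(p, fix_ip_csum);
        else
            gro_csum(p, fix_ipv6_csum);
    }

    int main(void)
    {
        struct pkt p4 = { 4 }, p6 = { 6 };

        gro_receive(&p4);
        gro_receive(&p6);
        return 0;
    }
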
759 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_tpa_stop() argument
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bnx2x_tpa_stop()
804 skb->protocol = eth_type_trans(skb, bp->dev); bnx2x_tpa_stop()
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, bnx2x_tpa_stop()
811 bnx2x_gro_receive(bp, fp, skb); bnx2x_tpa_stop()
829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; bnx2x_tpa_stop()
832 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_alloc_rx_data() argument
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, bnx2x_alloc_rx_data()
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_alloc_rx_data()
888 struct bnx2x *bp = fp->bp; bnx2x_rx_int() local
896 if (unlikely(bp->panic)) bnx2x_rx_int()
926 if (unlikely(bp->panic)) bnx2x_rx_int()
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages, bnx2x_rx_int()
1008 if (bp->panic) bnx2x_rx_int()
1018 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2x_rx_int()
1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; bnx2x_rx_int()
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && bnx2x_rx_int()
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_rx_int()
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, bnx2x_rx_int()
1050 dma_unmap_single(&bp->pdev->dev, bnx2x_rx_int()
1057 bnx2x_fp_qstats(bp, fp)-> bnx2x_rx_int()
1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; bnx2x_rx_int()
1073 skb->protocol = eth_type_trans(skb, bp->dev); bnx2x_rx_int()
1076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); bnx2x_rx_int()
1081 if (bp->dev->features & NETIF_F_RXCSUM) bnx2x_rx_int()
1083 bnx2x_fp_qstats(bp, fp)); bnx2x_rx_int()
1090 bnx2x_set_rx_ts(bp, skb); bnx2x_rx_int()
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, bnx2x_rx_int()
1143 struct bnx2x *bp = fp->bp; bnx2x_msix_fp_int() local
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bnx2x_msix_fp_int()
1153 if (unlikely(bp->panic)) bnx2x_msix_fp_int()
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); bnx2x_msix_fp_int()
1168 void bnx2x_acquire_phy_lock(struct bnx2x *bp) bnx2x_acquire_phy_lock() argument
1170 mutex_lock(&bp->port.phy_mutex); bnx2x_acquire_phy_lock()
1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); bnx2x_acquire_phy_lock()
1175 void bnx2x_release_phy_lock(struct bnx2x *bp) bnx2x_release_phy_lock() argument
1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); bnx2x_release_phy_lock()
1179 mutex_unlock(&bp->port.phy_mutex); bnx2x_release_phy_lock()
1183 u16 bnx2x_get_mf_speed(struct bnx2x *bp) bnx2x_get_mf_speed() argument
1185 u16 line_speed = bp->link_vars.line_speed; bnx2x_get_mf_speed()
1186 if (IS_MF(bp)) { bnx2x_get_mf_speed()
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp, bnx2x_get_mf_speed()
1188 bp->mf_config[BP_VN(bp)]); bnx2x_get_mf_speed()
1193 if (IS_MF_PERCENT_BW(bp)) bnx2x_get_mf_speed()
1209 * @bp: driver handle
1214 static void bnx2x_fill_report_data(struct bnx2x *bp, bnx2x_fill_report_data() argument
1219 if (IS_PF(bp)) { bnx2x_fill_report_data()
1221 data->line_speed = bnx2x_get_mf_speed(bp); bnx2x_fill_report_data()
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) bnx2x_fill_report_data()
1228 if (!BNX2X_NUM_ETH_QUEUES(bp)) bnx2x_fill_report_data()
1233 if (bp->link_vars.duplex == DUPLEX_FULL) bnx2x_fill_report_data()
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) bnx2x_fill_report_data()
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) bnx2x_fill_report_data()
1247 *data = bp->vf_link_vars; bnx2x_fill_report_data()
1254 * @bp: driver handle
1261 void bnx2x_link_report(struct bnx2x *bp) bnx2x_link_report() argument
1263 bnx2x_acquire_phy_lock(bp); bnx2x_link_report()
1264 __bnx2x_link_report(bp); bnx2x_link_report()
1265 bnx2x_release_phy_lock(bp); bnx2x_link_report()
1271 * @bp: driver handle
1276 void __bnx2x_link_report(struct bnx2x *bp) __bnx2x_link_report() argument
1281 if (IS_PF(bp) && !CHIP_IS_E1(bp)) __bnx2x_link_report()
1282 bnx2x_read_mf_cfg(bp); __bnx2x_link_report()
1285 bnx2x_fill_report_data(bp, &cur_data); __bnx2x_link_report()
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || __bnx2x_link_report()
1290 &bp->last_reported_link.link_report_flags) && __bnx2x_link_report()
1295 bp->link_cnt++; __bnx2x_link_report()
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); __bnx2x_link_report()
1303 if (IS_PF(bp)) __bnx2x_link_report()
1304 bnx2x_iov_link_update(bp); __bnx2x_link_report()
1308 netif_carrier_off(bp->dev); __bnx2x_link_report()
1309 netdev_err(bp->dev, "NIC Link is Down\n"); __bnx2x_link_report()
1315 netif_carrier_on(bp->dev); __bnx2x_link_report()
1341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", __bnx2x_link_report()
1364 static void bnx2x_free_tpa_pool(struct bnx2x *bp, bnx2x_free_tpa_pool() argument
1379 dma_unmap_single(&bp->pdev->dev, bnx2x_free_tpa_pool()
1387 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) bnx2x_init_rx_rings_cnic() argument
1391 for_each_rx_queue_cnic(bp, j) { for_each_rx_queue_cnic()
1392 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_rx_queue_cnic()
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, for_each_rx_queue_cnic()
1406 void bnx2x_init_rx_rings(struct bnx2x *bp) bnx2x_init_rx_rings() argument
1408 int func = BP_FUNC(bp); bnx2x_init_rx_rings()
1413 for_each_eth_queue(bp, j) { for_each_eth_queue()
1414 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue()
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); for_each_eth_queue()
1421 for (i = 0; i < MAX_AGG_QS(bp); i++) { for_each_eth_queue()
1432 bnx2x_free_tpa_pool(bp, fp, i); for_each_eth_queue()
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, for_each_eth_queue()
1457 bnx2x_free_rx_sge_range(bp, fp, for_each_eth_queue()
1459 bnx2x_free_tpa_pool(bp, fp, for_each_eth_queue()
1460 MAX_AGG_QS(bp)); for_each_eth_queue()
1472 for_each_eth_queue(bp, j) { for_each_eth_queue()
1473 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue()
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, for_each_eth_queue()
1488 if (CHIP_IS_E1(bp)) { for_each_eth_queue()
1489 REG_WR(bp, BAR_USTRORM_INTMEM + for_each_eth_queue()
1492 REG_WR(bp, BAR_USTRORM_INTMEM + for_each_eth_queue()
1502 struct bnx2x *bp = fp->bp; bnx2x_free_tx_skbs_queue() local
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), for_each_cos_in_tx_queue()
1518 netdev_get_tx_queue(bp->dev, for_each_cos_in_tx_queue()
1523 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) bnx2x_free_tx_skbs_cnic() argument
1527 for_each_tx_queue_cnic(bp, i) { for_each_tx_queue_cnic()
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]); for_each_tx_queue_cnic()
1532 static void bnx2x_free_tx_skbs(struct bnx2x *bp) bnx2x_free_tx_skbs() argument
1536 for_each_eth_queue(bp, i) { for_each_eth_queue()
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]); for_each_eth_queue()
1543 struct bnx2x *bp = fp->bp; bnx2x_free_rx_bds() local
1556 dma_unmap_single(&bp->pdev->dev, bnx2x_free_rx_bds()
1565 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) bnx2x_free_rx_skbs_cnic() argument
1569 for_each_rx_queue_cnic(bp, j) { for_each_rx_queue_cnic()
1570 bnx2x_free_rx_bds(&bp->fp[j]); for_each_rx_queue_cnic()
1574 static void bnx2x_free_rx_skbs(struct bnx2x *bp) bnx2x_free_rx_skbs() argument
1578 for_each_eth_queue(bp, j) { for_each_eth_queue()
1579 struct bnx2x_fastpath *fp = &bp->fp[j]; for_each_eth_queue()
1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); for_each_eth_queue()
1588 static void bnx2x_free_skbs_cnic(struct bnx2x *bp) bnx2x_free_skbs_cnic() argument
1590 bnx2x_free_tx_skbs_cnic(bp); bnx2x_free_skbs_cnic()
1591 bnx2x_free_rx_skbs_cnic(bp); bnx2x_free_skbs_cnic()
1594 void bnx2x_free_skbs(struct bnx2x *bp) bnx2x_free_skbs() argument
1596 bnx2x_free_tx_skbs(bp); bnx2x_free_skbs()
1597 bnx2x_free_rx_skbs(bp); bnx2x_free_skbs()
1600 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) bnx2x_update_max_mf_config() argument
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; bnx2x_update_max_mf_config()
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { bnx2x_update_max_mf_config()
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); bnx2x_update_max_mf_config()
1620 * @bp: driver handle
1623 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) bnx2x_free_msix_irqs() argument
1631 if (IS_PF(bp)) { bnx2x_free_msix_irqs()
1632 free_irq(bp->msix_table[offset].vector, bp->dev); bnx2x_free_msix_irqs()
1634 bp->msix_table[offset].vector); bnx2x_free_msix_irqs()
1638 if (CNIC_SUPPORT(bp)) { bnx2x_free_msix_irqs()
1644 for_each_eth_queue(bp, i) { for_each_eth_queue()
1648 i, bp->msix_table[offset].vector); for_each_eth_queue()
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); for_each_eth_queue()
1654 void bnx2x_free_irq(struct bnx2x *bp) bnx2x_free_irq() argument
1656 if (bp->flags & USING_MSIX_FLAG && bnx2x_free_irq()
1657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { bnx2x_free_irq()
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); bnx2x_free_irq()
1661 if (IS_PF(bp)) bnx2x_free_irq()
1664 bnx2x_free_msix_irqs(bp, nvecs); bnx2x_free_irq()
1666 free_irq(bp->dev->irq, bp->dev); bnx2x_free_irq()
1670 int bnx2x_enable_msix(struct bnx2x *bp) bnx2x_enable_msix() argument
1675 if (IS_PF(bp)) { bnx2x_enable_msix()
1676 bp->msix_table[msix_vec].entry = msix_vec; bnx2x_enable_msix()
1678 bp->msix_table[0].entry); bnx2x_enable_msix()
1683 if (CNIC_SUPPORT(bp)) { bnx2x_enable_msix()
1684 bp->msix_table[msix_vec].entry = msix_vec; bnx2x_enable_msix()
1686 msix_vec, bp->msix_table[msix_vec].entry); bnx2x_enable_msix()
1691 for_each_eth_queue(bp, i) { for_each_eth_queue()
1692 bp->msix_table[msix_vec].entry = msix_vec; for_each_eth_queue()
1701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1717 bp->flags |= USING_SINGLE_MSIX_FLAG;
1720 bp->num_ethernet_queues = 1;
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1734 bp->num_ethernet_queues -= diff;
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1738 bp->num_queues);
1741 bp->flags |= USING_MSIX_FLAG;
1748 bp->flags |= DISABLE_MSI_FLAG;
1753 static int bnx2x_req_msix_irqs(struct bnx2x *bp) bnx2x_req_msix_irqs() argument
1758 if (IS_PF(bp)) { bnx2x_req_msix_irqs()
1759 rc = request_irq(bp->msix_table[offset++].vector, bnx2x_req_msix_irqs()
1761 bp->dev->name, bp->dev); bnx2x_req_msix_irqs()
1768 if (CNIC_SUPPORT(bp)) bnx2x_req_msix_irqs()
1771 for_each_eth_queue(bp, i) { for_each_eth_queue()
1772 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
1774 bp->dev->name, i); for_each_eth_queue()
1776 rc = request_irq(bp->msix_table[offset].vector, for_each_eth_queue()
1780 bp->msix_table[offset].vector, rc); for_each_eth_queue()
1781 bnx2x_free_msix_irqs(bp, offset); for_each_eth_queue()
1788 i = BNX2X_NUM_ETH_QUEUES(bp);
1789 if (IS_PF(bp)) {
1790 offset = 1 + CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1793 bp->msix_table[0].vector,
1794 0, bp->msix_table[offset].vector,
1795 i - 1, bp->msix_table[offset + i - 1].vector);
1797 offset = CNIC_SUPPORT(bp);
1798 netdev_info(bp->dev,
1800 0, bp->msix_table[offset].vector,
1801 i - 1, bp->msix_table[offset + i - 1].vector);
1806 int bnx2x_enable_msi(struct bnx2x *bp) bnx2x_enable_msi() argument
1810 rc = pci_enable_msi(bp->pdev); bnx2x_enable_msi()
1815 bp->flags |= USING_MSI_FLAG; bnx2x_enable_msi()
1820 static int bnx2x_req_irq(struct bnx2x *bp) bnx2x_req_irq() argument
1825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) bnx2x_req_irq()
1830 if (bp->flags & USING_MSIX_FLAG) bnx2x_req_irq()
1831 irq = bp->msix_table[0].vector; bnx2x_req_irq()
1833 irq = bp->pdev->irq; bnx2x_req_irq()
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); bnx2x_req_irq()
1838 static int bnx2x_setup_irqs(struct bnx2x *bp) bnx2x_setup_irqs() argument
1841 if (bp->flags & USING_MSIX_FLAG && bnx2x_setup_irqs()
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) { bnx2x_setup_irqs()
1843 rc = bnx2x_req_msix_irqs(bp); bnx2x_setup_irqs()
1847 rc = bnx2x_req_irq(bp); bnx2x_setup_irqs()
1852 if (bp->flags & USING_MSI_FLAG) { bnx2x_setup_irqs()
1853 bp->dev->irq = bp->pdev->irq; bnx2x_setup_irqs()
1854 netdev_info(bp->dev, "using MSI IRQ %d\n", bnx2x_setup_irqs()
1855 bp->dev->irq); bnx2x_setup_irqs()
1857 if (bp->flags & USING_MSIX_FLAG) { bnx2x_setup_irqs()
1858 bp->dev->irq = bp->msix_table[0].vector; bnx2x_setup_irqs()
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n", bnx2x_setup_irqs()
1860 bp->dev->irq); bnx2x_setup_irqs()
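
Taken together, bnx2x_enable_msix(), bnx2x_enable_msi() and bnx2x_setup_irqs() above suggest a fallback ladder: a full MSI-X vector set, then a single shared MSI-X vector (USING_SINGLE_MSIX_FLAG), then plain MSI, and finally the legacy INTx line. The sketch below models only that control flow; the enable calls are stubs, not the real pci_enable_msix_range()/pci_enable_msi() API:

    #include <stdio.h>

    enum irq_mode { IRQ_MSIX, IRQ_SINGLE_MSIX, IRQ_MSI, IRQ_INTX };

    /* Stubs standing in for the PCI enable calls; this fake "hardware"
     * can grant at most 4 MSI-X vectors and always supports MSI. */
    static int try_enable_msix(int nvecs) { return nvecs <= 4 ? nvecs : -1; }
    static int try_enable_msi(void)       { return 0; }

    static enum irq_mode pick_irq_mode(int wanted)
    {
        if (try_enable_msix(wanted) == wanted)
            return IRQ_MSIX;            /* full per-queue vector set */
        if (try_enable_msix(1) == 1)
            return IRQ_SINGLE_MSIX;     /* one shared MSI-X vector   */
        if (try_enable_msi() == 0)
            return IRQ_MSI;             /* plain MSI                 */
        return IRQ_INTX;                /* legacy line interrupt     */
    }

    int main(void)
    {
        printf("mode=%d\n", pick_irq_mode(8)); /* falls back to single MSI-X */
        return 0;
    }
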
1867 static void bnx2x_napi_enable_cnic(struct bnx2x *bp) bnx2x_napi_enable_cnic() argument
1871 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic()
1872 bnx2x_fp_busy_poll_init(&bp->fp[i]); for_each_rx_queue_cnic()
1873 napi_enable(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic()
1877 static void bnx2x_napi_enable(struct bnx2x *bp) bnx2x_napi_enable() argument
1881 for_each_eth_queue(bp, i) { for_each_eth_queue()
1882 bnx2x_fp_busy_poll_init(&bp->fp[i]); for_each_eth_queue()
1883 napi_enable(&bnx2x_fp(bp, i, napi)); for_each_eth_queue()
1887 static void bnx2x_napi_disable_cnic(struct bnx2x *bp) bnx2x_napi_disable_cnic() argument
1891 for_each_rx_queue_cnic(bp, i) { for_each_rx_queue_cnic()
1892 napi_disable(&bnx2x_fp(bp, i, napi)); for_each_rx_queue_cnic()
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i])) for_each_rx_queue_cnic()
1898 static void bnx2x_napi_disable(struct bnx2x *bp) bnx2x_napi_disable() argument
1902 for_each_eth_queue(bp, i) { for_each_eth_queue()
1903 napi_disable(&bnx2x_fp(bp, i, napi)); for_each_eth_queue()
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i])) for_each_eth_queue()
1909 void bnx2x_netif_start(struct bnx2x *bp) bnx2x_netif_start() argument
1911 if (netif_running(bp->dev)) { bnx2x_netif_start()
1912 bnx2x_napi_enable(bp); bnx2x_netif_start()
1913 if (CNIC_LOADED(bp)) bnx2x_netif_start()
1914 bnx2x_napi_enable_cnic(bp); bnx2x_netif_start()
1915 bnx2x_int_enable(bp); bnx2x_netif_start()
1916 if (bp->state == BNX2X_STATE_OPEN) bnx2x_netif_start()
1917 netif_tx_wake_all_queues(bp->dev); bnx2x_netif_start()
1921 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_netif_stop() argument
1923 bnx2x_int_disable_sync(bp, disable_hw); bnx2x_netif_stop()
1924 bnx2x_napi_disable(bp); bnx2x_netif_stop()
1925 if (CNIC_LOADED(bp)) bnx2x_netif_stop()
1926 bnx2x_napi_disable_cnic(bp); bnx2x_netif_stop()
1932 struct bnx2x *bp = netdev_priv(dev); bnx2x_select_queue() local
1934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { bnx2x_select_queue()
1948 return bnx2x_fcoe_tx(bp, txq_index); bnx2x_select_queue()
1952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); bnx2x_select_queue()
1955 void bnx2x_set_num_queues(struct bnx2x *bp) bnx2x_set_num_queues() argument
1958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); bnx2x_set_num_queues()
1961 if (IS_MF_STORAGE_ONLY(bp)) bnx2x_set_num_queues()
1962 bp->num_ethernet_queues = 1; bnx2x_set_num_queues()
1965 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ bnx2x_set_num_queues()
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_set_num_queues()
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); bnx2x_set_num_queues()
1974 * @bp: Driver handle
1978 * bp->max_cos.
1993 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) bnx2x_set_real_num_queues() argument
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; bnx2x_set_real_num_queues()
1998 rx = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_set_real_num_queues()
2001 if (include_cnic && !NO_FCOE(bp)) { bnx2x_set_real_num_queues()
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx); bnx2x_set_real_num_queues()
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx); bnx2x_set_real_num_queues()
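
Per the kernel-doc fragment above, bnx2x_set_real_num_queues() tells the stack how many rings really exist: tx gets one ring per CoS per ethernet queue, rx one ring per ethernet queue, and the FCoE branch adjusts both (its body is not shown in this listing, so the exact increment is an assumption). The arithmetic in miniature:

#include <stdio.h>

int main(void)
{
	int eth_queues = 8, max_cos = 3;
	int fcoe = 1;	/* assumed: the FCoE branch bumps both counts */

	int tx = eth_queues * max_cos + fcoe;
	int rx = eth_queues + fcoe;

	printf("tx rings: %d, rx rings: %d\n", tx, rx);
	return 0;
}
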
2023 static void bnx2x_set_rx_buf_size(struct bnx2x *bp) bnx2x_set_rx_buf_size() argument
2027 for_each_queue(bp, i) { for_each_queue()
2028 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_queue()
2041 mtu = bp->dev->mtu; for_each_queue()
2055 static int bnx2x_init_rss(struct bnx2x *bp) bnx2x_init_rss() argument
2058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_init_rss()
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) bnx2x_init_rss()
2064 bp->rss_conf_obj.ind_table[i] = bnx2x_init_rss()
2065 bp->fp->cl_id + bnx2x_init_rss()
2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); bnx2x_init_rss()
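
bnx2x_init_rss() above fills the RSS indirection table by cycling the ethernet queues' client ids: slot i maps to cl_id + (i % num_eth_queues). A standalone rendering of that loop; the table size and cl_id base are made up:

#include <stdio.h>

#define IND_TABLE_SIZE 128

int main(void)
{
	unsigned char ind_table[IND_TABLE_SIZE];
	int num_eth_queues = 4;	/* BNX2X_NUM_ETH_QUEUES() stand-in */
	int cl_id_base = 16;	/* bp->fp->cl_id stand-in */

	for (int i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = cl_id_base + (i % num_eth_queues);

	printf("slot 0 -> %d, slot 5 -> %d\n", ind_table[0], ind_table[5]);
	return 0;
}
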
2079 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, bnx2x_rss() argument
2087 * if (!is_eth_multi(bp)) bnx2x_rss()
2088 * bp->multi_mode = ETH_RSS_MODE_DISABLED; bnx2x_rss()
2108 if (!CHIP_IS_E1x(bp)) { bnx2x_rss()
2131 if (IS_PF(bp)) bnx2x_rss()
2132 return bnx2x_config_rss(bp, &params); bnx2x_rss()
2134 return bnx2x_vfpf_config_rss(bp, &params); bnx2x_rss()
2137 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) bnx2x_init_hw() argument
2144 func_params.f_obj = &bp->func_obj; bnx2x_init_hw()
2149 return bnx2x_func_state_change(bp, &func_params); bnx2x_init_hw()
2156 void bnx2x_squeeze_objects(struct bnx2x *bp) bnx2x_squeeze_objects() argument
2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; bnx2x_squeeze_objects()
2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, bnx2x_squeeze_objects()
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, bnx2x_squeeze_objects()
2186 rparam.mcast_obj = &bp->mcast_obj; bnx2x_squeeze_objects()
2193 netif_addr_lock_bh(bp->dev); bnx2x_squeeze_objects()
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); bnx2x_squeeze_objects()
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_squeeze_objects()
2205 netif_addr_unlock_bh(bp->dev); bnx2x_squeeze_objects()
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_squeeze_objects()
2211 netif_addr_unlock_bh(bp->dev); bnx2x_squeeze_objects()
2215 #define LOAD_ERROR_EXIT(bp, label) \
2217 (bp)->state = BNX2X_STATE_ERROR; \
2221 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2223 bp->cnic_loaded = false; \
2227 #define LOAD_ERROR_EXIT(bp, label) \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2233 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
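
The two LOAD_ERROR_EXIT variants above differ only in whether they also latch (bp)->panic; the second pair is presumably the BNX2X_STOP_ON_ERROR build. Both wrap the usual goto-based unwind. A compilable sketch of the pattern, with stand-in names:

#include <stdio.h>

#define STATE_ERROR 2

struct fake_bp { int state; int panic; };

#define LOAD_ERROR_EXIT(bp, label)		\
	do {					\
		(bp)->state = STATE_ERROR;	\
		goto label;			\
	} while (0)

static int load(struct fake_bp *bp, int fail_step)
{
	/* step 1 ... */
	if (fail_step == 1)
		LOAD_ERROR_EXIT(bp, err1);
	/* step 2 ... */
	if (fail_step == 2)
		LOAD_ERROR_EXIT(bp, err2);
	return 0;
err2:
	puts("undo step 2");	/* unwind runs in reverse order */
err1:
	puts("undo step 1");
	return -1;
}

int main(void)
{
	struct fake_bp bp = { 0, 0 };

	return load(&bp, 2) == -1 ? 0 : 1;
}
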
2241 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) bnx2x_free_fw_stats_mem() argument
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bnx2x_free_fw_stats_mem()
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_free_fw_stats_mem()
2248 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) bnx2x_alloc_fw_stats_mem() argument
2251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; bnx2x_alloc_fw_stats_mem()
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; bnx2x_alloc_fw_stats_mem()
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; bnx2x_alloc_fw_stats_mem()
2264 * the VFs themselves. We don't include them in the bp->fw_stats_num as bnx2x_alloc_fw_stats_mem()
2268 if (IS_SRIOV(bp)) bnx2x_alloc_fw_stats_mem()
2269 vf_headroom = bnx2x_vf_headroom(bp); bnx2x_alloc_fw_stats_mem()
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + bnx2x_alloc_fw_stats_mem()
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? bnx2x_alloc_fw_stats_mem()
2282 bp->fw_stats_num, vf_headroom, num_groups); bnx2x_alloc_fw_stats_mem()
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) + bnx2x_alloc_fw_stats_mem()
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) + bnx2x_alloc_fw_stats_mem()
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, bnx2x_alloc_fw_stats_mem()
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz); bnx2x_alloc_fw_stats_mem()
2302 if (!bp->fw_stats) bnx2x_alloc_fw_stats_mem()
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; bnx2x_alloc_fw_stats_mem()
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping; bnx2x_alloc_fw_stats_mem()
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *) bnx2x_alloc_fw_stats_mem()
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); bnx2x_alloc_fw_stats_mem()
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping + bnx2x_alloc_fw_stats_mem()
2311 bp->fw_stats_req_sz; bnx2x_alloc_fw_stats_mem()
2314 U64_HI(bp->fw_stats_req_mapping), bnx2x_alloc_fw_stats_mem()
2315 U64_LO(bp->fw_stats_req_mapping)); bnx2x_alloc_fw_stats_mem()
2317 U64_HI(bp->fw_stats_data_mapping), bnx2x_alloc_fw_stats_mem()
2318 U64_LO(bp->fw_stats_data_mapping)); bnx2x_alloc_fw_stats_mem()
2322 bnx2x_free_fw_stats_mem(bp); bnx2x_alloc_fw_stats_mem()
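
bnx2x_alloc_fw_stats_mem() above carves a single DMA allocation into two regions: the firmware stats request at offset 0 and the stats data immediately after it, with the two DMA addresses derived from the same base. A userspace sketch of that carve-up; the sizes are invented:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	size_t req_sz  = 128;	/* stand-in for bp->fw_stats_req_sz  */
	size_t data_sz = 512;	/* stand-in for bp->fw_stats_data_sz */

	uint8_t *base = calloc(1, req_sz + data_sz);
	if (!base)
		return 1;

	uint8_t *req  = base;		/* bp->fw_stats_req  */
	uint8_t *data = base + req_sz;	/* bp->fw_stats_data */

	printf("req  at +%zu\ndata at +%zu\n",
	       (size_t)(req - base), (size_t)(data - base));
	free(base);
	return 0;
}
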
2328 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) bnx2x_nic_load_request() argument
2333 bp->fw_seq = bnx2x_nic_load_request()
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & bnx2x_nic_load_request()
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); bnx2x_nic_load_request()
2339 bp->fw_drv_pulse_wr_seq = bnx2x_nic_load_request()
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & bnx2x_nic_load_request()
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); bnx2x_nic_load_request()
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) bnx2x_nic_load_request()
2350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); bnx2x_nic_load_request()
2372 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) bnx2x_compare_fw_ver() argument
2384 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); bnx2x_compare_fw_ver()
2404 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) bnx2x_nic_load_no_mcp() argument
2406 int path = BP_PATH(bp); bnx2x_nic_load_no_mcp()
2425 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) bnx2x_nic_load_pmf() argument
2430 bp->port.pmf = 1; bnx2x_nic_load_pmf()
2432 * writing to bp->port.pmf here and reading it from the bnx2x_nic_load_pmf()
2437 bp->port.pmf = 0; bnx2x_nic_load_pmf()
2440 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); bnx2x_nic_load_pmf()
2443 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) bnx2x_nic_load_afex_dcc() argument
2447 (bp->common.shmem2_base)) { bnx2x_nic_load_afex_dcc()
2448 if (SHMEM2_HAS(bp, dcc_support)) bnx2x_nic_load_afex_dcc()
2449 SHMEM2_WR(bp, dcc_support, bnx2x_nic_load_afex_dcc()
2452 if (SHMEM2_HAS(bp, afex_driver_support)) bnx2x_nic_load_afex_dcc()
2453 SHMEM2_WR(bp, afex_driver_support, bnx2x_nic_load_afex_dcc()
2458 bp->afex_def_vlan_tag = -1; bnx2x_nic_load_afex_dcc()
2464 * @bp: driver handle
2467 * Makes sure the contents of bp->fp[index].napi are kept bnx2x_bz_fp()
2470 static void bnx2x_bz_fp(struct bnx2x *bp, int index) bnx2x_bz_fp() argument
2472 struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_bz_fp()
2486 fp->bp = bp; bnx2x_bz_fp()
2489 fp->max_cos = bp->max_cos; bnx2x_bz_fp()
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; bnx2x_bz_fp()
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * bnx2x_bz_fp()
2500 BNX2X_NUM_ETH_QUEUES(bp) + index]; bnx2x_bz_fp()
2505 if (bp->dev->features & NETIF_F_LRO) bnx2x_bz_fp()
2507 else if (bp->dev->features & NETIF_F_GRO && bnx2x_bz_fp()
2508 bnx2x_mtu_allows_gro(bp->dev->mtu)) bnx2x_bz_fp()
2513 /* We don't want TPA if it's disabled in bp bnx2x_bz_fp()
2516 if (bp->disable_tpa || IS_FCOE_FP(fp)) bnx2x_bz_fp()
2520 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) bnx2x_set_os_driver_state() argument
2524 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) bnx2x_set_os_driver_state()
2527 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); bnx2x_set_os_driver_state()
2531 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); bnx2x_set_os_driver_state()
2534 int bnx2x_load_cnic(struct bnx2x *bp) bnx2x_load_cnic() argument
2536 int i, rc, port = BP_PORT(bp); bnx2x_load_cnic()
2540 mutex_init(&bp->cnic_mutex); bnx2x_load_cnic()
2542 if (IS_PF(bp)) { bnx2x_load_cnic()
2543 rc = bnx2x_alloc_mem_cnic(bp); bnx2x_load_cnic()
2545 BNX2X_ERR("Unable to allocate bp memory for cnic\n"); bnx2x_load_cnic()
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic()
2550 rc = bnx2x_alloc_fp_mem_cnic(bp); bnx2x_load_cnic()
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic()
2557 rc = bnx2x_set_real_num_queues(bp, 1); bnx2x_load_cnic()
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); bnx2x_load_cnic()
2564 bnx2x_add_all_napi_cnic(bp); bnx2x_load_cnic()
2566 bnx2x_napi_enable_cnic(bp); bnx2x_load_cnic()
2568 rc = bnx2x_init_hw_func_cnic(bp); bnx2x_load_cnic()
2570 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); bnx2x_load_cnic()
2572 bnx2x_nic_init_cnic(bp); bnx2x_load_cnic()
2574 if (IS_PF(bp)) { bnx2x_load_cnic()
2576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); bnx2x_load_cnic()
2579 for_each_cnic_queue(bp, i) { for_each_cnic_queue()
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); for_each_cnic_queue()
2583 LOAD_ERROR_EXIT(bp, load_error_cnic2); for_each_cnic_queue()
2589 bnx2x_set_rx_mode_inner(bp);
2592 bnx2x_get_iscsi_info(bp);
2593 bnx2x_setup_cnic_irq_info(bp);
2594 bnx2x_setup_cnic_info(bp);
2595 bp->cnic_loaded = true;
2596 if (bp->state == BNX2X_STATE_OPEN)
2597 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2606 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2609 bnx2x_napi_disable_cnic(bp);
2611 if (bnx2x_set_real_num_queues(bp, 0))
2615 bnx2x_free_fp_mem_cnic(bp);
2616 bnx2x_free_mem_cnic(bp);
2622 int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_nic_load() argument
2624 int port = BP_PORT(bp); bnx2x_nic_load()
2629 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled"); bnx2x_nic_load()
2632 if (unlikely(bp->panic)) { bnx2x_nic_load()
2638 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; bnx2x_nic_load()
2641 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); bnx2x_nic_load()
2643 &bp->last_reported_link.link_report_flags); bnx2x_nic_load()
2645 if (IS_PF(bp)) bnx2x_nic_load()
2647 bnx2x_ilt_set_info(bp); bnx2x_nic_load()
2651 * allocated only once, fp index, max_cos, bp pointer. bnx2x_nic_load()
2654 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); bnx2x_nic_load()
2655 for_each_queue(bp, i) bnx2x_nic_load()
2656 bnx2x_bz_fp(bp, i); bnx2x_nic_load()
2657 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + bnx2x_nic_load()
2658 bp->num_cnic_queues) * bnx2x_nic_load()
2661 bp->fcoe_init = false; bnx2x_nic_load()
2664 bnx2x_set_rx_buf_size(bp); bnx2x_nic_load()
2666 if (IS_PF(bp)) { bnx2x_nic_load()
2667 rc = bnx2x_alloc_mem(bp); bnx2x_nic_load()
2669 BNX2X_ERR("Unable to allocate bp memory\n"); bnx2x_nic_load()
2677 rc = bnx2x_alloc_fp_mem(bp); bnx2x_nic_load()
2680 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load()
2684 if (bnx2x_alloc_fw_stats_mem(bp)) bnx2x_nic_load()
2685 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load()
2688 if (IS_VF(bp)) { bnx2x_nic_load()
2689 rc = bnx2x_vfpf_init(bp); bnx2x_nic_load()
2691 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load()
2695 * bp->num_queues, bnx2x_set_real_num_queues() should always bnx2x_nic_load()
2698 rc = bnx2x_set_real_num_queues(bp, 0); bnx2x_nic_load()
2701 LOAD_ERROR_EXIT(bp, load_error0); bnx2x_nic_load()
2708 bnx2x_setup_tc(bp->dev, bp->max_cos); bnx2x_nic_load()
2711 bnx2x_add_all_napi(bp); bnx2x_nic_load()
2713 bnx2x_napi_enable(bp); bnx2x_nic_load()
2715 if (IS_PF(bp)) { bnx2x_nic_load()
2717 bnx2x_set_pf_load(bp); bnx2x_nic_load()
2720 if (!BP_NOMCP(bp)) { bnx2x_nic_load()
2722 rc = bnx2x_nic_load_request(bp, &load_code); bnx2x_nic_load()
2724 LOAD_ERROR_EXIT(bp, load_error1); bnx2x_nic_load()
2727 rc = bnx2x_compare_fw_ver(bp, load_code, true); bnx2x_nic_load()
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load()
2730 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load()
2733 load_code = bnx2x_nic_load_no_mcp(bp, port); bnx2x_nic_load()
2737 bnx2x_nic_load_pmf(bp, load_code); bnx2x_nic_load()
2740 bnx2x__init_func_obj(bp); bnx2x_nic_load()
2743 rc = bnx2x_init_hw(bp, load_code); bnx2x_nic_load()
2746 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load()
2747 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load()
2751 bnx2x_pre_irq_nic_init(bp); bnx2x_nic_load()
2754 rc = bnx2x_setup_irqs(bp); bnx2x_nic_load()
2757 if (IS_PF(bp)) bnx2x_nic_load()
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load()
2759 LOAD_ERROR_EXIT(bp, load_error2); bnx2x_nic_load()
2763 if (IS_PF(bp)) { bnx2x_nic_load()
2765 bnx2x_post_irq_nic_init(bp, load_code); bnx2x_nic_load()
2767 bnx2x_init_bp_objs(bp); bnx2x_nic_load()
2768 bnx2x_iov_nic_init(bp); bnx2x_nic_load()
2771 bp->afex_def_vlan_tag = -1; bnx2x_nic_load()
2772 bnx2x_nic_load_afex_dcc(bp, load_code); bnx2x_nic_load()
2773 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; bnx2x_nic_load()
2774 rc = bnx2x_func_start(bp); bnx2x_nic_load()
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); bnx2x_nic_load()
2779 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load()
2783 if (!BP_NOMCP(bp)) { bnx2x_nic_load()
2784 load_code = bnx2x_fw_command(bp, bnx2x_nic_load()
2789 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load()
2794 bnx2x_update_coalesce(bp); bnx2x_nic_load()
2798 rc = bnx2x_setup_leading(bp); bnx2x_nic_load()
2801 LOAD_ERROR_EXIT(bp, load_error3); bnx2x_nic_load()
2805 for_each_nondefault_eth_queue(bp, i) { for_each_nondefault_eth_queue()
2806 if (IS_PF(bp)) for_each_nondefault_eth_queue()
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false); for_each_nondefault_eth_queue()
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); for_each_nondefault_eth_queue()
2812 LOAD_ERROR_EXIT(bp, load_error3); for_each_nondefault_eth_queue()
2817 rc = bnx2x_init_rss(bp);
2820 LOAD_ERROR_EXIT(bp, load_error3);
2824 bp->state = BNX2X_STATE_OPEN;
2827 if (IS_PF(bp))
2828 rc = bnx2x_set_eth_mac(bp, true);
2830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2834 LOAD_ERROR_EXIT(bp, load_error3);
2837 if (IS_PF(bp) && bp->pending_max) {
2838 bnx2x_update_max_mf_config(bp, bp->pending_max);
2839 bp->pending_max = 0;
2842 if (bp->port.pmf) {
2843 rc = bnx2x_initial_phy_init(bp, load_mode);
2845 LOAD_ERROR_EXIT(bp, load_error3);
2847 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2852 rc = bnx2x_vlan_reconfigure_vid(bp);
2854 LOAD_ERROR_EXIT(bp, load_error3);
2857 bnx2x_set_rx_mode_inner(bp);
2859 if (bp->flags & PTP_SUPPORTED) {
2860 bnx2x_init_ptp(bp);
2861 bnx2x_configure_ptp_filters(bp);
2867 netif_tx_wake_all_queues(bp->dev);
2871 netif_tx_start_all_queues(bp->dev);
2877 bp->state = BNX2X_STATE_DIAG;
2884 if (bp->port.pmf)
2885 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2887 bnx2x__link_status_update(bp);
2890 mod_timer(&bp->timer, jiffies + bp->current_interval);
2892 if (CNIC_ENABLED(bp))
2893 bnx2x_load_cnic(bp);
2895 if (IS_PF(bp))
2896 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2898 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2901 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2903 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2904 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2910 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2912 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2917 if (IS_PF(bp))
2918 bnx2x_update_mfw_dump(bp);
2921 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2922 bnx2x_dcbx_init(bp, false);
2924 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2925 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2933 if (IS_PF(bp)) {
2934 bnx2x_int_disable_sync(bp, 1);
2937 bnx2x_squeeze_objects(bp);
2941 bnx2x_free_skbs(bp);
2942 for_each_rx_queue(bp, i)
2943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2946 bnx2x_free_irq(bp);
2948 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2949 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2950 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2953 bp->port.pmf = 0;
2955 bnx2x_napi_disable(bp);
2956 bnx2x_del_all_napi(bp);
2959 if (IS_PF(bp))
2960 bnx2x_clear_pf_load(bp);
2962 bnx2x_free_fw_stats_mem(bp);
2963 bnx2x_free_fp_mem(bp);
2964 bnx2x_free_mem(bp);
2970 int bnx2x_drain_tx_queues(struct bnx2x *bp) bnx2x_drain_tx_queues() argument
2975 for_each_tx_queue(bp, i) { for_each_tx_queue()
2976 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_tx_queue()
2979 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); for_each_tx_queue()
2987 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_nic_unload() argument
2994 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) bnx2x_nic_unload()
2995 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); bnx2x_nic_unload()
2998 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { bnx2x_nic_unload()
3000 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); bnx2x_nic_unload()
3001 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], bnx2x_nic_unload()
3005 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && bnx2x_nic_unload()
3006 (bp->state == BNX2X_STATE_CLOSED || bnx2x_nic_unload()
3007 bp->state == BNX2X_STATE_ERROR)) { bnx2x_nic_unload()
3015 bp->recovery_state = BNX2X_RECOVERY_DONE; bnx2x_nic_unload()
3016 bp->is_leader = 0; bnx2x_nic_unload()
3017 bnx2x_release_leader_lock(bp); bnx2x_nic_unload()
3031 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) bnx2x_nic_unload()
3034 /* It's important to set bp->state to a value different from bnx2x_nic_unload()
3038 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; bnx2x_nic_unload()
3042 bnx2x_iov_channel_down(bp); bnx2x_nic_unload()
3044 if (CNIC_LOADED(bp)) bnx2x_nic_unload()
3045 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); bnx2x_nic_unload()
3048 bnx2x_tx_disable(bp); bnx2x_nic_unload()
3049 netdev_reset_tc(bp->dev); bnx2x_nic_unload()
3051 bp->rx_mode = BNX2X_RX_MODE_NONE; bnx2x_nic_unload()
3053 del_timer_sync(&bp->timer); bnx2x_nic_unload()
3055 if (IS_PF(bp)) { bnx2x_nic_unload()
3057 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bnx2x_nic_unload()
3058 bnx2x_drv_pulse(bp); bnx2x_nic_unload()
3059 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_nic_unload()
3060 bnx2x_save_statistics(bp); bnx2x_nic_unload()
3064 bnx2x_drain_tx_queues(bp); bnx2x_nic_unload()
3069 if (IS_VF(bp)) bnx2x_nic_unload()
3070 bnx2x_vfpf_close_vf(bp); bnx2x_nic_unload()
3073 bnx2x_chip_cleanup(bp, unload_mode, keep_link); bnx2x_nic_unload()
3076 bnx2x_send_unload_req(bp, unload_mode); bnx2x_nic_unload()
3084 if (!CHIP_IS_E1x(bp)) bnx2x_nic_unload()
3085 bnx2x_pf_disable(bp); bnx2x_nic_unload()
3088 bnx2x_netif_stop(bp, 1); bnx2x_nic_unload()
3090 bnx2x_del_all_napi(bp); bnx2x_nic_unload()
3091 if (CNIC_LOADED(bp)) bnx2x_nic_unload()
3092 bnx2x_del_all_napi_cnic(bp); bnx2x_nic_unload()
3094 bnx2x_free_irq(bp); bnx2x_nic_unload()
3097 bnx2x_send_unload_done(bp, false); bnx2x_nic_unload()
3104 if (IS_PF(bp)) bnx2x_nic_unload()
3105 bnx2x_squeeze_objects(bp); bnx2x_nic_unload()
3108 bp->sp_state = 0; bnx2x_nic_unload()
3110 bp->port.pmf = 0; bnx2x_nic_unload()
3113 bp->sp_rtnl_state = 0; bnx2x_nic_unload()
3117 bnx2x_free_skbs(bp); bnx2x_nic_unload()
3118 if (CNIC_LOADED(bp)) bnx2x_nic_unload()
3119 bnx2x_free_skbs_cnic(bp); bnx2x_nic_unload()
3120 for_each_rx_queue(bp, i) bnx2x_nic_unload()
3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); bnx2x_nic_unload()
3123 bnx2x_free_fp_mem(bp); bnx2x_nic_unload()
3124 if (CNIC_LOADED(bp)) bnx2x_nic_unload()
3125 bnx2x_free_fp_mem_cnic(bp); bnx2x_nic_unload()
3127 if (IS_PF(bp)) { bnx2x_nic_unload()
3128 if (CNIC_LOADED(bp)) bnx2x_nic_unload()
3129 bnx2x_free_mem_cnic(bp); bnx2x_nic_unload()
3131 bnx2x_free_mem(bp); bnx2x_nic_unload()
3133 bp->state = BNX2X_STATE_CLOSED; bnx2x_nic_unload()
3134 bp->cnic_loaded = false; bnx2x_nic_unload()
3137 if (IS_PF(bp)) bnx2x_nic_unload()
3138 bnx2x_update_mng_version(bp); bnx2x_nic_unload()
3143 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { bnx2x_nic_unload()
3144 bnx2x_set_reset_in_progress(bp); bnx2x_nic_unload()
3148 bnx2x_set_reset_global(bp); bnx2x_nic_unload()
3154 if (IS_PF(bp) && bnx2x_nic_unload()
3155 !bnx2x_clear_pf_load(bp) && bnx2x_nic_unload()
3156 bnx2x_reset_is_done(bp, BP_PATH(bp))) bnx2x_nic_unload()
3157 bnx2x_disable_close_the_gate(bp); bnx2x_nic_unload()
3164 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) bnx2x_set_power_state() argument
3169 if (!bp->pdev->pm_cap) { bnx2x_set_power_state()
3174 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); bnx2x_set_power_state()
3178 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, bnx2x_set_power_state()
3190 if (atomic_read(&bp->pdev->enable_cnt) != 1) bnx2x_set_power_state()
3193 if (CHIP_REV_IS_SLOW(bp)) bnx2x_set_power_state()
3199 if (bp->wol) bnx2x_set_power_state()
3202 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, bnx2x_set_power_state()
3211 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); bnx2x_set_power_state()
3226 struct bnx2x *bp = fp->bp; bnx2x_poll() local
3230 if (unlikely(bp->panic)) { bnx2x_poll()
3240 bnx2x_tx_int(bp, fp->txdata_ptr[cos]); for_each_cos_in_tx_queue()
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3303 struct bnx2x *bp = fp->bp; bnx2x_low_latency_recv() local
3306 if ((bp->state == BNX2X_STATE_CLOSED) || bnx2x_low_latency_recv()
3307 (bp->state == BNX2X_STATE_ERROR) || bnx2x_low_latency_recv()
3308 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) bnx2x_low_latency_recv()
3327 static u16 bnx2x_tx_split(struct bnx2x *bp, bnx2x_tx_split() argument
3386 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) bnx2x_xmit_type() argument
3404 if (!CHIP_IS_E1x(bp) && skb->encapsulation) { bnx2x_xmit_type()
3443 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, bnx2x_pkt_req_lin() argument
3557 * @bp: driver handle
3564 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum_enc() argument
3591 * @bp: driver handle
3598 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum_e2() argument
3620 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_sbd_csum() argument
3636 * @bp: driver handle
3641 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, bnx2x_set_pbd_csum() argument
3784 struct bnx2x *bp = netdev_priv(dev); bnx2x_start_xmit() local
3798 u32 xmit_type = bnx2x_xmit_type(bp, skb); bnx2x_start_xmit()
3806 if (unlikely(bp->panic)) bnx2x_start_xmit()
3813 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); bnx2x_start_xmit()
3815 txdata = &bp->bnx2x_txq[txq_index]; bnx2x_start_xmit()
3826 if (unlikely(bnx2x_tx_avail(bp, txdata) < bnx2x_start_xmit()
3833 bnx2x_fp_qstats(bp, txdata->parent_fp); bnx2x_start_xmit()
3838 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; bnx2x_start_xmit()
3865 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { bnx2x_start_xmit()
3867 bp->lin_cnt++; bnx2x_start_xmit()
3877 mapping = dma_map_single(&bp->pdev->dev, skb->data, bnx2x_start_xmit()
3879 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_start_xmit()
3911 if (!(bp->flags & TX_TIMESTAMPING_EN)) { bnx2x_start_xmit()
3913 } else if (bp->ptp_tx_skb) { bnx2x_start_xmit()
3918 bp->ptp_tx_skb = skb_get(skb); bnx2x_start_xmit()
3919 bp->ptp_tx_start = jiffies; bnx2x_start_xmit()
3920 schedule_work(&bp->ptp_task); bnx2x_start_xmit()
3946 if (IS_VF(bp)) bnx2x_start_xmit()
3963 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); bnx2x_start_xmit()
3965 if (!CHIP_IS_E1x(bp)) { bnx2x_start_xmit()
3973 hlen = bnx2x_set_pbd_csum_enc(bp, skb, bnx2x_start_xmit()
4007 hlen = bnx2x_set_pbd_csum_e2(bp, skb, bnx2x_start_xmit()
4016 if (IS_VF(bp)) { bnx2x_start_xmit()
4028 if (bp->flags & TX_SWITCHING) bnx2x_start_xmit()
4053 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); bnx2x_start_xmit()
4084 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, bnx2x_start_xmit()
4088 if (!CHIP_IS_E1x(bp)) bnx2x_start_xmit()
4109 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, bnx2x_start_xmit()
4111 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_start_xmit()
4123 bnx2x_free_tx_pkt(bp, txdata, bnx2x_start_xmit()
4206 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); bnx2x_start_xmit()
4212 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { bnx2x_start_xmit()
4220 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; bnx2x_start_xmit()
4221 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) bnx2x_start_xmit()
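
The tail of bnx2x_start_xmit() above is the standard stop-then-recheck idiom: stop the tx queue when fewer than MAX_DESC_PER_TX_PKT descriptors remain, then test availability again and wake the queue if the completion path freed descriptors in between (the real driver separates the two steps with a memory barrier). A single-threaded model of the idea; the threshold and ring state are invented:

#include <stdio.h>
#include <stdbool.h>

#define MAX_DESC_PER_PKT 4	/* stand-in for MAX_DESC_PER_TX_PKT */

struct ring { int avail; bool stopped; };

static void tx_tail_check(struct ring *r)
{
	if (r->avail < MAX_DESC_PER_PKT) {
		r->stopped = true;		/* netif_tx_stop_queue() */
		/* a barrier sits here in the driver so a concurrent
		 * completion cannot be missed */
		if (r->avail >= MAX_DESC_PER_PKT)
			r->stopped = false;	/* netif_tx_wake_queue() */
	}
}

int main(void)
{
	struct ring r = { .avail = 2, .stopped = false };

	tx_tail_check(&r);
	printf("stopped: %d\n", (int)r.stopped);
	return 0;
}
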
4229 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) bnx2x_get_c2s_mapping() argument
4231 int mfw_vn = BP_FW_MB_IDX(bp); bnx2x_get_c2s_mapping()
4235 if (!IS_MF_BD(bp)) { bnx2x_get_c2s_mapping()
4245 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); bnx2x_get_c2s_mapping()
4252 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); bnx2x_get_c2s_mapping()
4259 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); bnx2x_get_c2s_mapping()
4274 struct bnx2x *bp = netdev_priv(dev); bnx2x_setup_tc() local
4288 if (num_tc > bp->max_cos) { bnx2x_setup_tc()
4290 num_tc, bp->max_cos); bnx2x_setup_tc()
4300 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); bnx2x_setup_tc()
4306 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); bnx2x_setup_tc()
4309 outer_prio, bp->prio_to_cos[outer_prio]); bnx2x_setup_tc()
4323 for (cos = 0; cos < bp->max_cos; cos++) { bnx2x_setup_tc()
4324 count = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_setup_tc()
4325 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); bnx2x_setup_tc()
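
bnx2x_setup_tc() above hands each CoS a contiguous block of tx rings: count is the number of ethernet queues and offset advances one block per class. A toy printout of the resulting mapping:

#include <stdio.h>

int main(void)
{
	int max_cos = 3, eth_queues = 4;

	for (int cos = 0; cos < max_cos; cos++) {
		int count  = eth_queues;
		int offset = cos * eth_queues;

		printf("tc %d -> queues [%d..%d]\n",
		       cos, offset, offset + count - 1);
	}
	return 0;
}
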
4339 struct bnx2x *bp = netdev_priv(dev); bnx2x_change_mac_addr() local
4347 if (IS_MF_STORAGE_ONLY(bp)) { bnx2x_change_mac_addr()
4353 rc = bnx2x_set_eth_mac(bp, false); bnx2x_change_mac_addr()
4361 rc = bnx2x_set_eth_mac(bp, true); bnx2x_change_mac_addr()
4363 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) bnx2x_change_mac_addr()
4364 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); bnx2x_change_mac_addr()
4369 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) bnx2x_free_fp_mem_at() argument
4371 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); bnx2x_free_fp_mem_at()
4372 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; bnx2x_free_fp_mem_at()
4382 if (!CHIP_IS_E1x(bp)) bnx2x_free_fp_mem_at()
4384 bnx2x_fp(bp, fp_index, bnx2x_free_fp_mem_at()
4389 bnx2x_fp(bp, fp_index, bnx2x_free_fp_mem_at()
4395 if (!skip_rx_queue(bp, fp_index)) { bnx2x_free_fp_mem_at()
4399 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); bnx2x_free_fp_mem_at()
4400 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), bnx2x_free_fp_mem_at()
4401 bnx2x_fp(bp, fp_index, rx_desc_mapping), bnx2x_free_fp_mem_at()
4404 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), bnx2x_free_fp_mem_at()
4405 bnx2x_fp(bp, fp_index, rx_comp_mapping), bnx2x_free_fp_mem_at()
4410 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); bnx2x_free_fp_mem_at()
4411 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), bnx2x_free_fp_mem_at()
4412 bnx2x_fp(bp, fp_index, rx_sge_mapping), bnx2x_free_fp_mem_at()
4417 if (!skip_tx_queue(bp, fp_index)) { bnx2x_free_fp_mem_at()
4435 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) bnx2x_free_fp_mem_cnic() argument
4438 for_each_cnic_queue(bp, i) bnx2x_free_fp_mem_cnic()
4439 bnx2x_free_fp_mem_at(bp, i); bnx2x_free_fp_mem_cnic()
4442 void bnx2x_free_fp_mem(struct bnx2x *bp) bnx2x_free_fp_mem() argument
4445 for_each_eth_queue(bp, i) bnx2x_free_fp_mem()
4446 bnx2x_free_fp_mem_at(bp, i); bnx2x_free_fp_mem()
4449 static void set_sb_shortcuts(struct bnx2x *bp, int index) set_sb_shortcuts() argument
4451 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); set_sb_shortcuts()
4452 if (!CHIP_IS_E1x(bp)) { set_sb_shortcuts()
4453 bnx2x_fp(bp, index, sb_index_values) = set_sb_shortcuts()
4455 bnx2x_fp(bp, index, sb_running_index) = set_sb_shortcuts()
4458 bnx2x_fp(bp, index, sb_index_values) = set_sb_shortcuts()
4460 bnx2x_fp(bp, index, sb_running_index) = set_sb_shortcuts()
4469 struct bnx2x *bp = fp->bp; bnx2x_alloc_rx_bds() local
4480 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { bnx2x_alloc_rx_bds()
4499 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; bnx2x_alloc_rx_bds()
4522 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) bnx2x_alloc_fp_mem_at() argument
4525 struct bnx2x_fastpath *fp = &bp->fp[index]; bnx2x_alloc_fp_mem_at()
4530 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { bnx2x_alloc_fp_mem_at()
4532 bp->rx_ring_size = rx_ring_size; bnx2x_alloc_fp_mem_at()
4533 } else if (!bp->rx_ring_size) { bnx2x_alloc_fp_mem_at()
4534 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); bnx2x_alloc_fp_mem_at()
4536 if (CHIP_IS_E3(bp)) { bnx2x_alloc_fp_mem_at()
4537 u32 cfg = SHMEM_RD(bp, bnx2x_alloc_fp_mem_at()
4538 dev_info.port_hw_config[BP_PORT(bp)]. bnx2x_alloc_fp_mem_at()
4548 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : bnx2x_alloc_fp_mem_at()
4551 bp->rx_ring_size = rx_ring_size; bnx2x_alloc_fp_mem_at()
4553 rx_ring_size = bp->rx_ring_size; bnx2x_alloc_fp_mem_at()
4558 sb = &bnx2x_fp(bp, index, status_blk); bnx2x_alloc_fp_mem_at()
4562 if (!CHIP_IS_E1x(bp)) { bnx2x_alloc_fp_mem_at()
4563 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), bnx2x_alloc_fp_mem_at()
4568 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), bnx2x_alloc_fp_mem_at()
4579 set_sb_shortcuts(bp, index); bnx2x_alloc_fp_mem_at()
4582 if (!skip_tx_queue(bp, index)) { bnx2x_alloc_fp_mem_at()
4604 if (!skip_rx_queue(bp, index)) {
4606 bnx2x_fp(bp, index, rx_buf_ring) =
4608 if (!bnx2x_fp(bp, index, rx_buf_ring))
4610 bnx2x_fp(bp, index, rx_desc_ring) =
4611 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4613 if (!bnx2x_fp(bp, index, rx_desc_ring))
4617 bnx2x_fp(bp, index, rx_comp_ring) =
4618 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4620 if (!bnx2x_fp(bp, index, rx_comp_ring))
4624 bnx2x_fp(bp, index, rx_page_ring) =
4627 if (!bnx2x_fp(bp, index, rx_page_ring))
4629 bnx2x_fp(bp, index, rx_sge_ring) =
4630 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4632 if (!bnx2x_fp(bp, index, rx_sge_ring))
4659 bnx2x_free_fp_mem_at(bp, index);
4665 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) bnx2x_alloc_fp_mem_cnic() argument
4667 if (!NO_FCOE(bp)) bnx2x_alloc_fp_mem_cnic()
4669 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) bnx2x_alloc_fp_mem_cnic()
4678 static int bnx2x_alloc_fp_mem(struct bnx2x *bp) bnx2x_alloc_fp_mem() argument
4687 if (bnx2x_alloc_fp_mem_at(bp, 0)) bnx2x_alloc_fp_mem()
4691 for_each_nondefault_eth_queue(bp, i) for_each_nondefault_eth_queue()
4692 if (bnx2x_alloc_fp_mem_at(bp, i)) for_each_nondefault_eth_queue()
4696 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { for_each_nondefault_eth_queue()
4697 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; for_each_nondefault_eth_queue()
4700 bnx2x_shrink_eth_fp(bp, delta); for_each_nondefault_eth_queue()
4701 if (CNIC_SUPPORT(bp)) for_each_nondefault_eth_queue()
4708 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); for_each_nondefault_eth_queue()
4709 bp->num_ethernet_queues -= delta; for_each_nondefault_eth_queue()
4710 bp->num_queues = bp->num_ethernet_queues + for_each_nondefault_eth_queue()
4711 bp->num_cnic_queues; for_each_nondefault_eth_queue()
4713 bp->num_queues + delta, bp->num_queues); for_each_nondefault_eth_queue()
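
The fallback in bnx2x_alloc_fp_mem() above survives a mid-loop allocation failure by keeping only the queues that did allocate and sliding the FCoE fastpath down by the shortfall so it stays adjacent to the last ethernet queue (the bnx2x_move_fp() call with FCOE_IDX - delta). A small model of the index arithmetic; all counts are invented:

#include <stdio.h>

int main(void)
{
	int want_eth = 8;	/* queues we asked for */
	int got_eth  = 5;	/* allocation failed at queue 5 */

	int delta        = want_eth - got_eth;
	int fcoe_idx_old = want_eth;	/* FCoE sat after the eth queues */
	int fcoe_idx_new = fcoe_idx_old - delta;

	printf("kept %d eth queues; fcoe index %d -> %d\n",
	       got_eth, fcoe_idx_old, fcoe_idx_new);
	return 0;
}
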
4719 void bnx2x_free_mem_bp(struct bnx2x *bp) bnx2x_free_mem_bp() argument
4723 for (i = 0; i < bp->fp_array_size; i++) bnx2x_free_mem_bp()
4724 kfree(bp->fp[i].tpa_info); bnx2x_free_mem_bp()
4725 kfree(bp->fp); bnx2x_free_mem_bp()
4726 kfree(bp->sp_objs); bnx2x_free_mem_bp()
4727 kfree(bp->fp_stats); bnx2x_free_mem_bp()
4728 kfree(bp->bnx2x_txq); bnx2x_free_mem_bp()
4729 kfree(bp->msix_table); bnx2x_free_mem_bp()
4730 kfree(bp->ilt); bnx2x_free_mem_bp()
4733 int bnx2x_alloc_mem_bp(struct bnx2x *bp) bnx2x_alloc_mem_bp() argument
4746 msix_table_size = bp->igu_sb_cnt; bnx2x_alloc_mem_bp()
4747 if (IS_PF(bp)) bnx2x_alloc_mem_bp()
4752 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); bnx2x_alloc_mem_bp()
4753 bp->fp_array_size = fp_array_size; bnx2x_alloc_mem_bp()
4754 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); bnx2x_alloc_mem_bp()
4756 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); bnx2x_alloc_mem_bp()
4759 for (i = 0; i < bp->fp_array_size; i++) { bnx2x_alloc_mem_bp()
4767 bp->fp = fp; bnx2x_alloc_mem_bp()
4770 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), bnx2x_alloc_mem_bp()
4772 if (!bp->sp_objs) bnx2x_alloc_mem_bp()
4776 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), bnx2x_alloc_mem_bp()
4778 if (!bp->fp_stats) bnx2x_alloc_mem_bp()
4783 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); bnx2x_alloc_mem_bp()
4786 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), bnx2x_alloc_mem_bp()
4788 if (!bp->bnx2x_txq) bnx2x_alloc_mem_bp()
4795 bp->msix_table = tbl; bnx2x_alloc_mem_bp()
4801 bp->ilt = ilt; bnx2x_alloc_mem_bp()
4805 bnx2x_free_mem_bp(bp); bnx2x_alloc_mem_bp()
4811 struct bnx2x *bp = netdev_priv(dev); bnx2x_reload_if_running() local
4816 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_reload_if_running()
4817 return bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_reload_if_running()
4820 int bnx2x_get_cur_phy_idx(struct bnx2x *bp) bnx2x_get_cur_phy_idx() argument
4823 if (bp->link_params.num_phys <= 1) bnx2x_get_cur_phy_idx()
4826 if (bp->link_vars.link_up) { bnx2x_get_cur_phy_idx()
4829 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && bnx2x_get_cur_phy_idx()
4830 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) bnx2x_get_cur_phy_idx()
4834 switch (bnx2x_phy_selection(&bp->link_params)) { bnx2x_get_cur_phy_idx()
4849 int bnx2x_get_link_cfg_idx(struct bnx2x *bp) bnx2x_get_link_cfg_idx() argument
4851 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_link_cfg_idx()
4858 if (bp->link_params.multi_phy_config & bnx2x_get_link_cfg_idx()
4871 struct bnx2x *bp = netdev_priv(dev); bnx2x_fcoe_get_wwn() local
4872 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; bnx2x_fcoe_get_wwn()
4895 struct bnx2x *bp = netdev_priv(dev); bnx2x_change_mtu() local
4897 if (pci_num_vf(bp->pdev)) { bnx2x_change_mtu()
4902 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_change_mtu()
4919 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) bnx2x_change_mtu()
4920 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); bnx2x_change_mtu()
4928 struct bnx2x *bp = netdev_priv(dev); bnx2x_fix_features() local
4930 if (pci_num_vf(bp->pdev)) { bnx2x_fix_features()
4936 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { bnx2x_fix_features()
4958 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_features() local
4964 if (!pci_num_vf(bp->pdev)) { bnx2x_set_features()
4966 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { bnx2x_set_features()
4967 bp->link_params.loopback_mode = LOOPBACK_BMAC; bnx2x_set_features()
4971 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { bnx2x_set_features()
4972 bp->link_params.loopback_mode = LOOPBACK_NONE; bnx2x_set_features()
4983 if ((changes & NETIF_F_GRO) && bp->disable_tpa) bnx2x_set_features()
4990 if (bp->recovery_state == BNX2X_RECOVERY_DONE) { bnx2x_set_features()
5003 struct bnx2x *bp = netdev_priv(dev); bnx2x_tx_timeout() local
5006 if (!bp->panic) bnx2x_tx_timeout()
5011 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); bnx2x_tx_timeout()
5017 struct bnx2x *bp; bnx2x_suspend() local
5023 bp = netdev_priv(dev); bnx2x_suspend()
5036 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_suspend()
5038 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); bnx2x_suspend()
5048 struct bnx2x *bp; bnx2x_resume() local
5055 bp = netdev_priv(dev); bnx2x_resume()
5057 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_resume()
5071 bnx2x_set_power_state(bp, PCI_D0); bnx2x_resume()
5074 rc = bnx2x_nic_load(bp, LOAD_OPEN); bnx2x_resume()
5081 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, bnx2x_set_ctx_validation() argument
5091 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), bnx2x_set_ctx_validation()
5095 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), bnx2x_set_ctx_validation()
5099 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, storm_memset_hc_timeout() argument
5105 REG_WR8(bp, addr, ticks); storm_memset_hc_timeout()
5111 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, storm_memset_hc_disable() argument
5118 u8 flags = REG_RD8(bp, addr); storm_memset_hc_disable()
5122 REG_WR8(bp, addr, flags); storm_memset_hc_disable()
5128 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, bnx2x_update_coalesce_sb_index() argument
5131 int port = BP_PORT(bp); bnx2x_update_coalesce_sb_index()
5134 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); bnx2x_update_coalesce_sb_index()
5137 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); bnx2x_update_coalesce_sb_index()
5140 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, bnx2x_schedule_sp_rtnl() argument
5144 set_bit(flag, &bp->sp_rtnl_state); bnx2x_schedule_sp_rtnl()
5148 schedule_delayed_work(&bp->sp_rtnl_task, 0); bnx2x_schedule_sp_rtnl()
H A Dbnx2x_ethtool.c192 static int bnx2x_get_port_type(struct bnx2x *bp) bnx2x_get_port_type() argument
195 u32 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_port_type()
196 switch (bp->link_params.phy[phy_idx].media_type) { bnx2x_get_port_type()
224 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_vf_settings() local
226 if (bp->state == BNX2X_STATE_OPEN) { bnx2x_get_vf_settings()
228 &bp->vf_link_vars.link_report_flags)) bnx2x_get_vf_settings()
233 ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); bnx2x_get_vf_settings()
260 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_settings() local
261 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_get_settings()
265 cmd->supported = bp->port.supported[cfg_idx] | bnx2x_get_settings()
266 (bp->port.supported[cfg_idx ^ 1] & bnx2x_get_settings()
268 cmd->advertising = bp->port.advertising[cfg_idx]; bnx2x_get_settings()
269 media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; bnx2x_get_settings()
275 if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && bnx2x_get_settings()
276 !(bp->flags & MF_FUNC_DIS)) { bnx2x_get_settings()
277 cmd->duplex = bp->link_vars.duplex; bnx2x_get_settings()
279 if (IS_MF(bp) && !BP_NOMCP(bp)) bnx2x_get_settings()
280 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); bnx2x_get_settings()
282 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); bnx2x_get_settings()
288 cmd->port = bnx2x_get_port_type(bp); bnx2x_get_settings()
290 cmd->phy_address = bp->mdio.prtad; bnx2x_get_settings()
293 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) bnx2x_get_settings()
299 if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { bnx2x_get_settings()
300 u32 status = bp->link_vars.link_status; bnx2x_get_settings()
359 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_settings() local
363 if (IS_MF_SD(bp)) bnx2x_set_settings()
381 if (IS_MF_SI(bp)) { bnx2x_set_settings()
383 u32 line_speed = bp->link_vars.line_speed; bnx2x_set_settings()
389 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { bnx2x_set_settings()
404 if (bp->state != BNX2X_STATE_OPEN) bnx2x_set_settings()
406 bp->pending_max = part; bnx2x_set_settings()
408 bnx2x_update_max_mf_config(bp, part); bnx2x_set_settings()
413 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_settings()
414 old_multi_phy_config = bp->link_params.multi_phy_config; bnx2x_set_settings()
415 if (cmd->port != bnx2x_get_port_type(bp)) { bnx2x_set_settings()
418 if (!(bp->port.supported[0] & SUPPORTED_TP || bnx2x_set_settings()
419 bp->port.supported[1] & SUPPORTED_TP)) { bnx2x_set_settings()
424 bp->link_params.multi_phy_config &= bnx2x_set_settings()
426 if (bp->link_params.multi_phy_config & bnx2x_set_settings()
428 bp->link_params.multi_phy_config |= bnx2x_set_settings()
431 bp->link_params.multi_phy_config |= bnx2x_set_settings()
437 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || bnx2x_set_settings()
438 bp->port.supported[1] & SUPPORTED_FIBRE)) { bnx2x_set_settings()
443 bp->link_params.multi_phy_config &= bnx2x_set_settings()
445 if (bp->link_params.multi_phy_config & bnx2x_set_settings()
447 bp->link_params.multi_phy_config |= bnx2x_set_settings()
450 bp->link_params.multi_phy_config |= bnx2x_set_settings()
459 new_multi_phy_config = bp->link_params.multi_phy_config; bnx2x_set_settings()
461 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_settings()
463 bp->link_params.multi_phy_config = old_multi_phy_config; bnx2x_set_settings()
467 u32 an_supported_speed = bp->port.supported[cfg_idx]; bnx2x_set_settings()
468 if (bp->link_params.phy[EXT_PHY1].type == bnx2x_set_settings()
472 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { bnx2x_set_settings()
484 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; bnx2x_set_settings()
485 bp->link_params.req_duplex[cfg_idx] = cmd->duplex; bnx2x_set_settings()
486 bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | bnx2x_set_settings()
490 bp->link_params.speed_cap_mask[cfg_idx] = 0; bnx2x_set_settings()
492 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
496 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
500 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
504 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
508 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
513 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
519 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
523 bp->link_params.speed_cap_mask[cfg_idx] |= bnx2x_set_settings()
531 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings()
541 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings()
555 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings()
565 if (!(bp->port.supported[cfg_idx] & bnx2x_set_settings()
584 if (bp->port.supported[cfg_idx] & bnx2x_set_settings()
589 } else if (bp->port.supported[cfg_idx] & bnx2x_set_settings()
607 if (!(bp->port.supported[cfg_idx] bnx2x_set_settings()
624 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_set_settings()
625 if ((bp->port.supported[cfg_idx] & bnx2x_set_settings()
627 (bp->link_params.phy[phy_idx].media_type != bnx2x_set_settings()
631 } else if (bp->port.supported[cfg_idx] & bnx2x_set_settings()
648 bp->link_params.req_line_speed[cfg_idx] = speed; bnx2x_set_settings()
649 bp->link_params.req_duplex[cfg_idx] = cmd->duplex; bnx2x_set_settings()
650 bp->port.advertising[cfg_idx] = advertising; bnx2x_set_settings()
655 bp->link_params.req_line_speed[cfg_idx], bnx2x_set_settings()
656 bp->link_params.req_duplex[cfg_idx], bnx2x_set_settings()
657 bp->port.advertising[cfg_idx]); bnx2x_set_settings()
660 bp->link_params.multi_phy_config = new_multi_phy_config; bnx2x_set_settings()
662 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_settings()
663 bnx2x_force_link_reset(bp); bnx2x_set_settings()
664 bnx2x_link_set(bp); bnx2x_set_settings()
673 static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset) __bnx2x_get_preset_regs_len() argument
675 if (CHIP_IS_E1(bp)) __bnx2x_get_preset_regs_len()
677 else if (CHIP_IS_E1H(bp)) __bnx2x_get_preset_regs_len()
679 else if (CHIP_IS_E2(bp)) __bnx2x_get_preset_regs_len()
681 else if (CHIP_IS_E3A0(bp)) __bnx2x_get_preset_regs_len()
683 else if (CHIP_IS_E3B0(bp)) __bnx2x_get_preset_regs_len()
689 static int __bnx2x_get_regs_len(struct bnx2x *bp) __bnx2x_get_regs_len() argument
696 regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx); __bnx2x_get_regs_len()
703 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_regs_len() local
706 if (IS_VF(bp)) bnx2x_get_regs_len()
709 regdump_len = __bnx2x_get_regs_len(bp); bnx2x_get_regs_len()
726 static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) __bnx2x_get_page_addr_ar() argument
728 if (CHIP_IS_E2(bp)) __bnx2x_get_page_addr_ar()
730 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_addr_ar()
736 static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) __bnx2x_get_page_reg_num() argument
738 if (CHIP_IS_E2(bp)) __bnx2x_get_page_reg_num()
740 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_reg_num()
746 static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) __bnx2x_get_page_write_ar() argument
748 if (CHIP_IS_E2(bp)) __bnx2x_get_page_write_ar()
750 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_write_ar()
756 static u32 __bnx2x_get_page_write_num(struct bnx2x *bp) __bnx2x_get_page_write_num() argument
758 if (CHIP_IS_E2(bp)) __bnx2x_get_page_write_num()
760 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_write_num()
766 static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) __bnx2x_get_page_read_ar() argument
768 if (CHIP_IS_E2(bp)) __bnx2x_get_page_read_ar()
770 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_read_ar()
776 static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) __bnx2x_get_page_read_num() argument
778 if (CHIP_IS_E2(bp)) __bnx2x_get_page_read_num()
780 else if (CHIP_IS_E3(bp)) __bnx2x_get_page_read_num()
786 static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, bnx2x_is_reg_in_chip() argument
789 if (CHIP_IS_E1(bp)) bnx2x_is_reg_in_chip()
791 else if (CHIP_IS_E1H(bp)) bnx2x_is_reg_in_chip()
793 else if (CHIP_IS_E2(bp)) bnx2x_is_reg_in_chip()
795 else if (CHIP_IS_E3A0(bp)) bnx2x_is_reg_in_chip()
797 else if (CHIP_IS_E3B0(bp)) bnx2x_is_reg_in_chip()
803 static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, bnx2x_is_wreg_in_chip() argument
806 if (CHIP_IS_E1(bp)) bnx2x_is_wreg_in_chip()
808 else if (CHIP_IS_E1H(bp)) bnx2x_is_wreg_in_chip()
810 else if (CHIP_IS_E2(bp)) bnx2x_is_wreg_in_chip()
812 else if (CHIP_IS_E3A0(bp)) bnx2x_is_wreg_in_chip()
814 else if (CHIP_IS_E3B0(bp)) bnx2x_is_wreg_in_chip()
823 * @bp device handle
831 static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset) bnx2x_read_pages_regs() argument
836 const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); bnx2x_read_pages_regs()
838 int num_pages = __bnx2x_get_page_reg_num(bp); bnx2x_read_pages_regs()
840 const u32 *write_addr = __bnx2x_get_page_write_ar(bp); bnx2x_read_pages_regs()
842 int write_num = __bnx2x_get_page_write_num(bp); bnx2x_read_pages_regs()
844 const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); bnx2x_read_pages_regs()
846 int read_num = __bnx2x_get_page_read_num(bp); bnx2x_read_pages_regs()
851 REG_WR(bp, write_addr[j], page_addr[i]); bnx2x_read_pages_regs()
859 *p++ = REG_RD(bp, addr); bnx2x_read_pages_regs()
867 static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) __bnx2x_get_preset_regs() argument
872 if (CHIP_IS_E1(bp)) __bnx2x_get_preset_regs()
874 else if (CHIP_IS_E1H(bp)) __bnx2x_get_preset_regs()
876 else if (CHIP_IS_E2(bp)) __bnx2x_get_preset_regs()
878 else if (CHIP_IS_E3A0(bp)) __bnx2x_get_preset_regs()
880 else if (CHIP_IS_E3B0(bp)) __bnx2x_get_preset_regs()
885 if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) && __bnx2x_get_preset_regs()
888 *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4); __bnx2x_get_preset_regs()
894 if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) && __bnx2x_get_preset_regs()
897 *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); __bnx2x_get_preset_regs()
902 if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) && __bnx2x_get_preset_regs()
905 *p++ = REG_RD(bp, wreg_addr_p->addr + i*4); __bnx2x_get_preset_regs()
912 *p++ = REG_RD(bp, addr + j*4); __bnx2x_get_preset_regs()
918 if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { __bnx2x_get_preset_regs()
920 bnx2x_read_pages_regs(bp, p, preset); __bnx2x_get_preset_regs()
926 static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) __bnx2x_get_regs() argument
938 __bnx2x_get_preset_regs(bp, p, preset_idx); __bnx2x_get_regs()
939 p += __bnx2x_get_preset_regs_len(bp, preset_idx); __bnx2x_get_regs()
947 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_regs() local
953 if (!netif_running(bp->dev)) bnx2x_get_regs()
961 bnx2x_disable_blocks_parity(bp); bnx2x_get_regs()
968 if (CHIP_IS_E1(bp)) { bnx2x_get_regs()
970 } else if (CHIP_IS_E1H(bp)) { bnx2x_get_regs()
972 } else if (CHIP_IS_E2(bp)) { bnx2x_get_regs()
974 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs()
975 } else if (CHIP_IS_E3A0(bp)) { bnx2x_get_regs()
977 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs()
978 } else if (CHIP_IS_E3B0(bp)) { bnx2x_get_regs()
980 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_regs()
987 __bnx2x_get_regs(bp, p); bnx2x_get_regs()
990 bnx2x_clear_blocks_parity(bp); bnx2x_get_regs()
991 bnx2x_enable_blocks_parity(bp); bnx2x_get_regs()
996 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_preset_regs_len() local
999 regdump_len = __bnx2x_get_preset_regs_len(bp, preset); bnx2x_get_preset_regs_len()
1008 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_dump() local
1014 bp->dump_preset_idx = val->flag; bnx2x_set_dump()
1021 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_dump_flag() local
1024 dump->flag = bp->dump_preset_idx; bnx2x_get_dump_flag()
1026 dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); bnx2x_get_dump_flag()
1028 bp->dump_preset_idx, dump->len); bnx2x_get_dump_flag()
1037 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_dump_data() local
1045 bnx2x_disable_blocks_parity(bp); bnx2x_get_dump_data()
1048 dump_hdr.preset = bp->dump_preset_idx; bnx2x_get_dump_data()
1054 if (CHIP_IS_E1(bp)) { bnx2x_get_dump_data()
1056 } else if (CHIP_IS_E1H(bp)) { bnx2x_get_dump_data()
1058 } else if (CHIP_IS_E2(bp)) { bnx2x_get_dump_data()
1060 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data()
1061 } else if (CHIP_IS_E3A0(bp)) { bnx2x_get_dump_data()
1063 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data()
1064 } else if (CHIP_IS_E3B0(bp)) { bnx2x_get_dump_data()
1066 (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); bnx2x_get_dump_data()
1073 __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); bnx2x_get_dump_data()
1076 bnx2x_clear_blocks_parity(bp); bnx2x_get_dump_data()
1077 bnx2x_enable_blocks_parity(bp); bnx2x_get_dump_data()
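
Both register-dump paths above bracket the raw reads the same way: disable block parity checking before touching the registers, then clear whatever the dump latched and re-enable parity afterwards. A stub-level sketch of the bracket:

#include <stdio.h>

static void parity_disable(void) { puts("parity off"); }
static void parity_clear(void)   { puts("parity cleared"); }
static void parity_enable(void)  { puts("parity on"); }
static void dump_regs(void)      { puts("dump registers"); }

int main(void)
{
	parity_disable();	/* raw reads of some blocks would trip parity */
	dump_regs();
	parity_clear();		/* drop anything the dump itself latched */
	parity_enable();
	return 0;
}
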
1085 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_drvinfo() local
1090 bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); bnx2x_get_drvinfo()
1092 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); bnx2x_get_drvinfo()
1097 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_wol() local
1099 if (bp->flags & NO_WOL_FLAG) { bnx2x_get_wol()
1104 if (bp->wol) bnx2x_get_wol()
1114 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_wol() local
1122 if (bp->flags & NO_WOL_FLAG) { bnx2x_set_wol()
1126 bp->wol = 1; bnx2x_set_wol()
1128 bp->wol = 0; bnx2x_set_wol()
1130 if (SHMEM2_HAS(bp, curr_cfg)) bnx2x_set_wol()
1131 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); bnx2x_set_wol()
1138 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_msglevel() local
1140 return bp->msg_enable; bnx2x_get_msglevel()
1145 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_msglevel() local
1149 if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) bnx2x_set_msglevel()
1150 bnx2x_fw_dump_lvl(bp, KERN_INFO); bnx2x_set_msglevel()
1151 bp->msg_enable = level; bnx2x_set_msglevel()
1157 struct bnx2x *bp = netdev_priv(dev); bnx2x_nway_reset() local
1159 if (!bp->port.pmf) bnx2x_nway_reset()
1163 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_nway_reset()
1164 bnx2x_force_link_reset(bp); bnx2x_nway_reset()
1165 bnx2x_link_set(bp); bnx2x_nway_reset()
1173 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_link() local
1175 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) bnx2x_get_link()
1178 if (IS_VF(bp)) bnx2x_get_link()
1180 &bp->vf_link_vars.link_report_flags); bnx2x_get_link()
1182 return bp->link_vars.link_up; bnx2x_get_link()
1187 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eeprom_len() local
1189 return bp->common.flash_size; bnx2x_get_eeprom_len()
1205 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) bnx2x_acquire_nvram_lock() argument
1207 int port = BP_PORT(bp); bnx2x_acquire_nvram_lock()
1212 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); bnx2x_acquire_nvram_lock()
1216 if (CHIP_REV_IS_SLOW(bp)) bnx2x_acquire_nvram_lock()
1220 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_acquire_nvram_lock()
1224 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); bnx2x_acquire_nvram_lock()
1234 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); bnx2x_acquire_nvram_lock()
1241 static int bnx2x_release_nvram_lock(struct bnx2x *bp) bnx2x_release_nvram_lock() argument
1243 int port = BP_PORT(bp); bnx2x_release_nvram_lock()
1249 if (CHIP_REV_IS_SLOW(bp)) bnx2x_release_nvram_lock()
1253 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, bnx2x_release_nvram_lock()
1257 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); bnx2x_release_nvram_lock()
1271 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); bnx2x_release_nvram_lock()
1275 static void bnx2x_enable_nvram_access(struct bnx2x *bp) bnx2x_enable_nvram_access() argument
1279 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); bnx2x_enable_nvram_access()
1282 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, bnx2x_enable_nvram_access()
1287 static void bnx2x_disable_nvram_access(struct bnx2x *bp) bnx2x_disable_nvram_access() argument
1291 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); bnx2x_disable_nvram_access()
1294 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, bnx2x_disable_nvram_access()
1299 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, bnx2x_nvram_read_dword() argument
1309 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); bnx2x_nvram_read_dword()
1312 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, bnx2x_nvram_read_dword()
1316 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); bnx2x_nvram_read_dword()
1320 if (CHIP_REV_IS_SLOW(bp)) bnx2x_nvram_read_dword()
1328 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); bnx2x_nvram_read_dword()
1331 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); bnx2x_nvram_read_dword()
1347 int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, bnx2x_nvram_read() argument
1361 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_read()
1364 offset, buf_size, bp->common.flash_size); bnx2x_nvram_read()
1369 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_read()
1374 bnx2x_enable_nvram_access(bp); bnx2x_nvram_read()
1379 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); bnx2x_nvram_read()
1391 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); bnx2x_nvram_read()
1396 bnx2x_disable_nvram_access(bp); bnx2x_nvram_read()
1397 bnx2x_release_nvram_lock(bp); bnx2x_nvram_read()
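
bnx2x_nvram_read_dword() above is a classic polled-mailbox handshake: clear the previous DONE, program the address register, issue the command flags, poll for completion, then latch the data register. A userspace model in which plain variables stand in for the REG_RD/REG_WR accesses:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdbool.h>

static uint32_t reg_cmd, reg_addr, reg_data;

static bool nvm_done(void)
{
	return true;	/* the hardware would raise DONE when ready */
}

static int nvram_read_dword(uint32_t offset, uint32_t *val)
{
	reg_cmd  = 0;			/* clear the previous DONE bit */
	reg_addr = offset & 0xffffff;	/* program the address */
	reg_cmd  = 1;			/* issue the read command */

	for (int i = 0; i < 10; i++) {	/* bounded poll, as above */
		if (nvm_done()) {
			*val = reg_data;	/* latch the data register */
			return 0;
		}
	}
	return -1;			/* timed out */
}

int main(void)
{
	uint32_t v = 0;
	int rc = nvram_read_dword(0x100, &v);

	printf("rc=%d val=%" PRIu32 "\n", rc, v);
	return rc;
}
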
1402 static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, bnx2x_nvram_read32() argument
1407 rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size); bnx2x_nvram_read32()
1419 static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) bnx2x_is_nvm_accessible() argument
1423 struct net_device *dev = pci_get_drvdata(bp->pdev); bnx2x_is_nvm_accessible()
1425 if (bp->pdev->pm_cap) bnx2x_is_nvm_accessible()
1426 rc = pci_read_config_word(bp->pdev, bnx2x_is_nvm_accessible()
1427 bp->pdev->pm_cap + PCI_PM_CTRL, &pm); bnx2x_is_nvm_accessible()
1439 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eeprom() local
1441 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_eeprom()
1454 return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); bnx2x_get_eeprom()
1461 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_module_eeprom() local
1466 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_module_eeprom()
1472 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_module_eeprom()
1481 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_eeprom()
1482 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_eeprom()
1483 &bp->link_params, bnx2x_get_module_eeprom()
1488 bnx2x_release_phy_lock(bp); bnx2x_get_module_eeprom()
1506 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_eeprom()
1507 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_eeprom()
1508 &bp->link_params, bnx2x_get_module_eeprom()
1513 bnx2x_release_phy_lock(bp); bnx2x_get_module_eeprom()
1525 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_module_info() local
1529 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_get_module_info()
1534 phy_idx = bnx2x_get_cur_phy_idx(bp); bnx2x_get_module_info()
1535 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_info()
1536 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_info()
1537 &bp->link_params, bnx2x_get_module_info()
1542 bnx2x_release_phy_lock(bp); bnx2x_get_module_info()
1548 bnx2x_acquire_phy_lock(bp); bnx2x_get_module_info()
1549 rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], bnx2x_get_module_info()
1550 &bp->link_params, bnx2x_get_module_info()
1555 bnx2x_release_phy_lock(bp); bnx2x_get_module_info()
1572 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, bnx2x_nvram_write_dword() argument
1581 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); bnx2x_nvram_write_dword()
1584 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); bnx2x_nvram_write_dword()
1587 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, bnx2x_nvram_write_dword()
1591 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); bnx2x_nvram_write_dword()
1595 if (CHIP_REV_IS_SLOW(bp)) bnx2x_nvram_write_dword()
1602 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); bnx2x_nvram_write_dword()
1617 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, bnx2x_nvram_write1() argument
1624 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_write1()
1627 offset, buf_size, bp->common.flash_size); bnx2x_nvram_write1()
1632 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_write1()
1637 bnx2x_enable_nvram_access(bp); bnx2x_nvram_write1()
1641 rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags); bnx2x_nvram_write1()
1654 rc = bnx2x_nvram_write_dword(bp, align_offset, val, bnx2x_nvram_write1()
1659 bnx2x_disable_nvram_access(bp); bnx2x_nvram_write1()
1660 bnx2x_release_nvram_lock(bp); bnx2x_nvram_write1()
1665 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, bnx2x_nvram_write() argument
1674 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); bnx2x_nvram_write()
1683 if (offset + buf_size > bp->common.flash_size) { bnx2x_nvram_write()
1686 offset, buf_size, bp->common.flash_size); bnx2x_nvram_write()
1691 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_write()
1696 bnx2x_enable_nvram_access(bp); bnx2x_nvram_write()
1716 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); bnx2x_nvram_write()
1731 bnx2x_release_nvram_lock(bp); bnx2x_nvram_write()
1733 rc = bnx2x_acquire_nvram_lock(bp); bnx2x_nvram_write()
1742 bnx2x_disable_nvram_access(bp); bnx2x_nvram_write()
1743 bnx2x_release_nvram_lock(bp); bnx2x_nvram_write()
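
[Editor's note] For writes smaller than one dword, bnx2x_nvram_write1() above does read-modify-write on the containing aligned dword. A minimal sketch of that idea, with a fake flash array standing in for the hardware (the real code also round-trips the dword through __be32, which is skipped here):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint32_t fake_flash[16];   /* stand-in NVRAM, dword-addressed */

    /* Assumes len <= 4 - (off & 3), i.e. the bytes stay in one dword. */
    static void nvram_write1(uint32_t off, const uint8_t *data, size_t len)
    {
        uint32_t aligned = off & ~3u;                 /* containing dword */
        uint8_t dword[4];

        memcpy(dword, &fake_flash[aligned / 4], 4);   /* read   */
        memcpy(dword + (off & 3), data, len);         /* modify */
        memcpy(&fake_flash[aligned / 4], dword, 4);   /* write  */
    }

    int main(void)
    {
        uint8_t b = 0xab;

        fake_flash[0] = 0x11223344;
        nvram_write1(2, &b, 1);        /* patch one byte mid-dword */
        printf("%08x\n", fake_flash[0]);
        return 0;
    }
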
1751 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_eeprom() local
1752 int port = BP_PORT(bp); bnx2x_set_eeprom()
1756 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_set_eeprom()
1771 !bp->port.pmf) { bnx2x_set_eeprom()
1778 SHMEM_RD(bp, bnx2x_set_eeprom()
1783 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_eeprom()
1785 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom()
1786 rc |= bnx2x_link_reset(&bp->link_params, bnx2x_set_eeprom()
1787 &bp->link_vars, 0); bnx2x_set_eeprom()
1790 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_set_eeprom()
1792 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom()
1793 bnx2x_link_report(bp); bnx2x_set_eeprom()
1797 if (bp->state == BNX2X_STATE_OPEN) { bnx2x_set_eeprom()
1798 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom()
1799 rc |= bnx2x_link_reset(&bp->link_params, bnx2x_set_eeprom()
1800 &bp->link_vars, 1); bnx2x_set_eeprom()
1802 rc |= bnx2x_phy_init(&bp->link_params, bnx2x_set_eeprom()
1803 &bp->link_vars); bnx2x_set_eeprom()
1804 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom()
1805 bnx2x_calc_fc_adv(bp); bnx2x_set_eeprom()
1813 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_set_eeprom()
1816 bnx2x_acquire_phy_lock(bp); bnx2x_set_eeprom()
1818 bnx2x_sfx7101_sp_sw_reset(bp, bnx2x_set_eeprom()
1819 &bp->link_params.phy[EXT_PHY1]); bnx2x_set_eeprom()
1823 bnx2x_ext_phy_hw_reset(bp, port); bnx2x_set_eeprom()
1825 bnx2x_release_phy_lock(bp); bnx2x_set_eeprom()
1828 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); bnx2x_set_eeprom()
1836 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_coalesce() local
1840 coal->rx_coalesce_usecs = bp->rx_ticks; bnx2x_get_coalesce()
1841 coal->tx_coalesce_usecs = bp->tx_ticks; bnx2x_get_coalesce()
1849 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_coalesce() local
1851 bp->rx_ticks = (u16)coal->rx_coalesce_usecs; bnx2x_set_coalesce()
1852 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) bnx2x_set_coalesce()
1853 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; bnx2x_set_coalesce()
1855 bp->tx_ticks = (u16)coal->tx_coalesce_usecs; bnx2x_set_coalesce()
1856 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) bnx2x_set_coalesce()
1857 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; bnx2x_set_coalesce()
1860 bnx2x_update_coalesce(bp); bnx2x_set_coalesce()
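
[Editor's note] The set_coalesce hits above show the clamp order: the user value is truncated to u16 first, then capped. A tiny sketch; MAX_TOUT is a placeholder, not the real BNX2X_MAX_COALESCE_TOUT value:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TOUT 0x3000u   /* placeholder for BNX2X_MAX_COALESCE_TOUT */

    static uint16_t coalesce_ticks(uint32_t usecs)
    {
        uint16_t t = (uint16_t)usecs;   /* truncation happens first, as above */
        return t > MAX_TOUT ? MAX_TOUT : t;
    }

    int main(void)
    {
        /* note 0x20000 truncates to 0 before the cap is ever checked */
        printf("%u %u %u\n", coalesce_ticks(100),
               coalesce_ticks(0xFFFF), coalesce_ticks(0x20000));
        return 0;
    }
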
1868 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ringparam() local
1872 if (bp->rx_ring_size) bnx2x_get_ringparam()
1873 ering->rx_pending = bp->rx_ring_size; bnx2x_get_ringparam()
1877 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; bnx2x_get_ringparam()
1878 ering->tx_pending = bp->tx_ring_size; bnx2x_get_ringparam()
1884 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_ringparam() local
1890 if (pci_num_vf(bp->pdev)) { bnx2x_set_ringparam()
1896 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_set_ringparam()
1903 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : bnx2x_set_ringparam()
1905 (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) || bnx2x_set_ringparam()
1911 bp->rx_ring_size = ering->rx_pending; bnx2x_set_ringparam()
1912 bp->tx_ring_size = ering->tx_pending; bnx2x_set_ringparam()
1920 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_pauseparam() local
1921 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_get_pauseparam()
1924 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == bnx2x_get_pauseparam()
1928 cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx]; bnx2x_get_pauseparam()
1930 cfg_reg = bp->link_params.req_fc_auto_adv; bnx2x_get_pauseparam()
1945 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_pauseparam() local
1946 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_set_pauseparam()
1947 if (IS_MF(bp)) bnx2x_set_pauseparam()
1954 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; bnx2x_set_pauseparam()
1957 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX; bnx2x_set_pauseparam()
1960 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX; bnx2x_set_pauseparam()
1962 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO) bnx2x_set_pauseparam()
1963 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE; bnx2x_set_pauseparam()
1966 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { bnx2x_set_pauseparam()
1971 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) { bnx2x_set_pauseparam()
1972 bp->link_params.req_flow_ctrl[cfg_idx] = bnx2x_set_pauseparam()
1975 bp->link_params.req_fc_auto_adv = 0; bnx2x_set_pauseparam()
1977 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; bnx2x_set_pauseparam()
1980 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; bnx2x_set_pauseparam()
1982 if (!bp->link_params.req_fc_auto_adv) bnx2x_set_pauseparam()
1983 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE; bnx2x_set_pauseparam()
1987 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); bnx2x_set_pauseparam()
1990 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_pauseparam()
1991 bnx2x_force_link_reset(bp); bnx2x_set_pauseparam()
1992 bnx2x_link_set(bp); bnx2x_set_pauseparam()
2050 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_eee() local
2053 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { bnx2x_get_eee()
2058 eee_cfg = bp->link_vars.eee_status; bnx2x_get_eee()
2083 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_eee() local
2087 if (IS_MF(bp)) bnx2x_set_eee()
2090 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { bnx2x_set_eee()
2095 eee_cfg = bp->link_vars.eee_status; bnx2x_set_eee()
2126 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI; bnx2x_set_eee()
2128 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI; bnx2x_set_eee()
2131 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI; bnx2x_set_eee()
2133 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI; bnx2x_set_eee()
2135 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK; bnx2x_set_eee()
2136 bp->link_params.eee_mode |= (edata->tx_lpi_timer & bnx2x_set_eee()
2143 bnx2x_stats_handle(bp, STATS_EVENT_STOP); bnx2x_set_eee()
2144 bnx2x_force_link_reset(bp); bnx2x_set_eee()
2145 bnx2x_link_set(bp); bnx2x_set_eee()
2169 static int bnx2x_test_registers(struct bnx2x *bp) bnx2x_test_registers() argument
2173 int port = BP_PORT(bp); bnx2x_test_registers()
2260 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_test_registers()
2266 if (CHIP_IS_E1(bp)) bnx2x_test_registers()
2268 else if (CHIP_IS_E1H(bp)) bnx2x_test_registers()
2270 else if (CHIP_IS_E2(bp)) bnx2x_test_registers()
2272 else if (CHIP_IS_E3B0(bp)) bnx2x_test_registers()
2299 save_val = REG_RD(bp, offset); bnx2x_test_registers()
2301 REG_WR(bp, offset, wr_val & mask); bnx2x_test_registers()
2303 val = REG_RD(bp, offset); bnx2x_test_registers()
2306 REG_WR(bp, offset, save_val); bnx2x_test_registers()
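
[Editor's note] The save/write/read-back/restore step of bnx2x_test_registers() above, condensed into a sketch. The "register file" is just an array and the offset/mask values are invented; only the test shape mirrors the driver.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[8];   /* fake register file */

    static uint32_t reg_rd(uint32_t off)             { return regs[off]; }
    static void     reg_wr(uint32_t off, uint32_t v) { regs[off] = v;    }

    static int test_register(uint32_t off, uint32_t wr_val, uint32_t mask)
    {
        uint32_t save = reg_rd(off);          /* save original         */
        uint32_t val;

        reg_wr(off, wr_val & mask);           /* write through the mask */
        val = reg_rd(off);                    /* read back              */
        reg_wr(off, save);                    /* restore original       */

        return (val & mask) != (wr_val & mask);   /* nonzero on failure */
    }

    int main(void)
    {
        regs[3] = 0xdeadbeef;
        printf("fail=%d restored=%08x\n",
               test_register(3, 0xffffffff, 0x00ff00ff), regs[3]);
        return 0;
    }
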
2324 static int bnx2x_test_memory(struct bnx2x *bp) bnx2x_test_memory() argument
2364 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_test_memory()
2370 if (CHIP_IS_E1(bp)) bnx2x_test_memory()
2372 else if (CHIP_IS_E1H(bp)) bnx2x_test_memory()
2374 else if (CHIP_IS_E2(bp)) bnx2x_test_memory()
2381 val = REG_RD(bp, prty_tbl[i].offset); bnx2x_test_memory()
2392 REG_RD(bp, mem_tbl[i].offset + j*4); bnx2x_test_memory()
2396 val = REG_RD(bp, prty_tbl[i].offset); bnx2x_test_memory()
2410 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) bnx2x_wait_for_link() argument
2415 while (bnx2x_link_test(bp, is_serdes) && cnt--) bnx2x_wait_for_link()
2418 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) bnx2x_wait_for_link()
2422 while (!bp->link_vars.link_up && cnt--) bnx2x_wait_for_link()
2425 if (cnt <= 0 && !bp->link_vars.link_up) bnx2x_wait_for_link()
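
[Editor's note] bnx2x_wait_for_link() above is the bounded-poll idiom: retry a condition a fixed number of times with a delay between attempts, then give up. A stand-alone sketch; the condition and the (omitted) sleep are stand-ins for the driver's link check and msleep():

    #include <stdbool.h>
    #include <stdio.h>

    static int polls;
    static bool link_up(void) { return ++polls > 3; }  /* fake: up on 4th poll */

    static bool wait_for(bool (*cond)(void), int tries)
    {
        for (int i = 0; i < tries; i++) {
            if (cond())
                return true;
            /* the driver would msleep() here between attempts */
        }
        return false;
    }

    int main(void)
    {
        printf("%s after %d polls\n",
               wait_for(link_up, 10) ? "up" : "down", polls);
        return 0;
    }
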
2431 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_run_loopback() argument
2436 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; bnx2x_run_loopback()
2437 struct bnx2x_fastpath *fp_tx = &bp->fp[0]; bnx2x_run_loopback()
2451 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, bnx2x_run_loopback()
2457 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { bnx2x_run_loopback()
2463 if (CHIP_IS_E3(bp)) { bnx2x_run_loopback()
2464 int cfg_idx = bnx2x_get_link_cfg_idx(bp); bnx2x_run_loopback()
2465 if (bp->port.supported[cfg_idx] & bnx2x_run_loopback()
2469 bp->link_params.loopback_mode = LOOPBACK_XMAC; bnx2x_run_loopback()
2471 bp->link_params.loopback_mode = LOOPBACK_UMAC; bnx2x_run_loopback()
2473 bp->link_params.loopback_mode = LOOPBACK_BMAC; bnx2x_run_loopback()
2475 bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_run_loopback()
2478 if (bp->link_params.loopback_mode != LOOPBACK_EXT) { bnx2x_run_loopback()
2490 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? bnx2x_run_loopback()
2491 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); bnx2x_run_loopback()
2492 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); bnx2x_run_loopback()
2499 memcpy(packet, bp->dev->dev_addr, ETH_ALEN); bnx2x_run_loopback()
2504 mapping = dma_map_single(&bp->pdev->dev, skb->data, bnx2x_run_loopback()
2506 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { bnx2x_run_loopback()
2544 if (CHIP_IS_E1x(bp)) { bnx2x_run_loopback()
2565 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); bnx2x_run_loopback()
2583 if (bp->common.int_block == INT_BLOCK_IGU) { bnx2x_run_loopback()
2589 bnx2x_tx_int(bp, txdata); bnx2x_run_loopback()
2608 dma_sync_single_for_cpu(&bp->pdev->dev, bnx2x_run_loopback()
2626 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, bnx2x_run_loopback()
2630 bp->link_params.loopback_mode = LOOPBACK_NONE; bnx2x_run_loopback()
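
[Editor's note] Stripped of ring and DMA management, the loopback test above reduces to: build a frame addressed to the device's own MAC with a known payload, push it through the loopback path, and byte-compare the result. A compressed sketch, with a plain copy standing in for the TX doorbell and RX completion:

    #include <string.h>
    #include <stdio.h>

    #define PKT_SIZE 64

    static void fake_loopback(const unsigned char *tx, unsigned char *rx)
    {
        memcpy(rx, tx, PKT_SIZE);   /* stand-in for doorbell + completion */
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        unsigned char tx[PKT_SIZE], rx[PKT_SIZE];

        memcpy(tx, mac, sizeof(mac));              /* DA = own MAC  */
        for (int i = 6; i < PKT_SIZE; i++)
            tx[i] = (unsigned char)(i & 0xff);     /* known payload */

        fake_loopback(tx, rx);
        printf("loopback %s\n", memcmp(tx, rx, PKT_SIZE) ? "FAILED" : "ok");
        return 0;
    }
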
2635 static int bnx2x_test_loopback(struct bnx2x *bp) bnx2x_test_loopback() argument
2639 if (BP_NOMCP(bp)) bnx2x_test_loopback()
2642 if (!netif_running(bp->dev)) bnx2x_test_loopback()
2645 bnx2x_netif_stop(bp, 1); bnx2x_test_loopback()
2646 bnx2x_acquire_phy_lock(bp); bnx2x_test_loopback()
2648 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); bnx2x_test_loopback()
2654 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); bnx2x_test_loopback()
2660 bnx2x_release_phy_lock(bp); bnx2x_test_loopback()
2661 bnx2x_netif_start(bp); bnx2x_test_loopback()
2666 static int bnx2x_test_ext_loopback(struct bnx2x *bp) bnx2x_test_ext_loopback() argument
2670 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; bnx2x_test_ext_loopback()
2672 if (BP_NOMCP(bp)) bnx2x_test_ext_loopback()
2675 if (!netif_running(bp->dev)) bnx2x_test_ext_loopback()
2678 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_test_ext_loopback()
2679 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); bnx2x_test_ext_loopback()
2685 bnx2x_wait_for_link(bp, 1, is_serdes); bnx2x_test_ext_loopback()
2687 bnx2x_netif_stop(bp, 1); bnx2x_test_ext_loopback()
2689 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); bnx2x_test_ext_loopback()
2693 bnx2x_netif_start(bp); bnx2x_test_ext_loopback()
2720 static int bnx2x_nvram_crc(struct bnx2x *bp, bnx2x_nvram_crc() argument
2734 rc = bnx2x_nvram_read(bp, offset + done, buff, count); bnx2x_nvram_crc()
2749 static int bnx2x_test_nvram_dir(struct bnx2x *bp, bnx2x_test_nvram_dir() argument
2761 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff); bnx2x_test_nvram_dir()
2769 static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff) bnx2x_test_dir_entry() argument
2774 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry)); bnx2x_test_dir_entry()
2778 return bnx2x_test_nvram_dir(bp, &entry, buff); bnx2x_test_dir_entry()
2781 static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff) bnx2x_test_nvram_ext_dirs() argument
2787 rc = bnx2x_nvram_read32(bp, bnx2x_test_nvram_ext_dirs()
2797 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr, bnx2x_test_nvram_ext_dirs()
2805 rc = bnx2x_test_dir_entry(bp, dir_offset + bnx2x_test_nvram_ext_dirs()
2815 static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff) bnx2x_test_nvram_dirs() argument
2823 rc = bnx2x_test_dir_entry(bp, dir_offset + bnx2x_test_nvram_dirs()
2830 return bnx2x_test_nvram_ext_dirs(bp, buff); bnx2x_test_nvram_dirs()
2838 static int bnx2x_test_nvram_tbl(struct bnx2x *bp, bnx2x_test_nvram_tbl() argument
2844 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset, bnx2x_test_nvram_tbl()
2857 static int bnx2x_test_nvram(struct bnx2x *bp) bnx2x_test_nvram() argument
2878 if (BP_NOMCP(bp)) bnx2x_test_nvram()
2888 rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic)); bnx2x_test_nvram()
2903 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf); bnx2x_test_nvram()
2907 if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) { bnx2x_test_nvram()
2908 u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & bnx2x_test_nvram()
2914 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf); bnx2x_test_nvram()
2920 rc = bnx2x_test_nvram_dirs(bp, buf); bnx2x_test_nvram()
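
[Editor's note] bnx2x_nvram_crc() above reads each NVRAM region in fixed-size chunks and folds it into a running CRC. A sketch of that chunked walk using the common reflected CRC-32 (poly 0xEDB88320); treat the exact polynomial and seed as an assumption here, not the driver's spec:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    static uint32_t crc32_update(uint32_t crc, const uint8_t *p, size_t n)
    {
        crc = ~crc;                          /* un-finalize to chain calls */
        while (n--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return ~crc;
    }

    int main(void)
    {
        uint8_t region[64] = { 0x42 };       /* stand-in NVRAM contents */
        uint32_t crc = 0;

        /* chunked, as the driver does via repeated bnx2x_nvram_read() */
        for (size_t off = 0; off < sizeof(region); off += 16)
            crc = crc32_update(crc, region + off, 16);

        printf("crc32 = 0x%08x\n", crc);
        return 0;
    }
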
2928 static int bnx2x_test_intr(struct bnx2x *bp) bnx2x_test_intr() argument
2932 if (!netif_running(bp->dev)) { bnx2x_test_intr()
2938 params.q_obj = &bp->sp_objs->q_obj; bnx2x_test_intr()
2943 return bnx2x_queue_state_change(bp, &params); bnx2x_test_intr()
2949 struct bnx2x *bp = netdev_priv(dev); bnx2x_self_test() local
2953 if (pci_num_vf(bp->pdev)) { bnx2x_self_test()
2959 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_self_test()
2960 netdev_err(bp->dev, bnx2x_self_test()
2971 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); bnx2x_self_test()
2973 if (bnx2x_test_nvram(bp) != 0) { bnx2x_self_test()
2974 if (!IS_MF(bp)) bnx2x_self_test()
2986 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; bnx2x_self_test()
2987 link_up = bp->link_vars.link_up; bnx2x_self_test()
2989 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { bnx2x_self_test()
2990 int port = BP_PORT(bp); bnx2x_self_test()
2994 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); bnx2x_self_test()
2996 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); bnx2x_self_test()
2998 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_self_test()
2999 rc = bnx2x_nic_load(bp, LOAD_DIAG); bnx2x_self_test()
3008 bnx2x_wait_for_link(bp, 1, is_serdes); bnx2x_self_test()
3010 if (bnx2x_test_registers(bp) != 0) { bnx2x_self_test()
3014 if (bnx2x_test_memory(bp) != 0) { bnx2x_self_test()
3019 buf[2] = bnx2x_test_loopback(bp); /* internal LB */ bnx2x_self_test()
3024 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ bnx2x_self_test()
3030 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); bnx2x_self_test()
3033 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); bnx2x_self_test()
3034 rc = bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_self_test()
3042 bnx2x_wait_for_link(bp, link_up, is_serdes); bnx2x_self_test()
3045 if (bnx2x_test_intr(bp) != 0) { bnx2x_self_test()
3046 if (!IS_MF(bp)) bnx2x_self_test()
3055 while (bnx2x_link_test(bp, is_serdes) && --cnt) bnx2x_self_test()
3060 if (!IS_MF(bp)) bnx2x_self_test()
3071 #define HIDE_PORT_STAT(bp) \
3072 ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
3073 IS_VF(bp))
3078 static int bnx2x_num_stat_queues(struct bnx2x *bp) bnx2x_num_stat_queues() argument
3080 return BNX2X_NUM_ETH_QUEUES(bp); bnx2x_num_stat_queues()
3085 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_sset_count() local
3090 if (is_multi(bp)) { bnx2x_get_sset_count()
3091 num_strings = bnx2x_num_stat_queues(bp) * bnx2x_get_sset_count()
3095 if (HIDE_PORT_STAT(bp)) { bnx2x_get_sset_count()
3105 return BNX2X_NUM_TESTS(bp); bnx2x_get_sset_count()
3117 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_private_flags() local
3120 flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; bnx2x_get_private_flags()
3121 flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE; bnx2x_get_private_flags()
3122 flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; bnx2x_get_private_flags()
3129 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_strings() local
3136 if (is_multi(bp)) { for_each_eth_queue()
3137 for_each_eth_queue(bp, i) { for_each_eth_queue()
3150 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
3161 if (!IS_MF(bp))
3166 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
3179 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ethtool_stats() local
3183 if (is_multi(bp)) { for_each_eth_queue()
3184 for_each_eth_queue(bp, i) { for_each_eth_queue()
3185 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; for_each_eth_queue()
3206 hw_stats = (u32 *)&bp->eth_stats;
3208 if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
3232 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_phys_id() local
3234 if (!bnx2x_is_nvm_accessible(bp)) { bnx2x_set_phys_id()
3245 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id()
3246 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id()
3248 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id()
3252 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id()
3253 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id()
3255 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id()
3259 bnx2x_acquire_phy_lock(bp); bnx2x_set_phys_id()
3260 bnx2x_set_led(&bp->link_params, &bp->link_vars, bnx2x_set_phys_id()
3262 bp->link_vars.line_speed); bnx2x_set_phys_id()
3263 bnx2x_release_phy_lock(bp); bnx2x_set_phys_id()
3269 static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) bnx2x_get_rss_flags() argument
3278 if (bp->rss_conf_obj.udp_rss_v4) bnx2x_get_rss_flags()
3285 if (bp->rss_conf_obj.udp_rss_v6) bnx2x_get_rss_flags()
3306 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_rxnfc() local
3310 info->data = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_get_rxnfc()
3313 return bnx2x_get_rss_flags(bp, info); bnx2x_get_rxnfc()
3320 static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) bnx2x_set_rss_flags() argument
3351 if (CHIP_IS_E1x(bp) && udp_rss_requested) { bnx2x_set_rss_flags()
3358 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bnx2x_set_rss_flags()
3359 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; bnx2x_set_rss_flags()
3363 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); bnx2x_set_rss_flags()
3365 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bnx2x_set_rss_flags()
3366 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; bnx2x_set_rss_flags()
3370 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); bnx2x_set_rss_flags()
3409 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rxnfc() local
3413 return bnx2x_set_rss_flags(bp, info); bnx2x_set_rxnfc()
3428 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_rxfh() local
3438 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); bnx2x_get_rxfh()
3450 indir[i] = ind_table[i] - bp->fp->cl_id; bnx2x_get_rxfh()
3458 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_rxfh() local
3481 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; bnx2x_set_rxfh()
3484 return bnx2x_config_rss_eth(bp, false); bnx2x_set_rxfh()
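
[Editor's note] The get_rxfh/set_rxfh hits above show the same translation in both directions: ethtool's indirection table holds queue indices while the hardware table holds client IDs, so the driver adds or subtracts the first client ID (fp->cl_id). A sketch; TBL_SIZE and CL_ID_BASE are invented:

    #include <stdio.h>

    #define TBL_SIZE   8     /* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */
    #define CL_ID_BASE 16    /* stand-in for bp->fp->cl_id */

    int main(void)
    {
        unsigned indir[TBL_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };  /* queue ids */
        unsigned hw[TBL_SIZE];

        for (int i = 0; i < TBL_SIZE; i++)
            hw[i] = indir[i] + CL_ID_BASE;      /* set path: queue -> client */

        for (int i = 0; i < TBL_SIZE; i++)
            printf("%u ", hw[i] - CL_ID_BASE);  /* get path: client -> queue */
        printf("\n");
        return 0;
    }
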
3496 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_channels() local
3498 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); bnx2x_get_channels()
3499 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_get_channels()
3505 * @bp: bnx2x private structure
3510 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) bnx2x_change_num_queues() argument
3512 bnx2x_disable_msi(bp); bnx2x_change_num_queues()
3513 bp->num_ethernet_queues = num_rss; bnx2x_change_num_queues()
3514 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; bnx2x_change_num_queues()
3515 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); bnx2x_change_num_queues()
3516 bnx2x_set_int_mode(bp); bnx2x_change_num_queues()
3528 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_channels() local
3535 if (pci_num_vf(bp->pdev)) { bnx2x_set_channels()
3545 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { bnx2x_set_channels()
3551 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { bnx2x_set_channels()
3556 /* Set the requested number of queues in bp context. bnx2x_set_channels()
3561 bnx2x_change_num_queues(bp, channels->combined_count); bnx2x_set_channels()
3564 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_set_channels()
3565 bnx2x_change_num_queues(bp, channels->combined_count); bnx2x_set_channels()
3566 return bnx2x_nic_load(bp, LOAD_NORMAL); bnx2x_set_channels()
3572 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_ts_info() local
3574 if (bp->flags & PTP_SUPPORTED) { bnx2x_get_ts_info()
3582 if (bp->ptp_clock) bnx2x_get_ts_info()
3583 info->phc_index = ptp_clock_index(bp->ptp_clock); bnx2x_get_ts_info()
3665 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) bnx2x_set_ethtool_ops() argument
3667 netdev->ethtool_ops = (IS_PF(bp)) ? bnx2x_set_ethtool_ops()
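
[Editor's note] The last hit in this file is a one-line dispatch: a single setup hook installs one of two ethtool_ops tables depending on whether the function is a PF or a VF. A trivial sketch of that pattern, with invented stand-in tables:

    #include <stdbool.h>
    #include <stdio.h>

    struct ops { const char *name; };

    static const struct ops pf_ops = { "pf ethtool ops" };
    static const struct ops vf_ops = { "vf ethtool ops" };

    static const struct ops *set_ethtool_ops(bool is_pf)
    {
        return is_pf ? &pf_ops : &vf_ops;   /* chosen once at setup time */
    }

    int main(void)
    {
        printf("%s / %s\n", set_ethtool_ops(true)->name,
               set_ethtool_ops(false)->name);
        return 0;
    }
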
H A Dbnx2x_dcb.c35 static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
36 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
37 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
40 static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
43 static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
47 static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
51 static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
55 static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, bnx2x_read_data() argument
60 *buff = REG_RD(bp, addr + i); bnx2x_read_data()
63 static void bnx2x_write_data(struct bnx2x *bp, u32 *buff, bnx2x_write_data() argument
68 REG_WR(bp, addr + i, *buff); bnx2x_write_data()
71 static void bnx2x_pfc_set(struct bnx2x *bp) bnx2x_pfc_set() argument
78 bp->dcbx_port_params.ets.num_of_cos; bnx2x_pfc_set()
81 for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) bnx2x_pfc_set()
88 bp->dcbx_port_params.ets.cos_params[i].pri_bitmask bnx2x_pfc_set()
89 & DCBX_PFC_PRI_PAUSE_MASK(bp); bnx2x_pfc_set()
99 if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))) bnx2x_pfc_set()
106 pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); bnx2x_pfc_set()
110 bnx2x_acquire_phy_lock(bp); bnx2x_pfc_set()
111 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; bnx2x_pfc_set()
112 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); bnx2x_pfc_set()
113 bnx2x_release_phy_lock(bp); bnx2x_pfc_set()
116 static void bnx2x_pfc_clear(struct bnx2x *bp) bnx2x_pfc_clear() argument
120 bnx2x_acquire_phy_lock(bp); bnx2x_pfc_clear()
121 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; bnx2x_pfc_clear()
122 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); bnx2x_pfc_clear()
123 bnx2x_release_phy_lock(bp); bnx2x_pfc_clear()
126 static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, bnx2x_dump_dcbx_drv_param() argument
172 static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, bnx2x_dcbx_get_ap_priority() argument
179 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_ap_priority()
193 static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, bnx2x_dcbx_get_ap_feature() argument
197 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_ap_feature()
211 bp->dcbx_port_params.app.enabled = true; bnx2x_dcbx_get_ap_feature()
226 bnx2x_dcbx_get_ap_priority(bp, bnx2x_dcbx_get_ap_feature()
233 bnx2x_dcbx_get_ap_priority(bp, bnx2x_dcbx_get_ap_feature()
239 bp->dcbx_port_params.app.enabled = false; bnx2x_dcbx_get_ap_feature()
245 static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, bnx2x_dcbx_get_ets_feature() argument
252 bp->dcbx_port_params.ets.cos_params; bnx2x_dcbx_get_ets_feature()
263 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { bnx2x_dcbx_get_ets_feature()
270 if (bp->dcbx_port_params.app.enabled && ets->enabled && bnx2x_dcbx_get_ets_feature()
274 bp->dcbx_port_params.ets.enabled = true; bnx2x_dcbx_get_ets_feature()
276 bnx2x_dcbx_get_ets_pri_pg_tbl(bp, bnx2x_dcbx_get_ets_feature()
280 bnx2x_dcbx_get_num_pg_traf_type(bp, bnx2x_dcbx_get_ets_feature()
284 bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, bnx2x_dcbx_get_ets_feature()
289 bp->dcbx_port_params.ets.enabled = false; bnx2x_dcbx_get_ets_feature()
297 static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, bnx2x_dcbx_get_pfc_feature() argument
305 if (bp->dcbx_port_params.app.enabled && pfc->enabled && bnx2x_dcbx_get_pfc_feature()
308 bp->dcbx_port_params.pfc.enabled = true; bnx2x_dcbx_get_pfc_feature()
309 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = bnx2x_dcbx_get_pfc_feature()
313 bp->dcbx_port_params.pfc.enabled = false; bnx2x_dcbx_get_pfc_feature()
314 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; bnx2x_dcbx_get_pfc_feature()
319 static void bnx2x_dcbx_map_nw(struct bnx2x *bp) bnx2x_dcbx_map_nw() argument
323 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_map_nw()
326 bp->dcbx_port_params.ets.cos_params; bnx2x_dcbx_map_nw()
333 for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { bnx2x_dcbx_map_nw()
344 static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, bnx2x_get_dcbx_drv_param() argument
348 bnx2x_dcbx_get_ap_feature(bp, &features->app, error); bnx2x_get_dcbx_drv_param()
350 bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); bnx2x_get_dcbx_drv_param()
352 bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); bnx2x_get_dcbx_drv_param()
354 bnx2x_dcbx_map_nw(bp); bnx2x_get_dcbx_drv_param()
358 static int bnx2x_dcbx_read_mib(struct bnx2x *bp, bnx2x_dcbx_read_mib() argument
379 offset += BP_PORT(bp) * mib_size; bnx2x_dcbx_read_mib()
382 bnx2x_read_data(bp, base_mib_addr, offset, mib_size); bnx2x_dcbx_read_mib()
411 static void bnx2x_pfc_set_pfc(struct bnx2x *bp) bnx2x_pfc_set_pfc() argument
413 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_pfc_set_pfc()
414 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_pfc_set_pfc()
417 if (bp->dcbx_port_params.pfc.enabled && bnx2x_pfc_set_pfc()
418 (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured)) bnx2x_pfc_set_pfc()
423 bnx2x_pfc_set(bp); bnx2x_pfc_set_pfc()
425 bnx2x_pfc_clear(bp); bnx2x_pfc_set_pfc()
428 int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) bnx2x_dcbx_stop_hw_tx() argument
433 func_params.f_obj = &bp->func_obj; bnx2x_dcbx_stop_hw_tx()
441 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_dcbx_stop_hw_tx()
450 int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) bnx2x_dcbx_resume_hw_tx() argument
457 func_params.f_obj = &bp->func_obj; bnx2x_dcbx_resume_hw_tx()
463 bnx2x_dcbx_fw_struct(bp, tx_params); bnx2x_dcbx_resume_hw_tx()
467 rc = bnx2x_func_state_change(bp, &func_params); bnx2x_dcbx_resume_hw_tx()
476 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) bnx2x_dcbx_2cos_limit_update_ets_config() argument
478 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); bnx2x_dcbx_2cos_limit_update_ets_config()
523 bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); bnx2x_dcbx_2cos_limit_update_ets_config()
526 rc = bnx2x_ets_strict(&bp->link_params, 0); bnx2x_dcbx_2cos_limit_update_ets_config()
529 rc = bnx2x_ets_strict(&bp->link_params, 1); bnx2x_dcbx_2cos_limit_update_ets_config()
538 static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) bnx2x_dcbx_update_ets_config() argument
540 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); bnx2x_dcbx_update_ets_config()
569 if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars, bnx2x_dcbx_update_ets_config()
572 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); bnx2x_dcbx_update_ets_config()
576 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) bnx2x_dcbx_update_ets_params() argument
578 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_update_ets_params()
579 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_dcbx_update_ets_params()
582 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); bnx2x_dcbx_update_ets_params()
584 if (!bp->dcbx_port_params.ets.enabled || bnx2x_dcbx_update_ets_params()
585 ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)) bnx2x_dcbx_update_ets_params()
588 if (CHIP_IS_E3B0(bp)) bnx2x_dcbx_update_ets_params()
589 bnx2x_dcbx_update_ets_config(bp); bnx2x_dcbx_update_ets_params()
591 bnx2x_dcbx_2cos_limit_update_ets_config(bp); bnx2x_dcbx_update_ets_params()
595 static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) bnx2x_dcbx_read_shmem_remote_mib() argument
598 u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); bnx2x_dcbx_read_shmem_remote_mib()
609 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset, bnx2x_dcbx_read_shmem_remote_mib()
618 bp->dcbx_remote_feat = remote_mib.features; bnx2x_dcbx_read_shmem_remote_mib()
619 bp->dcbx_remote_flags = remote_mib.flags; bnx2x_dcbx_read_shmem_remote_mib()
624 static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) bnx2x_dcbx_read_shmem_neg_results() argument
627 u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); bnx2x_dcbx_read_shmem_neg_results()
637 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, bnx2x_dcbx_read_shmem_neg_results()
646 bp->dcbx_local_feat = local_mib.features; bnx2x_dcbx_read_shmem_neg_results()
647 bp->dcbx_error = local_mib.error; bnx2x_dcbx_read_shmem_neg_results()
672 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) bnx2x_dcbnl_update_applist() argument
678 &bp->dcbx_local_feat.app.app_pri_tbl[i]; bnx2x_dcbnl_update_applist()
689 err = dcb_setapp(bp->dev, &app); bnx2x_dcbnl_update_applist()
697 static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) bnx2x_dcbx_update_tc_mapping() argument
700 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { bnx2x_dcbx_update_tc_mapping()
702 if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask bnx2x_dcbx_update_tc_mapping()
704 bp->prio_to_cos[prio] = cos; bnx2x_dcbx_update_tc_mapping()
715 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); bnx2x_dcbx_update_tc_mapping()
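
[Editor's note] bnx2x_dcbx_update_tc_mapping() above builds the priority-to-CoS table: for each CoS, every priority whose bit is set in that CoS's pri_bitmask maps to it. A sketch with made-up bitmask values:

    #include <stdio.h>

    #define MAX_PRI 8
    #define MAX_COS 4

    int main(void)
    {
        unsigned pri_bitmask[MAX_COS] = { 0x81, 0x06, 0x78, 0x00 };
        int prio_to_cos[MAX_PRI] = { 0 };

        for (int cos = 0; cos < MAX_COS; cos++)
            for (int prio = 0; prio < MAX_PRI; prio++)
                if (pri_bitmask[cos] & (1u << prio))
                    prio_to_cos[prio] = cos;

        for (int prio = 0; prio < MAX_PRI; prio++)
            printf("prio %d -> cos %d\n", prio, prio_to_cos[prio]);
        return 0;
    }
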
718 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_dcbx_set_params() argument
729 bnx2x_dcbnl_update_applist(bp, true); bnx2x_dcbx_set_params()
732 if (bnx2x_dcbx_read_shmem_remote_mib(bp)) bnx2x_dcbx_set_params()
736 if (bnx2x_dcbx_read_shmem_neg_results(bp)) bnx2x_dcbx_set_params()
739 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_set_params()
740 bp->dcbx_error); bnx2x_dcbx_set_params()
742 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_set_params()
743 bp->dcbx_error); bnx2x_dcbx_set_params()
746 bnx2x_update_drv_flags(bp, bnx2x_dcbx_set_params()
753 bnx2x_dcbnl_update_applist(bp, false); bnx2x_dcbx_set_params()
759 bnx2x_dcbx_update_tc_mapping(bp); bnx2x_dcbx_set_params()
765 if (IS_MF(bp)) bnx2x_dcbx_set_params()
766 bnx2x_link_sync_notify(bp); bnx2x_dcbx_set_params()
768 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); bnx2x_dcbx_set_params()
773 bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_set_params()
775 bnx2x_dcbx_update_ets_params(bp); bnx2x_dcbx_set_params()
778 bnx2x_set_local_cmng(bp); bnx2x_dcbx_set_params()
782 bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); bnx2x_dcbx_set_params()
787 dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); bnx2x_dcbx_set_params()
795 #define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
796 BP_PORT(bp)*sizeof(struct lldp_admin_mib))
798 static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, bnx2x_dcbx_admin_mib_updated_params() argument
803 u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); bnx2x_dcbx_admin_mib_updated_params()
807 struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; bnx2x_dcbx_admin_mib_updated_params()
812 bnx2x_read_data(bp, (u32 *)&admin_mib, offset, bnx2x_dcbx_admin_mib_updated_params()
815 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) bnx2x_dcbx_admin_mib_updated_params()
919 bnx2x_write_data(bp, (u32 *)&admin_mib, offset, bnx2x_dcbx_admin_mib_updated_params()
923 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) bnx2x_dcbx_set_state() argument
925 if (!CHIP_IS_E1x(bp)) { bnx2x_dcbx_set_state()
926 bp->dcb_state = dcb_on; bnx2x_dcbx_set_state()
927 bp->dcbx_enabled = dcbx_enabled; bnx2x_dcbx_set_state()
929 bp->dcb_state = false; bnx2x_dcbx_set_state()
930 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; bnx2x_dcbx_set_state()
940 void bnx2x_dcbx_init_params(struct bnx2x *bp) bnx2x_dcbx_init_params() argument
942 bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ bnx2x_dcbx_init_params()
943 bp->dcbx_config_params.admin_ets_willing = 1; bnx2x_dcbx_init_params()
944 bp->dcbx_config_params.admin_pfc_willing = 1; bnx2x_dcbx_init_params()
945 bp->dcbx_config_params.overwrite_settings = 1; bnx2x_dcbx_init_params()
946 bp->dcbx_config_params.admin_ets_enable = 1; bnx2x_dcbx_init_params()
947 bp->dcbx_config_params.admin_pfc_enable = 1; bnx2x_dcbx_init_params()
948 bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; bnx2x_dcbx_init_params()
949 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbx_init_params()
950 bp->dcbx_config_params.admin_pfc_tx_enable = 1; bnx2x_dcbx_init_params()
951 bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bnx2x_dcbx_init_params()
952 bp->dcbx_config_params.admin_ets_reco_valid = 1; bnx2x_dcbx_init_params()
953 bp->dcbx_config_params.admin_app_priority_willing = 1; bnx2x_dcbx_init_params()
954 bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100; bnx2x_dcbx_init_params()
955 bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0; bnx2x_dcbx_init_params()
956 bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0; bnx2x_dcbx_init_params()
957 bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; bnx2x_dcbx_init_params()
958 bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; bnx2x_dcbx_init_params()
959 bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; bnx2x_dcbx_init_params()
960 bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; bnx2x_dcbx_init_params()
961 bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; bnx2x_dcbx_init_params()
962 bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0; bnx2x_dcbx_init_params()
963 bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; bnx2x_dcbx_init_params()
964 bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; bnx2x_dcbx_init_params()
965 bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0; bnx2x_dcbx_init_params()
966 bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; bnx2x_dcbx_init_params()
967 bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; bnx2x_dcbx_init_params()
968 bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; bnx2x_dcbx_init_params()
969 bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; bnx2x_dcbx_init_params()
970 bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100; bnx2x_dcbx_init_params()
971 bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0; bnx2x_dcbx_init_params()
972 bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0; bnx2x_dcbx_init_params()
973 bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; bnx2x_dcbx_init_params()
974 bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0; bnx2x_dcbx_init_params()
975 bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0; bnx2x_dcbx_init_params()
976 bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0; bnx2x_dcbx_init_params()
977 bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0; bnx2x_dcbx_init_params()
978 bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; bnx2x_dcbx_init_params()
979 bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; bnx2x_dcbx_init_params()
980 bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; bnx2x_dcbx_init_params()
981 bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; bnx2x_dcbx_init_params()
982 bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; bnx2x_dcbx_init_params()
983 bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; bnx2x_dcbx_init_params()
984 bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; bnx2x_dcbx_init_params()
985 bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; bnx2x_dcbx_init_params()
986 bp->dcbx_config_params.admin_pfc_bitmap = 0x0; bnx2x_dcbx_init_params()
987 bp->dcbx_config_params.admin_priority_app_table[0].valid = 0; bnx2x_dcbx_init_params()
988 bp->dcbx_config_params.admin_priority_app_table[1].valid = 0; bnx2x_dcbx_init_params()
989 bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; bnx2x_dcbx_init_params()
990 bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; bnx2x_dcbx_init_params()
991 bp->dcbx_config_params.admin_default_priority = 0; bnx2x_dcbx_init_params()
994 void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) bnx2x_dcbx_init() argument
999 if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) bnx2x_dcbx_init()
1002 if (bp->dcbx_enabled <= 0) bnx2x_dcbx_init()
1010 DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", bnx2x_dcbx_init()
1011 bp->dcb_state, bp->port.pmf); bnx2x_dcbx_init()
1013 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bnx2x_dcbx_init()
1014 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { bnx2x_dcbx_init()
1016 SHMEM2_RD(bp, dcbx_lldp_params_offset); bnx2x_dcbx_init()
1021 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); bnx2x_dcbx_init()
1027 bnx2x_acquire_hw_lock(bp, bnx2x_dcbx_init()
1030 bnx2x_dcbx_admin_mib_updated_params(bp, bnx2x_dcbx_init()
1034 bnx2x_fw_command(bp, bnx2x_dcbx_init()
1039 bnx2x_release_hw_lock(bp, bnx2x_dcbx_init()
1045 bnx2x_dcbx_print_cos_params(struct bnx2x *bp, bnx2x_dcbx_print_cos_params() argument
1055 bp->dcbx_port_params.pfc.priority_non_pauseable_mask); bnx2x_dcbx_print_cos_params()
1057 for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { bnx2x_dcbx_print_cos_params()
1060 cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); bnx2x_dcbx_print_cos_params()
1064 cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); bnx2x_dcbx_print_cos_params()
1068 cos, bp->dcbx_port_params.ets.cos_params[cos].strict); bnx2x_dcbx_print_cos_params()
1072 cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); bnx2x_dcbx_print_cos_params()
1087 static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, bnx2x_dcbx_get_num_pg_traf_type() argument
1093 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_get_num_pg_traf_type()
1134 static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, bnx2x_dcbx_ets_disabled_entry_data() argument
1140 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_ets_disabled_entry_data()
1146 static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, bnx2x_dcbx_add_to_cos_bw() argument
1156 static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, bnx2x_dcbx_separate_pauseable_from_non() argument
1172 pri_tested = 1 << bp->dcbx_port_params. bnx2x_dcbx_separate_pauseable_from_non()
1175 if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { bnx2x_dcbx_separate_pauseable_from_non()
1182 pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. bnx2x_dcbx_separate_pauseable_from_non()
1186 bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], bnx2x_dcbx_separate_pauseable_from_non()
1204 static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params() argument
1213 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1216 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1225 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1237 if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1247 } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1271 bnx2x_dcbx_ets_disabled_entry_data(bp, bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1275 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1281 if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1282 DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1307 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1310 pri_tested = 1 << bp->dcbx_port_params. bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params()
1338 struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1353 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1354 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1356 IS_DCBX_PFC_PRI_MIX_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1361 bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1363 bp->dcbx_port_params.ets.enabled = false; bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1371 if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1393 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params()
1414 struct bnx2x *bp, bnx2x_dcbx_join_pgs()
1465 struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1485 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1486 bnx2x_dcbx_separate_pauseable_from_non(bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1507 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1510 pri_tested = 1 << bp->dcbx_port_params. bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1512 pg_entry = (u8)pg_pri_orginal_spread[bp-> bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1527 bnx2x_dcbx_add_to_cos_bw(bp, bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params()
1544 static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params() argument
1558 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params()
1566 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params()
1577 bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params()
1587 bnx2x_dcbx_ets_disabled_entry_data(bp, bnx2x_dcbx_2cos_limit_cee_fill_cos_params()
1592 static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, bnx2x_dcbx_spread_strict_pri() argument
1613 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_spread_strict_pri()
1621 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_spread_strict_pri()
1641 static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, bnx2x_dcbx_cee_fill_strict_pri() argument
1647 if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, bnx2x_dcbx_cee_fill_strict_pri()
1656 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_cee_fill_strict_pri()
1664 static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_cee_fill_cos_params() argument
1681 if (bnx2x_dcbx_join_pgs(bp, ets, help_data, bnx2x_dcbx_cee_fill_cos_params()
1684 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, bnx2x_dcbx_cee_fill_cos_params()
1699 data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, bnx2x_dcbx_cee_fill_cos_params()
1713 entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, bnx2x_dcbx_cee_fill_cos_params()
1721 static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, bnx2x_dcbx_fill_cos_params() argument
1752 if (CHIP_IS_E3B0(bp)) bnx2x_dcbx_fill_cos_params()
1753 bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, bnx2x_dcbx_fill_cos_params()
1756 bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, bnx2x_dcbx_fill_cos_params()
1765 &bp->dcbx_port_params.ets.cos_params[i]; bnx2x_dcbx_fill_cos_params()
1778 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { bnx2x_dcbx_fill_cos_params()
1781 DCBX_PFC_PRI_GET_NON_PAUSE(bp, bnx2x_dcbx_fill_cos_params()
1787 DCBX_PFC_PRI_GET_PAUSE(bp, bnx2x_dcbx_fill_cos_params()
1803 bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; bnx2x_dcbx_fill_cos_params()
1806 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, bnx2x_dcbx_get_ets_pri_pg_tbl() argument
1820 static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, bnx2x_dcbx_fw_struct() argument
1826 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; bnx2x_dcbx_fw_struct()
1827 int mfw_configured = SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_fw_struct()
1828 GET_FLAGS(SHMEM2_RD(bp, drv_flags), bnx2x_dcbx_fw_struct()
1834 if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured) bnx2x_dcbx_fw_struct()
1841 pfc_fw_cfg->dcb_version = ++bp->dcb_version; bnx2x_dcbx_fw_struct()
1851 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) bnx2x_dcbx_fw_struct()
1852 if (bp->dcbx_port_params.ets.cos_params[cos]. bnx2x_dcbx_fw_struct()
1862 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); bnx2x_dcbx_fw_struct()
1865 void bnx2x_dcbx_pmf_update(struct bnx2x *bp) bnx2x_dcbx_pmf_update() argument
1868 * read it from shmem and update bp and netdev accordingly bnx2x_dcbx_pmf_update()
1870 if (SHMEM2_HAS(bp, drv_flags) && bnx2x_dcbx_pmf_update()
1871 GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) { bnx2x_dcbx_pmf_update()
1873 if (bnx2x_dcbx_read_shmem_neg_results(bp)) bnx2x_dcbx_pmf_update()
1876 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_pmf_update()
1877 bp->dcbx_error); bnx2x_dcbx_pmf_update()
1878 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bnx2x_dcbx_pmf_update()
1879 bp->dcbx_error); bnx2x_dcbx_pmf_update()
1884 bnx2x_dcbnl_update_applist(bp, false); bnx2x_dcbx_pmf_update()
1888 dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); bnx2x_dcbx_pmf_update()
1894 bnx2x_dcbx_update_tc_mapping(bp); bnx2x_dcbx_pmf_update()
1904 static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) bnx2x_dcbnl_set_valid() argument
1909 return bp->dcb_state && bp->dcbx_mode_uset; bnx2x_dcbnl_set_valid()
1914 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_state() local
1915 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); bnx2x_dcbnl_get_state()
1916 return bp->dcb_state; bnx2x_dcbnl_get_state()
1921 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_state() local
1925 if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) || bnx2x_dcbnl_set_state()
1926 (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) { bnx2x_dcbnl_set_state()
1931 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); bnx2x_dcbnl_set_state()
1938 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_perm_hw_addr() local
1944 if (CNIC_LOADED(bp)) bnx2x_dcbnl_get_perm_hw_addr()
1946 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, bnx2x_dcbnl_get_perm_hw_addr()
1954 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_tccfg_tx() local
1957 if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) bnx2x_dcbnl_set_pg_tccfg_tx()
1973 bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; bnx2x_dcbnl_set_pg_tccfg_tx()
1974 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbnl_set_pg_tccfg_tx()
1980 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_bwgcfg_tx() local
1983 if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) bnx2x_dcbnl_set_pg_bwgcfg_tx()
1986 bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; bnx2x_dcbnl_set_pg_bwgcfg_tx()
1987 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; bnx2x_dcbnl_set_pg_bwgcfg_tx()
1994 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_tccfg_rx() local
2001 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pg_bwgcfg_rx() local
2009 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_tccfg_tx() local
2026 if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) bnx2x_dcbnl_get_pg_tccfg_tx()
2029 *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio); bnx2x_dcbnl_get_pg_tccfg_tx()
2035 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_bwgcfg_tx() local
2040 if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) bnx2x_dcbnl_get_pg_bwgcfg_tx()
2043 *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid); bnx2x_dcbnl_get_pg_bwgcfg_tx()
2050 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_tccfg_rx() local
2059 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pg_bwgcfg_rx() local
2068 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pfc_cfg() local
2071 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) bnx2x_dcbnl_set_pfc_cfg()
2075 bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio); bnx2x_dcbnl_set_pfc_cfg()
2076 bp->dcbx_config_params.admin_pfc_tx_enable = 1; bnx2x_dcbnl_set_pfc_cfg()
2078 bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio); bnx2x_dcbnl_set_pfc_cfg()
2085 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pfc_cfg() local
2090 if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES) bnx2x_dcbnl_get_pfc_cfg()
2093 *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1; bnx2x_dcbnl_get_pfc_cfg()
2098 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_all() local
2102 if (!bnx2x_dcbnl_set_valid(bp)) bnx2x_dcbnl_set_all()
2105 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { bnx2x_dcbnl_set_all()
2106 netdev_err(bp->dev, bnx2x_dcbnl_set_all()
2110 if (netif_running(bp->dev)) { bnx2x_dcbnl_set_all()
2111 bnx2x_update_drv_flags(bp, bnx2x_dcbnl_set_all()
2114 bnx2x_dcbx_init(bp, true); bnx2x_dcbnl_set_all()
2123 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_cap() local
2126 if (bp->dcb_state) { bnx2x_dcbnl_get_cap()
2168 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_numtcs() local
2173 if (bp->dcb_state) { bnx2x_dcbnl_get_numtcs()
2176 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : bnx2x_dcbnl_get_numtcs()
2180 *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : bnx2x_dcbnl_get_numtcs()
2198 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_numtcs() local
2205 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_pfc_state() local
2206 DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); bnx2x_dcbnl_get_pfc_state()
2208 if (!bp->dcb_state) bnx2x_dcbnl_get_pfc_state()
2211 return bp->dcbx_local_feat.pfc.enabled; bnx2x_dcbnl_get_pfc_state()
2216 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_pfc_state() local
2219 if (!bnx2x_dcbnl_set_valid(bp)) bnx2x_dcbnl_set_pfc_state()
2222 bp->dcbx_config_params.admin_pfc_tx_enable = bnx2x_dcbnl_set_pfc_state()
2223 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); bnx2x_dcbnl_set_pfc_state()
2271 static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) bnx2x_set_admin_app_up() argument
2278 &bp->dcbx_config_params.admin_priority_app_table[i]; bnx2x_set_admin_app_up()
2287 bp->dcbx_config_params. bnx2x_set_admin_app_up()
2292 &bp->dcbx_config_params.admin_priority_app_table[ff], bnx2x_set_admin_app_up()
2302 bp->dcbx_config_params.admin_application_priority_tx_enable = 1; bnx2x_set_admin_app_up()
2310 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_app_up() local
2315 if (!bnx2x_dcbnl_set_valid(bp)) { bnx2x_dcbnl_set_app_up()
2329 return bnx2x_set_admin_app_up(bp, idtype, idval, up); bnx2x_dcbnl_set_app_up()
2334 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_dcbx() local
2339 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) bnx2x_dcbnl_get_dcbx()
2347 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_dcbx() local
2358 if (bp->dcb_state != BNX2X_DCB_STATE_ON) { bnx2x_dcbnl_set_dcbx()
2364 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; bnx2x_dcbnl_set_dcbx()
2366 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; bnx2x_dcbnl_set_dcbx()
2368 bp->dcbx_mode_uset = true; bnx2x_dcbnl_set_dcbx()
2375 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_get_featcfg() local
2380 if (bp->dcb_state) { bnx2x_dcbnl_get_featcfg()
2384 if (bp->dcbx_local_feat.ets.enabled) bnx2x_dcbnl_get_featcfg()
2386 if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | bnx2x_dcbnl_get_featcfg()
2391 if (bp->dcbx_local_feat.pfc.enabled) bnx2x_dcbnl_get_featcfg()
2393 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | bnx2x_dcbnl_get_featcfg()
2399 if (bp->dcbx_local_feat.app.enabled) bnx2x_dcbnl_get_featcfg()
2401 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | bnx2x_dcbnl_get_featcfg()
2422 struct bnx2x *bp = netdev_priv(netdev); bnx2x_dcbnl_set_featcfg() local
2428 if (bnx2x_dcbnl_set_valid(bp)) { bnx2x_dcbnl_set_featcfg()
2431 bp->dcbx_config_params.admin_ets_enable = bnx2x_dcbnl_set_featcfg()
2433 bp->dcbx_config_params.admin_ets_willing = bnx2x_dcbnl_set_featcfg()
2437 bp->dcbx_config_params.admin_pfc_enable = bnx2x_dcbnl_set_featcfg()
2439 bp->dcbx_config_params.admin_pfc_willing = bnx2x_dcbnl_set_featcfg()
2444 bp->dcbx_config_params.admin_app_priority_willing = bnx2x_dcbnl_set_featcfg()
2464 struct bnx2x *bp = netdev_priv(netdev); bnx2x_peer_appinfo() local
2468 info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; bnx2x_peer_appinfo()
2469 info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; bnx2x_peer_appinfo()
2473 if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & bnx2x_peer_appinfo()
2483 struct bnx2x *bp = netdev_priv(netdev); bnx2x_peer_apptable() local
2489 &bp->dcbx_remote_feat.app.app_pri_tbl[i]; bnx2x_peer_apptable()
2503 struct bnx2x *bp = netdev_priv(netdev); bnx2x_cee_peer_getpg() local
2505 pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; bnx2x_cee_peer_getpg()
2509 DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); bnx2x_cee_peer_getpg()
2511 DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); bnx2x_cee_peer_getpg()
2519 struct bnx2x *bp = netdev_priv(netdev); bnx2x_cee_peer_getpfc() local
2520 pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; bnx2x_cee_peer_getpfc()
2521 pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; bnx2x_cee_peer_getpfc()
1337 bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params() argument
1413 bnx2x_dcbx_join_pgs(struct bnx2x *bp, struct dcbx_ets_feature *ets, struct pg_help_data *pg_help_data, u8 required_num_of_pg) bnx2x_dcbx_join_pgs() argument
1464 bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(struct bnx2x *bp, struct pg_help_data *pg_help_data, struct dcbx_ets_feature *ets, struct cos_help_data *cos_data, u32 *pg_pri_orginal_spread, u32 pri_join_mask, u8 num_of_dif_pri) bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params() argument
bnx2x_sriov.c
29 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
35 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, storm_memset_vf_to_pf() argument
38 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
40 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
42 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
44 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), storm_memset_vf_to_pf()
48 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, storm_memset_func_en() argument
51 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
53 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
55 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
57 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), storm_memset_func_en()
61 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) bnx2x_vf_idx_by_abs_fid() argument
65 for_each_vf(bp, idx) bnx2x_vf_idx_by_abs_fid()
66 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) bnx2x_vf_idx_by_abs_fid()
72 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) bnx2x_vf_by_abs_fid() argument
74 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); bnx2x_vf_by_abs_fid()
75 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; bnx2x_vf_by_abs_fid()
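bnx2x_vf_idx_by_abs_fid() above is a linear scan over the VF database, and bnx2x_vf_by_abs_fid() bounds-checks the returned index before handing out a pointer. A small standalone model of that lookup (array size and names are made up for illustration):

#include <stdio.h>

#define NR_VIRTFN 4

struct virtf { unsigned short abs_vfid; };

static struct virtf vfs[NR_VIRTFN] = { {8}, {9}, {10}, {11} };

static int vf_idx_by_abs_fid(unsigned short abs_vfid)
{
    int idx;
    for (idx = 0; idx < NR_VIRTFN; idx++)   /* linear scan, like for_each_vf() */
        if (vfs[idx].abs_vfid == abs_vfid)
            break;
    return idx;                              /* == NR_VIRTFN when not found */
}

static struct virtf *vf_by_abs_fid(unsigned short abs_vfid)
{
    int idx = vf_idx_by_abs_fid(abs_vfid);
    return (idx < NR_VIRTFN) ? &vfs[idx] : NULL; /* bounds check before use */
}

int main(void)
{
    printf("fid 10 -> %s\n", vf_by_abs_fid(10) ? "found" : "missing");
    printf("fid 99 -> %s\n", vf_by_abs_fid(99) ? "found" : "missing");
    return 0;
}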
78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_igu_ack_sb() argument
102 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); bnx2x_vf_igu_ack_sb()
108 REG_WR(bp, igu_addr_ctl, ctl); bnx2x_vf_igu_ack_sb()
113 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, bnx2x_validate_vf_sp_objs() argument
128 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfop_qctor_dump_tx() argument
144 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfop_qctor_dump_rx() argument
168 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, bnx2x_vfop_qctor_prep() argument
233 static int bnx2x_vf_queue_create(struct bnx2x *bp, bnx2x_vf_queue_create() argument
247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == bnx2x_vf_queue_create()
255 rc = bnx2x_queue_state_change(bp, q_params); bnx2x_vf_queue_create()
262 rc = bnx2x_queue_state_change(bp, q_params); bnx2x_vf_queue_create()
267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), bnx2x_vf_queue_create()
273 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_queue_destroy() argument
289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == bnx2x_vf_queue_destroy()
298 rc = bnx2x_queue_state_change(bp, &q_params); bnx2x_vf_queue_destroy()
315 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) bnx2x_vf_set_igu_info() argument
317 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_set_igu_info()
320 if (!BP_VFDB(bp)->first_vf_igu_entry) bnx2x_vf_set_igu_info()
321 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; bnx2x_vf_set_igu_info()
330 BP_VFDB(bp)->vf_sbs_pool++; bnx2x_vf_set_igu_info()
333 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, bnx2x_vf_vlan_credit() argument
341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); bnx2x_vf_vlan_credit()
349 bnx2x_vlan_mac_h_read_unlock(bp, obj); bnx2x_vf_vlan_credit()
354 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_vlan_mac_clear() argument
384 rc = ramrod.vlan_mac_obj->delete_all(bp, bnx2x_vf_vlan_mac_clear()
398 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, bnx2x_vf_mac_vlan_config() argument
436 rc = bnx2x_config_vlan_mac(bp, &ramrod); bnx2x_vf_mac_vlan_config()
450 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mac_vlan_config_list() argument
458 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_vf_mac_vlan_config_list()
463 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, bnx2x_vf_mac_vlan_config_list()
475 bnx2x_vf_mac_vlan_config(bp, vf, qid, bnx2x_vf_mac_vlan_config_list()
487 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, bnx2x_vf_queue_setup() argument
494 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); bnx2x_vf_queue_setup()
499 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, bnx2x_vf_queue_setup()
507 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_queue_flr() argument
516 bnx2x_validate_vf_sp_objs(bp, vf, false)) { bnx2x_vf_queue_flr()
517 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, bnx2x_vf_queue_flr()
521 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, bnx2x_vf_queue_flr()
525 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, bnx2x_vf_queue_flr()
540 rc = bnx2x_queue_state_change(bp, &qstate); bnx2x_vf_queue_flr()
551 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mcast() argument
579 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); bnx2x_vf_mcast()
597 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); bnx2x_vf_mcast()
606 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, bnx2x_vf_prep_rx_mode() argument
616 ramrod->rx_mode_obj = &bp->rx_mode_obj; bnx2x_vf_prep_rx_mode()
627 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); bnx2x_vf_prep_rx_mode()
628 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); bnx2x_vf_prep_rx_mode()
631 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_rxmode() argument
638 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); bnx2x_vf_rxmode()
641 return bnx2x_config_rx_mode(bp, &ramrod); bnx2x_vf_rxmode()
644 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) bnx2x_vf_queue_teardown() argument
652 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); bnx2x_vf_queue_teardown()
657 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { bnx2x_vf_queue_teardown()
658 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, bnx2x_vf_queue_teardown()
663 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, bnx2x_vf_queue_teardown()
668 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, bnx2x_vf_queue_teardown()
673 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); bnx2x_vf_queue_teardown()
680 rc = bnx2x_vf_queue_destroy(bp, vf, qid); bnx2x_vf_queue_teardown()
698 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) bnx2x_vf_enable_internal() argument
700 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); bnx2x_vf_enable_internal()
704 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_semi_clear_err() argument
706 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err()
707 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err()
708 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err()
709 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); bnx2x_vf_semi_clear_err()
712 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_pglue_clear_err() argument
714 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; bnx2x_vf_pglue_clear_err()
731 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); bnx2x_vf_pglue_clear_err()
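The "was error" bookkeeping above packs one bit per VF into 32-bit registers: (2 * BP_PATH(bp) + abs_vfid) >> 5 selects the register group and 1 << (abs_vfid & 0x1f) the VF's bit inside it. A runnable check of just that arithmetic:

#include <stdio.h>

/* Model of the group/bit split used by bnx2x_vf_pglue_clear_err():
 * each 32-bit register covers 32 VFs, so >>5 picks the register and
 * the low five bits pick the bit inside it. */
static void was_err_slot(unsigned path, unsigned abs_vfid,
                         unsigned *group, unsigned *bit)
{
    *group = (2 * path + abs_vfid) >> 5;
    *bit   = 1u << (abs_vfid & 0x1f);
}

int main(void)
{
    unsigned group, bit;
    was_err_slot(0, 37, &group, &bit);
    printf("vf 37, path 0 -> group %u, bit mask 0x%08x\n", group, bit);
    return 0;
}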
734 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_igu_reset() argument
740 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_igu_reset()
742 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); bnx2x_vf_igu_reset()
743 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); bnx2x_vf_igu_reset()
744 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); bnx2x_vf_igu_reset()
745 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); bnx2x_vf_igu_reset()
746 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); bnx2x_vf_igu_reset()
747 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); bnx2x_vf_igu_reset()
749 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); bnx2x_vf_igu_reset()
752 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; bnx2x_vf_igu_reset()
753 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); bnx2x_vf_igu_reset()
759 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_igu_reset()
766 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); bnx2x_vf_igu_reset()
769 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, bnx2x_vf_igu_reset()
773 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, bnx2x_vf_igu_reset()
778 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_enable_access() argument
781 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); bnx2x_vf_enable_access()
782 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); bnx2x_vf_enable_access()
785 bnx2x_vf_semi_clear_err(bp, abs_vfid); bnx2x_vf_enable_access()
786 bnx2x_vf_pglue_clear_err(bp, abs_vfid); bnx2x_vf_enable_access()
789 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); bnx2x_vf_enable_access()
791 bnx2x_vf_enable_internal(bp, true); bnx2x_vf_enable_access()
792 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_enable_access()
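bnx2x_vf_enable_access() shows the recurring "pretend" idiom in this file: the PF temporarily assumes a VF's function ID, performs the accesses on the VF's behalf, then restores its own ID. A sketch of that bracketing, with stub helpers standing in for the real register writes:

#include <stdio.h>

static unsigned current_func;          /* models the pretend register */

static void pretend_func(unsigned fid) /* stand-in for bnx2x_pretend_func() */
{
    current_func = fid;
    printf("GRC accesses now attributed to function %u\n", fid);
}

static void enable_internal(int on)    /* stand-in for a per-VF register write */
{
    printf("  func %u: internal VFID enable = %d\n", current_func, on);
}

int main(void)
{
    unsigned pf_id = 0, vf_handle = 64 + 3; /* hypothetical HW VF handle */

    pretend_func(vf_handle);  /* act as the VF ... */
    enable_internal(1);       /* ... so this write lands in the VF's space */
    pretend_func(pf_id);      /* always restore the PF's own ID afterwards */
    return 0;
}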
795 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_enable_traffic() argument
798 bnx2x_vf_igu_reset(bp, vf); bnx2x_vf_enable_traffic()
801 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_enable_traffic()
802 REG_WR(bp, PBF_REG_DISABLE_VF, 0); bnx2x_vf_enable_traffic()
803 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_enable_traffic()
806 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_is_pcie_pending() argument
809 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_is_pcie_pending()
820 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_flr_clnup_epilog() argument
823 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) bnx2x_vf_flr_clnup_epilog()
833 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_iov_static_resc() argument
852 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_free_resc() argument
855 bnx2x_iov_static_resc(bp, vf); bnx2x_vf_free_resc()
859 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_flr_clnup_hw() argument
861 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); bnx2x_vf_flr_clnup_hw()
864 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_flr_clnup_hw()
865 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, bnx2x_vf_flr_clnup_hw()
868 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_flr_clnup_hw()
871 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), bnx2x_vf_flr_clnup_hw()
876 bnx2x_tx_hw_flushed(bp, poll_cnt); bnx2x_vf_flr_clnup_hw()
879 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_flr() argument
889 rc = bnx2x_vf_queue_flr(bp, vf, i); bnx2x_vf_flr()
895 bnx2x_vf_mcast(bp, vf, NULL, 0, true); bnx2x_vf_flr()
898 bnx2x_vf_flr_clnup_hw(bp, vf); bnx2x_vf_flr()
901 bnx2x_vf_free_resc(bp, vf); bnx2x_vf_flr()
904 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); bnx2x_vf_flr()
911 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) bnx2x_vf_flr_clnup() argument
916 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { bnx2x_vf_flr_clnup()
918 if (bnx2x_vf(bp, i, state) != VF_RESET || bnx2x_vf_flr_clnup()
919 !bnx2x_vf(bp, i, flr_clnup_stage)) bnx2x_vf_flr_clnup()
923 i, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_flr_clnup()
925 vf = BP_VF(bp, i); bnx2x_vf_flr_clnup()
928 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); bnx2x_vf_flr_clnup()
931 bnx2x_vf_flr(bp, vf); bnx2x_vf_flr_clnup()
935 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); bnx2x_vf_flr_clnup()
946 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); bnx2x_vf_flr_clnup()
948 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], bnx2x_vf_flr_clnup()
949 bp->vfdb->flrd_vfs[i]); bnx2x_vf_flr_clnup()
951 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); bnx2x_vf_flr_clnup()
957 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); bnx2x_vf_flr_clnup()
960 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) bnx2x_vf_handle_flr_event() argument
966 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); bnx2x_vf_handle_flr_event()
970 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); bnx2x_vf_handle_flr_event()
972 for_each_vf(bp, i) { for_each_vf()
973 struct bnx2x_virtf *vf = BP_VF(bp, i); for_each_vf()
977 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); for_each_vf()
979 reset = bp->vfdb->flrd_vfs[1] & for_each_vf()
994 bnx2x_vf_flr_clnup(bp);
998 void bnx2x_iov_init_dq(struct bnx2x *bp) bnx2x_iov_init_dq() argument
1000 if (!IS_SRIOV(bp)) bnx2x_iov_init_dq()
1004 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); bnx2x_iov_init_dq()
1005 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); bnx2x_iov_init_dq()
1010 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); bnx2x_iov_init_dq()
1013 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); bnx2x_iov_init_dq()
1018 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); bnx2x_iov_init_dq()
1024 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); bnx2x_iov_init_dq()
1025 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); bnx2x_iov_init_dq()
1026 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); bnx2x_iov_init_dq()
1027 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); bnx2x_iov_init_dq()
1032 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); bnx2x_iov_init_dq()
1035 void bnx2x_iov_init_dmae(struct bnx2x *bp) bnx2x_iov_init_dmae() argument
1037 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) bnx2x_iov_init_dmae()
1038 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); bnx2x_iov_init_dmae()
1041 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) bnx2x_vf_bus() argument
1043 struct pci_dev *dev = bp->pdev; bnx2x_vf_bus()
1044 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_bus()
1050 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) bnx2x_vf_devfn() argument
1052 struct pci_dev *dev = bp->pdev; bnx2x_vf_devfn()
1053 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_devfn()
1058 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_set_bars() argument
1061 struct pci_dev *dev = bp->pdev; bnx2x_vf_set_bars()
1062 struct bnx2x_sriov *iov = &bp->vfdb->sriov; bnx2x_vf_set_bars()
1080 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) bnx2x_get_vf_igu_cam_info() argument
1088 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); bnx2x_get_vf_igu_cam_info()
1094 else if (current_pf == BP_FUNC(bp)) bnx2x_get_vf_igu_cam_info()
1095 bnx2x_vf_set_igu_info(bp, sb_id, bnx2x_get_vf_igu_cam_info()
1103 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); bnx2x_get_vf_igu_cam_info()
1104 return BP_VFDB(bp)->vf_sbs_pool; bnx2x_get_vf_igu_cam_info()
1107 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) __bnx2x_iov_free_vfdb() argument
1109 if (bp->vfdb) { __bnx2x_iov_free_vfdb()
1110 kfree(bp->vfdb->vfqs); __bnx2x_iov_free_vfdb()
1111 kfree(bp->vfdb->vfs); __bnx2x_iov_free_vfdb()
1112 kfree(bp->vfdb); __bnx2x_iov_free_vfdb()
1114 bp->vfdb = NULL; __bnx2x_iov_free_vfdb()
1117 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) bnx2x_sriov_pci_cfg_info() argument
1120 struct pci_dev *dev = bp->pdev; bnx2x_sriov_pci_cfg_info()
1142 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) bnx2x_sriov_info() argument
1150 if (bnx2x_sriov_pci_cfg_info(bp, iov)) bnx2x_sriov_info()
1157 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); bnx2x_sriov_info()
1159 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); bnx2x_sriov_info()
1163 BP_FUNC(bp), bnx2x_sriov_info()
1171 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_iov_init_one() argument
1176 struct pci_dev *dev = bp->pdev; bnx2x_iov_init_one()
1178 bp->vfdb = NULL; bnx2x_iov_init_one()
1181 if (IS_VF(bp)) bnx2x_iov_init_one()
1189 if (CHIP_IS_E1x(bp)) bnx2x_iov_init_one()
1197 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { bnx2x_iov_init_one()
1199 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); bnx2x_iov_init_one()
1212 if (!bnx2x_ari_enabled(bp->pdev)) { bnx2x_iov_init_one()
1218 if (CHIP_INT_MODE_IS_BC(bp)) { bnx2x_iov_init_one()
1224 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); bnx2x_iov_init_one()
1225 if (!bp->vfdb) { bnx2x_iov_init_one()
1236 iov = &(bp->vfdb->sriov); bnx2x_iov_init_one()
1237 err = bnx2x_sriov_info(bp, iov); bnx2x_iov_init_one()
1251 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * bnx2x_iov_init_one()
1252 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); bnx2x_iov_init_one()
1253 if (!bp->vfdb->vfs) { bnx2x_iov_init_one()
1260 for_each_vf(bp, i) { for_each_vf()
1261 bnx2x_vf(bp, i, index) = i; for_each_vf()
1262 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; for_each_vf()
1263 bnx2x_vf(bp, i, state) = VF_FREE; for_each_vf()
1264 mutex_init(&bnx2x_vf(bp, i, op_mutex)); for_each_vf()
1265 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; for_each_vf()
1269 if (!bnx2x_get_vf_igu_cam_info(bp)) {
1276 bp->vfdb->vfqs = kzalloc(
1280 if (!bp->vfdb->vfqs) {
1287 mutex_init(&bp->vfdb->event_mutex);
1289 mutex_init(&bp->vfdb->bulletin_mutex);
1291 if (SHMEM2_HAS(bp, sriov_switch_mode))
1292 SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
1297 __bnx2x_iov_free_vfdb(bp);
1301 void bnx2x_iov_remove_one(struct bnx2x *bp) bnx2x_iov_remove_one() argument
1306 if (!IS_SRIOV(bp)) bnx2x_iov_remove_one()
1309 bnx2x_disable_sriov(bp); bnx2x_iov_remove_one()
1312 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { bnx2x_iov_remove_one()
1313 bnx2x_pretend_func(bp, bnx2x_iov_remove_one()
1314 HW_VF_HANDLE(bp, bnx2x_iov_remove_one()
1315 bp->vfdb->sriov.first_vf_in_pf + bnx2x_iov_remove_one()
1318 bp->vfdb->sriov.first_vf_in_pf + vf_idx); bnx2x_iov_remove_one()
1319 bnx2x_vf_enable_internal(bp, 0); bnx2x_iov_remove_one()
1320 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_iov_remove_one()
1324 __bnx2x_iov_free_vfdb(bp); bnx2x_iov_remove_one()
1327 void bnx2x_iov_free_mem(struct bnx2x *bp) bnx2x_iov_free_mem() argument
1331 if (!IS_SRIOV(bp)) bnx2x_iov_free_mem()
1336 struct hw_dma *cxt = &bp->vfdb->context[i]; bnx2x_iov_free_mem()
1340 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, bnx2x_iov_free_mem()
1341 BP_VFDB(bp)->sp_dma.mapping, bnx2x_iov_free_mem()
1342 BP_VFDB(bp)->sp_dma.size); bnx2x_iov_free_mem()
1344 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, bnx2x_iov_free_mem()
1345 BP_VF_MBX_DMA(bp)->mapping, bnx2x_iov_free_mem()
1346 BP_VF_MBX_DMA(bp)->size); bnx2x_iov_free_mem()
1348 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, bnx2x_iov_free_mem()
1349 BP_VF_BULLETIN_DMA(bp)->mapping, bnx2x_iov_free_mem()
1350 BP_VF_BULLETIN_DMA(bp)->size); bnx2x_iov_free_mem()
1353 int bnx2x_iov_alloc_mem(struct bnx2x *bp) bnx2x_iov_alloc_mem() argument
1358 if (!IS_SRIOV(bp)) bnx2x_iov_alloc_mem()
1362 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * bnx2x_iov_alloc_mem()
1366 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); bnx2x_iov_alloc_mem()
1381 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); bnx2x_iov_alloc_mem()
1382 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, bnx2x_iov_alloc_mem()
1384 if (!BP_VFDB(bp)->sp_dma.addr) bnx2x_iov_alloc_mem()
1386 BP_VFDB(bp)->sp_dma.size = tot_size; bnx2x_iov_alloc_mem()
1389 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; bnx2x_iov_alloc_mem()
1390 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, bnx2x_iov_alloc_mem()
1392 if (!BP_VF_MBX_DMA(bp)->addr) bnx2x_iov_alloc_mem()
1395 BP_VF_MBX_DMA(bp)->size = tot_size; bnx2x_iov_alloc_mem()
1398 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; bnx2x_iov_alloc_mem()
1399 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, bnx2x_iov_alloc_mem()
1401 if (!BP_VF_BULLETIN_DMA(bp)->addr) bnx2x_iov_alloc_mem()
1404 BP_VF_BULLETIN_DMA(bp)->size = tot_size; bnx2x_iov_alloc_mem()
1412 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vfq_init() argument
1423 bnx2x_init_queue_obj(bp, &q->sp_obj, bnx2x_vfq_init()
1425 bnx2x_vf_sp(bp, vf, q_data), bnx2x_vfq_init()
1426 bnx2x_vf_sp_map(bp, vf, q_data), bnx2x_vfq_init()
1437 static int bnx2x_max_speed_cap(struct bnx2x *bp) bnx2x_max_speed_cap() argument
1439 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; bnx2x_max_speed_cap()
1448 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) bnx2x_iov_link_update_vf() argument
1450 struct bnx2x_link_report_data *state = &bp->last_reported_link; bnx2x_iov_link_update_vf()
1457 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); bnx2x_iov_link_update_vf()
1461 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_iov_link_update_vf()
1487 bulletin->link_speed = bnx2x_max_speed_cap(bp); bnx2x_iov_link_update_vf()
1499 rc = bnx2x_post_vf_bulletin(bp, idx); bnx2x_iov_link_update_vf()
1507 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_iov_link_update_vf()
1513 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_link_state() local
1514 struct bnx2x_virtf *vf = BP_VF(bp, idx); bnx2x_set_vf_link_state()
1524 return bnx2x_iov_link_update_vf(bp, idx); bnx2x_set_vf_link_state()
1527 void bnx2x_iov_link_update(struct bnx2x *bp) bnx2x_iov_link_update() argument
1531 if (!IS_SRIOV(bp)) bnx2x_iov_link_update()
1534 for_each_vf(bp, vfid) bnx2x_iov_link_update()
1535 bnx2x_iov_link_update_vf(bp, vfid); bnx2x_iov_link_update()
1539 int bnx2x_iov_nic_init(struct bnx2x *bp) bnx2x_iov_nic_init() argument
1543 if (!IS_SRIOV(bp)) { bnx2x_iov_nic_init()
1548 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); bnx2x_iov_nic_init()
1554 for_each_vf(bp, vfid) { for_each_vf()
1555 struct bnx2x_virtf *vf = BP_VF(bp, vfid); for_each_vf()
1557 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * for_each_vf()
1561 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + for_each_vf()
1570 bnx2x_iov_static_resc(bp, vf); for_each_vf()
1574 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); for_each_vf()
1587 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, for_each_vf()
1589 bnx2x_vf_sp(bp, vf, mcast_rdata), for_each_vf()
1590 bnx2x_vf_sp_map(bp, vf, mcast_rdata), for_each_vf()
1596 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) for_each_vf()
1597 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * for_each_vf()
1600 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + for_each_vf()
1604 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); for_each_vf()
1608 for_each_vf(bp, vfid) { for_each_vf()
1609 struct bnx2x_virtf *vf = BP_VF(bp, vfid); for_each_vf()
1612 vf->bus = bnx2x_vf_bus(bp, vfid); for_each_vf()
1613 vf->devfn = bnx2x_vf_devfn(bp, vfid); for_each_vf()
1614 bnx2x_vf_set_bars(bp, vf); for_each_vf()
1628 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) bnx2x_iov_chip_cleanup() argument
1632 if (!IS_SRIOV(bp)) bnx2x_iov_chip_cleanup()
1636 for_each_vf(bp, i) bnx2x_iov_chip_cleanup()
1637 bnx2x_vf_release(bp, BP_VF(bp, i)); bnx2x_iov_chip_cleanup()
1643 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) bnx2x_iov_init_ilt() argument
1646 struct bnx2x_ilt *ilt = BP_ILT(bp); bnx2x_iov_init_ilt()
1648 if (!IS_SRIOV(bp)) bnx2x_iov_init_ilt()
1653 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); bnx2x_iov_init_ilt()
1662 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) bnx2x_iov_is_vf_cid() argument
1669 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, bnx2x_vf_handle_classification_eqe() argument
1681 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, bnx2x_vf_handle_classification_eqe()
1685 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, bnx2x_vf_handle_classification_eqe()
1700 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, bnx2x_vf_handle_mcast_eqe() argument
1711 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); bnx2x_vf_handle_mcast_eqe()
1719 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, bnx2x_vf_handle_filters_eqe() argument
1727 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, bnx2x_vf_handle_rss_update_eqe() argument
1733 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) bnx2x_iov_eq_sp_event() argument
1740 if (!IS_SRIOV(bp)) bnx2x_iov_eq_sp_event()
1778 if (!bnx2x_iov_is_vf_cid(bp, cid)) { bnx2x_iov_eq_sp_event()
1790 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_iov_eq_sp_event()
1802 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, bnx2x_iov_eq_sp_event()
1810 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); bnx2x_iov_eq_sp_event()
1815 bnx2x_vf_handle_mcast_eqe(bp, vf); bnx2x_iov_eq_sp_event()
1820 bnx2x_vf_handle_filters_eqe(bp, vf); bnx2x_iov_eq_sp_event()
1825 bnx2x_vf_handle_rss_update_eqe(bp, vf); bnx2x_iov_eq_sp_event()
1835 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) bnx2x_vf_by_cid() argument
1842 return bnx2x_vf_by_abs_fid(bp, abs_vfid); bnx2x_vf_by_cid()
1845 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, bnx2x_iov_set_queue_sp_obj() argument
1850 if (!IS_SRIOV(bp)) bnx2x_iov_set_queue_sp_obj()
1853 vf = bnx2x_vf_by_cid(bp, vf_cid); bnx2x_iov_set_queue_sp_obj()
1867 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bnx2x_iov_adjust_stats_req() argument
1876 if (!IS_SRIOV(bp)) bnx2x_iov_adjust_stats_req()
1879 if (!NO_FCOE(bp)) bnx2x_iov_adjust_stats_req()
1883 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; bnx2x_iov_adjust_stats_req()
1889 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, bnx2x_iov_adjust_stats_req()
1892 cur_data_offset = bp->fw_stats_data_mapping + bnx2x_iov_adjust_stats_req()
1896 cur_query_entry = &bp->fw_stats_req-> bnx2x_iov_adjust_stats_req()
1899 for_each_vf(bp, i) { for_each_vf()
1901 struct bnx2x_virtf *vf = BP_VF(bp, i); for_each_vf()
1918 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == for_each_vfq()
1945 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1949 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, bnx2x_vf_qtbl_set_q() argument
1955 REG_WR(bp, reg, val); bnx2x_vf_qtbl_set_q()
1958 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_clr_qtbl() argument
1963 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, bnx2x_vf_clr_qtbl()
1967 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_igu_disable() argument
1972 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); bnx2x_vf_igu_disable()
1973 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); bnx2x_vf_igu_disable()
1976 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); bnx2x_vf_igu_disable()
1977 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); bnx2x_vf_igu_disable()
1980 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_max_queue_cnt() argument
1987 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_chk_avail_resc() argument
1990 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_chk_avail_resc()
1991 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_chk_avail_resc()
2001 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_acquire() argument
2004 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * bnx2x_vf_acquire()
2008 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + bnx2x_vf_acquire()
2021 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { bnx2x_vf_acquire()
2040 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { bnx2x_vf_acquire()
2049 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_acquire()
2050 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_acquire()
2080 bnx2x_vfq_init(bp, vf, q); for_each_vfq()
2086 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) bnx2x_vf_init() argument
2095 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, for_each_vf_sb()
2109 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2113 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2116 func_init.pf_id = BP_FUNC(bp);
2118 bnx2x_func_init(bp, &func_init);
2121 bnx2x_vf_enable_access(bp, vf->abs_vfid);
2122 bnx2x_vf_enable_traffic(bp, vf);
2126 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2132 bnx2x_post_vf_bulletin(bp, vf->index);
2149 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_close() argument
2157 rc = bnx2x_vf_queue_teardown(bp, vf, i); bnx2x_vf_close()
2164 bnx2x_vf_igu_disable(bp, vf); bnx2x_vf_close()
2168 bnx2x_vf_clr_qtbl(bp, vf); bnx2x_vf_close()
2179 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); bnx2x_vf_close()
2196 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_free() argument
2209 rc = bnx2x_vf_close(bp, vf); bnx2x_vf_free()
2215 bnx2x_vf_free_resc(bp, vf); bnx2x_vf_free()
2229 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_rss_update() argument
2234 return bnx2x_config_rss(bp, rss); bnx2x_vf_rss_update()
2237 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_tpa_update() argument
2260 rc = bnx2x_queue_state_change(bp, &qstate); bnx2x_vf_tpa_update()
2276 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_release() argument
2281 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); bnx2x_vf_release()
2283 rc = bnx2x_vf_free(bp, vf); bnx2x_vf_release()
2288 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); bnx2x_vf_release()
2292 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_lock_vf_pf_channel() argument
2312 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_unlock_vf_pf_channel() argument
2343 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) bnx2x_set_pf_tx_switching() argument
2350 prev_flags = bp->flags; bnx2x_set_pf_tx_switching()
2352 bp->flags |= TX_SWITCHING; bnx2x_set_pf_tx_switching()
2354 bp->flags &= ~TX_SWITCHING; bnx2x_set_pf_tx_switching()
2355 if (prev_flags == bp->flags) bnx2x_set_pf_tx_switching()
2359 if ((bp->state != BNX2X_STATE_OPEN) || bnx2x_set_pf_tx_switching()
2360 (bnx2x_get_q_logical_state(bp, bnx2x_set_pf_tx_switching()
2361 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != bnx2x_set_pf_tx_switching()
2379 for_each_eth_queue(bp, i) { for_each_eth_queue()
2380 struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_eth_queue()
2383 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; for_each_eth_queue()
2386 rc = bnx2x_queue_state_change(bp, &q_params); for_each_eth_queue()
2399 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); bnx2x_sriov_configure() local
2401 if (!IS_SRIOV(bp)) { bnx2x_sriov_configure()
2406 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", bnx2x_sriov_configure()
2407 num_vfs_param, BNX2X_NR_VIRTFN(bp)); bnx2x_sriov_configure()
2410 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_sriov_configure()
2416 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { bnx2x_sriov_configure()
2418 num_vfs_param, BNX2X_NR_VIRTFN(bp)); bnx2x_sriov_configure()
2419 num_vfs_param = BNX2X_NR_VIRTFN(bp); bnx2x_sriov_configure()
2422 bp->requested_nr_virtfn = num_vfs_param; bnx2x_sriov_configure()
2424 bnx2x_set_pf_tx_switching(bp, false); bnx2x_sriov_configure()
2425 bnx2x_disable_sriov(bp); bnx2x_sriov_configure()
2428 return bnx2x_enable_sriov(bp); bnx2x_sriov_configure()
2434 int bnx2x_enable_sriov(struct bnx2x *bp) bnx2x_enable_sriov() argument
2436 int rc = 0, req_vfs = bp->requested_nr_virtfn; bnx2x_enable_sriov()
2444 first_vf = bp->vfdb->sriov.first_vf_in_pf; bnx2x_enable_sriov()
2448 BP_VFDB(bp)->vf_sbs_pool / req_vfs); bnx2x_enable_sriov()
2452 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); bnx2x_enable_sriov()
2455 vf_sb_count(BP_VF(bp, vf_idx)) = 0; bnx2x_enable_sriov()
2457 bp->vfdb->vf_sbs_pool = 0; bnx2x_enable_sriov()
2460 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; bnx2x_enable_sriov()
2469 REG_WR(bp, address, igu_entry); bnx2x_enable_sriov()
2476 bnx2x_get_vf_igu_cam_info(bp); bnx2x_enable_sriov()
2479 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); bnx2x_enable_sriov()
2482 for_each_vf(bp, vf_idx) { for_each_vf()
2483 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); for_each_vf()
2486 vf->vfqs = &bp->vfdb->vfqs[qcount]; for_each_vf()
2488 bnx2x_iov_static_resc(bp, vf); for_each_vf()
2496 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2497 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2502 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2508 bnx2x_disable_sriov(bp);
2510 rc = bnx2x_set_pf_tx_switching(bp, true);
2514 rc = pci_enable_sriov(bp->pdev, req_vfs);
2523 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) bnx2x_pf_set_vfs_vlan() argument
2529 for_each_vf(bp, vfidx) { for_each_vf()
2530 bulletin = BP_VF_BULLETIN(bp, vfidx); for_each_vf()
2532 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); for_each_vf()
2536 void bnx2x_disable_sriov(struct bnx2x *bp) bnx2x_disable_sriov() argument
2538 if (pci_vfs_assigned(bp->pdev)) { bnx2x_disable_sriov()
2544 pci_disable_sriov(bp->pdev); bnx2x_disable_sriov()
2547 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, bnx2x_vf_op_prep() argument
2552 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_vf_op_prep()
2557 if (!IS_SRIOV(bp)) { bnx2x_vf_op_prep()
2562 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { bnx2x_vf_op_prep()
2564 vfidx, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_op_prep()
2569 *vf = BP_VF(bp, vfidx); bnx2x_vf_op_prep()
2570 *bulletin = BP_VF_BULLETIN(bp, vfidx); bnx2x_vf_op_prep()
2595 struct bnx2x *bp = netdev_priv(dev); bnx2x_get_vf_config() local
2603 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_get_vf_config()
2621 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { bnx2x_get_vf_config()
2622 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, bnx2x_get_vf_config()
2624 vlan_obj->get_n_elements(bp, vlan_obj, 1, bnx2x_get_vf_config()
2629 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_get_vf_config()
2646 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_get_vf_config()
2671 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_mac() local
2682 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_set_vf_mac()
2686 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_mac()
2695 rc = bnx2x_post_vf_bulletin(bp, vfidx); bnx2x_set_vf_mac()
2698 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_mac()
2706 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); bnx2x_set_vf_mac()
2714 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_set_vf_mac()
2718 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); bnx2x_set_vf_mac()
2722 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); bnx2x_set_vf_mac()
2730 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); bnx2x_set_vf_mac()
2739 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, bnx2x_set_vf_mac()
2743 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); bnx2x_set_vf_mac()
2749 static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp, bnx2x_set_vf_vlan_acceptance() argument
2762 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, bnx2x_set_vf_vlan_acceptance()
2765 bnx2x_config_rx_mode(bp, &rx_ramrod); bnx2x_set_vf_vlan_acceptance()
2768 static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_set_vf_vlan_filter() argument
2783 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); bnx2x_set_vf_vlan_filter()
2795 struct bnx2x *bp = netdev_priv(dev); bnx2x_set_vf_vlan() local
2811 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); bnx2x_set_vf_vlan()
2821 mutex_lock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_vlan()
2830 rc = bnx2x_post_vf_bulletin(bp, vfidx); bnx2x_set_vf_vlan()
2833 mutex_unlock(&bp->vfdb->bulletin_mutex); bnx2x_set_vf_vlan()
2837 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != bnx2x_set_vf_vlan()
2842 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) bnx2x_set_vf_vlan()
2846 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); bnx2x_set_vf_vlan()
2851 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, bnx2x_set_vf_vlan()
2863 bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan); bnx2x_set_vf_vlan()
2865 rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true); bnx2x_set_vf_vlan()
2879 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) != for_each_vfq()
2914 rc = bnx2x_queue_state_change(bp, &q_params); for_each_vfq()
2922 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2946 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) bnx2x_sample_bulletin() argument
2958 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, bnx2x_sample_bulletin()
2961 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); bnx2x_sample_bulletin()
2963 if (bp->shadow_bulletin.content.crc == crc) bnx2x_sample_bulletin()
2967 bp->shadow_bulletin.content.crc, crc); bnx2x_sample_bulletin()
2975 bulletin = &bp->shadow_bulletin.content; bnx2x_sample_bulletin()
2978 if (bp->old_bulletin.version == bulletin->version) bnx2x_sample_bulletin()
2983 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { bnx2x_sample_bulletin()
2985 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); bnx2x_sample_bulletin()
2992 bp->vf_link_vars.line_speed = bulletin->link_speed; bnx2x_sample_bulletin()
2993 bp->vf_link_vars.link_report_flags = 0; bnx2x_sample_bulletin()
2997 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin()
3001 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin()
3005 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin()
3009 &bp->vf_link_vars.link_report_flags); bnx2x_sample_bulletin()
3010 __bnx2x_link_report(bp); bnx2x_sample_bulletin()
3013 /* copy new bulletin board to bp */ bnx2x_sample_bulletin()
3014 memcpy(&bp->old_bulletin, bulletin, bnx2x_sample_bulletin()
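bnx2x_sample_bulletin() is a lock-free consumer: snapshot the PF-written board, validate the CRC of the snapshot, and skip work when the version has not advanced. A minimal model of that flow (the checksum below is a toy placeholder, not the driver's CRC):

#include <stdio.h>
#include <string.h>

struct bulletin {
    unsigned crc;
    unsigned version;
    unsigned char mac[6];
};

/* Placeholder checksum; the driver uses a real CRC over the content. */
static unsigned toy_crc(const struct bulletin *b)
{
    return b->version ^ b->mac[0];
}

static int sample(const struct bulletin *shared, struct bulletin *old)
{
    struct bulletin shadow;

    memcpy(&shadow, shared, sizeof(shadow));   /* snapshot first */
    if (shadow.crc != toy_crc(&shadow))
        return -1;                             /* torn/corrupt copy, retry later */
    if (shadow.version == old->version)
        return 0;                              /* nothing new */
    *old = shadow;                             /* accept the update */
    return 1;
}

int main(void)
{
    struct bulletin pf = { .version = 2, .mac = {0xaa} };
    struct bulletin old = { .version = 1 };
    pf.crc = toy_crc(&pf);
    printf("sample -> %d (new version %u)\n", sample(&pf, &old), old.version);
    return 0;
}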
3020 void bnx2x_timer_sriov(struct bnx2x *bp) bnx2x_timer_sriov() argument
3022 bnx2x_sample_bulletin(bp); bnx2x_timer_sriov()
3025 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) bnx2x_timer_sriov()
3026 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, bnx2x_timer_sriov()
3030 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) bnx2x_vf_doorbells() argument
3033 return bp->regview + PXP_VF_ADDR_DB_START; bnx2x_vf_doorbells()
3036 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) bnx2x_vf_pci_dealloc() argument
3038 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, bnx2x_vf_pci_dealloc()
3040 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, bnx2x_vf_pci_dealloc()
3044 int bnx2x_vf_pci_alloc(struct bnx2x *bp) bnx2x_vf_pci_alloc() argument
3046 mutex_init(&bp->vf2pf_mutex); bnx2x_vf_pci_alloc()
3049 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, bnx2x_vf_pci_alloc()
3051 if (!bp->vf2pf_mbox) bnx2x_vf_pci_alloc()
3055 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, bnx2x_vf_pci_alloc()
3057 if (!bp->pf2vf_bulletin) bnx2x_vf_pci_alloc()
3060 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); bnx2x_vf_pci_alloc()
3065 bnx2x_vf_pci_dealloc(bp); bnx2x_vf_pci_alloc()
3069 void bnx2x_iov_channel_down(struct bnx2x *bp) bnx2x_iov_channel_down() argument
3074 if (!IS_SRIOV(bp)) bnx2x_iov_channel_down()
3077 for_each_vf(bp, vf_idx) { for_each_vf()
3081 bulletin = BP_VF_BULLETIN(bp, vf_idx); for_each_vf()
3085 bnx2x_post_vf_bulletin(bp, vf_idx); for_each_vf()
3091 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); bnx2x_iov_task() local
3093 if (!netif_running(bp->dev)) bnx2x_iov_task()
3097 &bp->iov_task_state)) bnx2x_iov_task()
3098 bnx2x_vf_handle_flr_event(bp); bnx2x_iov_task()
3101 &bp->iov_task_state)) bnx2x_iov_task()
3102 bnx2x_vf_mbx(bp); bnx2x_iov_task()
3105 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) bnx2x_schedule_iov_task() argument
3108 set_bit(flag, &bp->iov_task_state); bnx2x_schedule_iov_task()
3111 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); bnx2x_schedule_iov_task()
bnx2x.h
47 #define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
88 bp->dev ? (bp->dev->name) : "?", \
93 if (unlikely(bp->msg_enable & (__mask))) \
99 if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
105 if (unlikely(bp->msg_enable & (__mask))) \
112 if (unlikely(netif_msg_probe(bp))) \
115 bp->dev ? (bp->dev->name) : "?", \
124 bp->dev ? (bp->dev->name) : "?", \
134 if (unlikely(netif_msg_probe(bp))) \
135 dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
139 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
143 bp->panic = 1; \
145 bnx2x_panic_dump(bp, true); \
150 bp->panic = 1; \
152 bnx2x_panic_dump(bp, false); \
163 #define REG_ADDR(bp, offset) ((bp->regview) + (offset))
165 #define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
166 #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
167 #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
169 #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
170 #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
171 #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
173 #define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset)
174 #define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val)
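The REG_* accessors are thin offset wrappers around the mapped BAR, with width-suffixed variants; the kernel versions use readl()/writel() for correct MMIO semantics, while indirect access goes through the config-window helpers. A userspace model of just the offset math over a plain buffer:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the mapped register BAR; a union keeps the
 * buffer 32-bit aligned. Real MMIO goes through readl()/writel(). */
static union { uint8_t b[4096]; uint32_t w[1024]; } bar;

#define REG_ADDR(off)     ((void *)(bar.b + (off)))
#define REG_RD(off)       (*(volatile uint32_t *)REG_ADDR(off))
#define REG_WR(off, val)  (*(volatile uint32_t *)REG_ADDR(off) = (uint32_t)(val))
#define REG_WR8(off, val) (*(volatile uint8_t *)REG_ADDR(off) = (uint8_t)(val))

int main(void)
{
    REG_WR(0x100, 0xdeadbeef);     /* 32-bit write at offset 0x100 */
    REG_WR8(0x104, 0x5a);          /* byte-wide variant */
    printf("0x100 = 0x%08x, 0x104 = 0x%02x\n", REG_RD(0x100), bar.b[0x104]);
    return 0;
}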
176 #define REG_RD_DMAE(bp, offset, valp, len32) \
178 bnx2x_read_dmae(bp, offset, len32);\
179 memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
182 #define REG_WR_DMAE(bp, offset, valp, len32) \
184 memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
185 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
189 #define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
190 REG_WR_DMAE(bp, offset, valp, len32)
192 #define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
194 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
195 bnx2x_write_big_buf_wb(bp, addr, len32); \
198 #define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \
200 #define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
201 #define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
203 #define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \
205 #define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
206 #define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
207 #define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
209 #define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
212 #define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
213 #define MF_CFG_WR(bp, field, val) REG_WR(bp,\
214 MF_CFG_ADDR(bp, field), (val))
215 #define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
217 #define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
218 (SHMEM2_RD((bp), size) > \
221 #define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
222 #define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
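SHMEM2_HAS() deserves a note: shmem2 is versioned by its recorded size, so a field "exists" only when the firmware-written size reaches past that field's offset. A runnable model of the test (the layout below is illustrative, not the real shmem2 region):

#include <stddef.h>
#include <stdio.h>

struct shmem2 {            /* illustrative layout, not the real one */
    unsigned size;         /* firmware records how much of shmem2 it filled */
    unsigned old_field;
    unsigned sriov_switch_mode;
};

#define SHMEM2_HAS(base, field) \
    ((base) && ((base)->size > offsetof(struct shmem2, field)))

int main(void)
{
    struct shmem2 s = { .size = offsetof(struct shmem2, sriov_switch_mode) };

    /* size stops right before sriov_switch_mode, so that field is absent */
    printf("has old_field: %d\n", SHMEM2_HAS(&s, old_field));
    printf("has sriov_switch_mode: %d\n", SHMEM2_HAS(&s, sriov_switch_mode));
    return 0;
}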
244 (&bp->def_status_blk->sp_sb.\
248 (&bp->def_status_blk->sp_sb.\
275 #define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
276 (bp)->max_cos)
280 #define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
283 #define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
286 #define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
288 #define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
289 (UIO_DPM_ALIGN(bp) == UIO_DPM))
291 #define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
292 (UIO_DPM_CID0_OFFSET(bp)))
294 #define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
295 BNX2X_1st_NON_L2_ETH_CID(bp))
297 #define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
299 #define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
301 #define CNIC_SUPPORT(bp) ((bp)->cnic_support)
302 #define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
303 #define CNIC_LOADED(bp) ((bp)->cnic_loaded)
304 #define FCOE_INIT(bp) ((bp)->fcoe_init)
317 #define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
318 #define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
319 (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
322 #define FP_COS_TO_TXQ(fp, cos, bp) \
323 ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
336 #define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
337 #define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
371 #define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512)
372 #define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ? \
375 #define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
419 #define NUM_SGE_REQ (MAX_AGG_QS(bp) + \
420 (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
423 #define SGE_TH_LO(bp) (NUM_SGE_REQ + \
425 #define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
539 struct bnx2x *bp; /* parent */ member in struct:bnx2x_fastpath
615 #define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
616 #define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
617 #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
618 #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
734 #define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
736 #define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
737 #define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
738 #define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
739 #define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
740 #define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
744 #define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
745 #define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
746 #define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
799 #define NUM_BD_REQ BRB_SIZE(bp)
802 #define BD_TH_LO(bp) (NUM_BD_REQ + \
804 FW_DROP_LEVEL(bp))
805 #define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
807 #define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
809 #define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
847 #define NUM_RCQ_REQ BRB_SIZE(bp)
850 #define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \
852 FW_DROP_LEVEL(bp))
853 #define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
876 #define DOORBELL(bp, cid, val) \
878 writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
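DOORBELL() computes a per-connection doorbell address as a fixed stride (db_size) times the connection id, off the doorbell BAR. A toy model of the address math (stride and cid values are examples; the kernel writes via writel()):

#include <stdint.h>
#include <stdio.h>

/* One fixed-size doorbell cell per connection id, laid out linearly. */
static uint32_t db_area[64 * 16 / 4];   /* 64 cids x 16-byte stride (example) */

static void doorbell(unsigned db_size, unsigned cid, uint32_t val)
{
    db_area[(db_size * cid) / 4] = val;  /* kernel: writel(val, doorbells + db_size*cid) */
}

int main(void)
{
    doorbell(16, 3, 0x1);                /* ring cid 3 */
    printf("cell for cid 3 = 0x%x\n", db_area[16 * 3 / 4]);
    return 0;
}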
956 #define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0)
958 #define CHIP_NUM(bp) (bp->common.chip_id >> 16)
982 #define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
983 #define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
984 #define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
985 #define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
986 #define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
987 #define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
988 #define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
989 #define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
990 #define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
991 #define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
992 #define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
993 #define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
994 #define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
995 #define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
996 #define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
997 #define CHIP_IS_57840(bp) \
998 ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
999 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
1000 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
1001 #define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
1002 (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
1003 #define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
1004 #define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
1005 CHIP_IS_57711E(bp))
1006 #define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \
1007 CHIP_IS_57811_MF(bp) || \
1008 CHIP_IS_57811_VF(bp))
1009 #define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
1010 CHIP_IS_57712_MF(bp) || \
1011 CHIP_IS_57712_VF(bp))
1012 #define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
1013 CHIP_IS_57800_MF(bp) || \
1014 CHIP_IS_57800_VF(bp) || \
1015 CHIP_IS_57810(bp) || \
1016 CHIP_IS_57810_MF(bp) || \
1017 CHIP_IS_57810_VF(bp) || \
1018 CHIP_IS_57811xx(bp) || \
1019 CHIP_IS_57840(bp) || \
1020 CHIP_IS_57840_MF(bp) || \
1021 CHIP_IS_57840_VF(bp))
1022 #define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
1023 #define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
1024 #define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
1028 #define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
1032 #define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
1034 #define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
1035 !(CHIP_REV_VAL(bp) & 0x00001000))
1037 #define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
1038 (CHIP_REV_VAL(bp) & 0x00001000))
1040 #define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
1041 ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
1043 #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
1044 #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
1045 #define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
1048 #define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
1049 CHIP_REV_SIM(bp) :\
1050 CHIP_REV_VAL(bp))
1051 #define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
1052 (CHIP_REV(bp) == CHIP_REV_Bx))
1053 #define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
1054 (CHIP_REV(bp) == CHIP_REV_Ax))
1066 #define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
1087 #define CHIP_INT_MODE_IS_NBC(bp) \
1088 (!CHIP_IS_E1x(bp) && \
1089 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
1090 #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
1096 #define CHIP_MODE(bp) (bp->common.chip_port_mode)
1097 #define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
1285 #define bnx2x_sp(bp, var) (&bp->slowpath->var)
1286 #define bnx2x_sp_mapping(bp, var) \
1287 (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
1342 (&bp->def_status_blk->sp_sb.\
1460 #define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
1461 #define BP_PORT(bp) (bp->pfid & 1)
1462 #define BP_FUNC(bp) (bp->pfid)
1463 #define BP_ABS_FUNC(bp) (bp->pf_num)
1464 #define BP_VN(bp) ((bp)->pfid >> 1)
1465 #define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
1466 #define BP_L_ID(bp) (BP_VN(bp) << 2)
1467 #define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\
1468 (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
1469 #define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
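The BP_* macros decode pf_num/pfid into path, port, VN and firmware-mailbox index; in the non-E1x two-port case, path is the low bit of pf_num, port the low bit of pfid, and VN the remaining pfid bits. A runnable check of that arithmetic (4-port mode not modeled):

#include <stdio.h>

/* Non-E1x, two-port decode mirroring the macros above. */
static void decode(unsigned pf_num, unsigned pfid)
{
    unsigned path = pf_num & 1;          /* BP_PATH */
    unsigned port = pfid & 1;            /* BP_PORT */
    unsigned func = pfid;                /* BP_FUNC */
    unsigned vn   = pfid >> 1;           /* BP_VN   */
    unsigned fw_mb_idx = port + vn * 1;  /* BP_FW_MB_IDX_VN, stride 1 here */

    printf("pf_num=%u pfid=%u -> path=%u port=%u func=%u vn=%u fw_mb=%u\n",
           pf_num, pfid, path, port, func, vn, fw_mb_idx);
}

int main(void)
{
    decode(5, 5);   /* e.g. absolute function 5 */
    return 0;
}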
1495 #define IRO (bp->iro_arr)
1593 #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
1596 #define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
1597 #define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
1599 #define IS_VF(bp) false
1600 #define IS_PF(bp) true
1603 #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1604 #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
1605 #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
1650 #define IS_MF(bp) (bp->mf_mode != 0)
1651 #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1652 #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
1653 #define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
1655 #define IS_MF_UFP(bp) (IS_MF_SD(bp) && \
1656 bp->mf_sub_mode == SUB_MF_MODE_UFP)
1657 #define IS_MF_BD(bp) (IS_MF_SD(bp) && \
1658 bp->mf_sub_mode == SUB_MF_MODE_BD)
1750 #define BP_ILT(bp) ((bp)->ilt)
1756 #define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
1763 #define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1764 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1765 #define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1766 + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
1767 #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1825 #define GUNZIP_BUF(bp) (bp->gunzip_buf)
1826 #define GUNZIP_PHYS(bp) (bp->gunzip_mapping)
1827 #define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen)
1835 #define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
1845 #define INIT_OPS(bp) (bp->init_ops)
1846 #define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets)
1847 #define INIT_DATA(bp) (bp->init_data)
1848 #define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data)
1849 #define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data)
1850 #define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data)
1851 #define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data)
1852 #define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data)
1853 #define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data)
1854 #define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1855 #define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1862 #define IS_SRIOV(bp) ((bp)->vfdb)
1955 #define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1956 #define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
1957 #define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
1958 (bp)->num_cnic_queues)
1959 #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1961 #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1963 #define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
1964 /* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
1988 #define for_each_cnic_queue(bp, var) \
1989 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1991 if (skip_queue(bp, var)) \
1995 #define for_each_eth_queue(bp, var) \
1996 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1998 #define for_each_nondefault_eth_queue(bp, var) \
1999 for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
2001 #define for_each_queue(bp, var) \
2002 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
2003 if (skip_queue(bp, var)) \
2008 #define for_each_valid_rx_queue(bp, var) \
2010 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
2011 BNX2X_NUM_ETH_QUEUES(bp)); \
2013 if (skip_rx_queue(bp, var)) \
2017 #define for_each_rx_queue_cnic(bp, var) \
2018 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
2020 if (skip_rx_queue(bp, var)) \
2024 #define for_each_rx_queue(bp, var) \
2025 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
2026 if (skip_rx_queue(bp, var)) \
2031 #define for_each_valid_tx_queue(bp, var) \
2033 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
2034 BNX2X_NUM_ETH_QUEUES(bp)); \
2036 if (skip_tx_queue(bp, var)) \
2040 #define for_each_tx_queue_cnic(bp, var) \
2041 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
2043 if (skip_tx_queue(bp, var)) \
2047 #define for_each_tx_queue(bp, var) \
2048 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
2049 if (skip_tx_queue(bp, var)) \
2053 #define for_each_nondefault_queue(bp, var) \
2054 for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
2055 if (skip_queue(bp, var)) \
2065 #define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
2070 #define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
2072 #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
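The for_each_* iterators above are plain index loops ending in an if/continue/else, so the caller's statement binds to the else branch and skipped (e.g. disabled-FCoE) slots fall through the continue. A standalone model of the idiom:

#include <stdio.h>

#define NUM_QUEUES 5
#define FCOE_IDX   4          /* illustrative: last slot reserved for FCoE */
static int no_fcoe = 1;

#define skip_queue(i) (no_fcoe && (i) == FCOE_IDX)

/* Same shape as the kernel macros: the caller's statement attaches to
 * 'else', so skipped indices take the 'continue' instead. */
#define for_each_queue(var)                      \
    for ((var) = 0; (var) < NUM_QUEUES; (var)++) \
        if (skip_queue(var))                     \
            continue;                            \
        else

int main(void)
{
    int i;
    for_each_queue(i)
        printf("servicing queue %d\n", i);   /* prints 0..3, skips FCoE */
    return 0;
}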
2077 * @bp: driver handle
2092 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
2096 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
2103 * @bp: driver handle
2114 int bnx2x_del_all_macs(struct bnx2x *bp,
2119 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
2120 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
2122 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
2123 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
2124 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
2125 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
2126 void bnx2x_read_mf_cfg(struct bnx2x *bp);
2128 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
2131 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
2132 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
2134 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
2137 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
2140 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2142 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
2146 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
2147 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
2148 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
2150 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
2153 void bnx2x_calc_fc_adv(struct bnx2x *bp);
2154 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2156 void bnx2x_update_coalesce(struct bnx2x *bp);
2157 int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
2159 bool bnx2x_port_after_undi(struct bnx2x *bp);
2161 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, reg_poll() argument
2167 val = REG_RD(bp, reg); reg_poll()
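
reg_poll() simply re-reads the register until it matches the expected value or the time budget runs out, and returns the last value read. A hedged usage sketch; the register name is a placeholder, and the final (truncated) parameter is taken to be the per-iteration delay as in the full driver source.

	/* total budget 200 ms, re-sampling every 10 ms */
	u32 val = reg_poll(bp, SOME_STATUS_REG /* placeholder */, 0x1, 200, 10);

	if (val != 0x1)
		BNX2X_ERR("timed out polling status register\n");
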
2178 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2182 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
2187 dma_free_coherent(&bp->pdev->dev, size, x, y); \
2281 #define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
2288 #define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
2289 BP_VN(bp))
2290 #define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
2301 #define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
2302 IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
2316 #define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
2317 (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
2359 #define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \
2360 IS_MF_FCOE_AFEX(bp))
2365 GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
2477 (&bp->def_status_blk->sp_sb.\
2487 #define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
2488 TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
2508 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
2525 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
2526 void bnx2x_notify_link_changed(struct bnx2x *bp);
2528 #define BNX2X_MF_SD_PROTOCOL(bp) \
2529 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
2531 #define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
2532 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
2534 #define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \
2535 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE)
2537 #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
2538 #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
2539 #define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp))
2541 #define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp))
2548 #define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \
2551 #define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \
2552 (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
2554 #define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \
2555 (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
2557 #define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \
2558 (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)
2560 #define IS_MF_FCOE_AFEX(bp) \
2561 (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))
2563 #define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \
2564 (IS_MF_SD(bp) && \
2565 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2566 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2568 #define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \
2569 (IS_MF_SI(bp) && \
2570 (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \
2571 BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)))
2573 #define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \
2574 (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \
2575 IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
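
A hedged illustration of how the multi-function protocol macros compose: switch-dependent (SD) mode decodes the protocol from mf_config, switch-independent (SI) and AFEX modes decode mf_ext_config, and the IS_MF_*_PERSONALITY_ONLY() helpers fold both paths together. The helper below is illustrative, not part of the driver.

	static bool example_l2_traffic_allowed(struct bnx2x *bp)
	{
		/* in a storage-only personality the PF carries no regular
		 * Ethernet traffic, whichever MF flavor selected it
		 */
		return !IS_MF_STORAGE_PERSONALITY_ONLY(bp);
	}
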
2580 #define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
2601 void bnx2x_set_local_cmng(struct bnx2x *bp);
2603 void bnx2x_update_mng_version(struct bnx2x *bp);
2605 void bnx2x_update_mfw_dump(struct bnx2x *bp);
2607 #define MCPR_SCRATCH_BASE(bp) \
2608 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2612 void bnx2x_init_ptp(struct bnx2x *bp);
2613 int bnx2x_configure_ptp_filters(struct bnx2x *bp);
2614 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
2622 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
H A Dbnx2x_sriov.h214 #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
216 #define for_each_vf(bp, var) \
217 for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
227 #define HW_VF_HANDLE(bp, abs_vfid) \
228 (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
235 #define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
236 #define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
242 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
245 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
313 #define BP_VFDB(bp) ((bp)->vfdb)
316 #define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
317 &((bp)->vfdb->vfs[idx]) : NULL)
318 #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
325 #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i])
330 #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
332 #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid]))
335 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
336 #define BP_VF_BULLETIN(bp, vf) \
337 (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
341 #define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
344 #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
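
A hedged sketch combining for_each_vf() with the accessors above: walk the VF database, skipping slots BP_VF() reports as unallocated. The function name and loop body are illustrative only.

	static void example_for_all_vfs(struct bnx2x *bp)
	{
		int vf_idx;

		for_each_vf(bp, vf_idx) {
			struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

			if (!vf)	/* vfdb or vfs array not set up */
				continue;
			/* ... e.g. read bnx2x_vf(bp, vf_idx, abs_vfid) ... */
		}
	}
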
399 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
400 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
401 void bnx2x_iov_remove_one(struct bnx2x *bp);
402 void bnx2x_iov_free_mem(struct bnx2x *bp);
403 int bnx2x_iov_alloc_mem(struct bnx2x *bp);
404 int bnx2x_iov_nic_init(struct bnx2x *bp);
405 int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
406 void bnx2x_iov_init_dq(struct bnx2x *bp);
407 void bnx2x_iov_init_dmae(struct bnx2x *bp);
408 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
410 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
411 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
412 void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
414 void bnx2x_vf_mbx(struct bnx2x *bp);
415 void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
417 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
423 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
426 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
430 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
435 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
440 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
446 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
450 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
453 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
455 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
458 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
461 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
463 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
465 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
468 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
477 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
478 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
479 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
484 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
485 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
488 void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
493 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
497 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
500 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
501 int bnx2x_vfpf_release(struct bnx2x *bp);
503 int bnx2x_vfpf_init(struct bnx2x *bp);
504 void bnx2x_vfpf_close_vf(struct bnx2x *bp);
505 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
507 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
508 int bnx2x_vfpf_config_rss(struct bnx2x *bp,
511 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
513 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, bnx2x_vf_fill_fw_str() argument
516 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); bnx2x_vf_fill_fw_str()
519 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, bnx2x_vf_ustorm_prods_offset() argument
523 bp->acquire_resp.resc.hw_qid[fp->index] * bnx2x_vf_ustorm_prods_offset()
527 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
528 void bnx2x_timer_sriov(struct bnx2x *bp);
529 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
530 void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
531 int bnx2x_vf_pci_alloc(struct bnx2x *bp);
532 int bnx2x_enable_sriov(struct bnx2x *bp);
533 void bnx2x_disable_sriov(struct bnx2x *bp); bnx2x_vf_headroom()
534 static inline int bnx2x_vf_headroom(struct bnx2x *bp) bnx2x_vf_headroom() argument
536 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; bnx2x_vf_headroom()
538 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
540 void bnx2x_iov_channel_down(struct bnx2x *bp);
544 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
546 void bnx2x_iov_link_update(struct bnx2x *bp);
547 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
551 int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
554 #define GET_NUM_VFS_PER_PATH(bp) 0
555 #define GET_NUM_VFS_PER_PF(bp) 0
559 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, bnx2x_iov_set_queue_sp_obj() argument
561 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} bnx2x_iov_eq_sp_event() argument
562 static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, bnx2x_iov_eq_sp_event() argument
564 static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} bnx2x_vf_mbx_schedule() argument
565 static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, bnx2x_vf_mbx_schedule() argument
567 static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } bnx2x_iov_init_dq() argument
568 static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} bnx2x_iov_alloc_mem() argument
569 static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } bnx2x_iov_free_mem() argument
570 static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} bnx2x_iov_chip_cleanup() argument
571 static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } bnx2x_iov_init_dmae() argument
572 static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} bnx2x_iov_init_one() argument
573 static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, bnx2x_iov_init_one() argument
575 static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} bnx2x_enable_sriov() argument
576 static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } bnx2x_disable_sriov() argument
577 static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} bnx2x_vfpf_acquire() argument
578 static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, bnx2x_vfpf_acquire() argument
580 static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } bnx2x_vfpf_init() argument
581 static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } bnx2x_vfpf_close_vf() argument
582 static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} bnx2x_vfpf_setup_q() argument
583 static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } bnx2x_vfpf_config_mac() argument
584 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, bnx2x_vfpf_config_mac() argument
586 static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, bnx2x_vfpf_config_rss() argument
589 static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } bnx2x_iov_nic_init() argument
590 static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } bnx2x_vf_headroom() argument
591 static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } bnx2x_iov_adjust_stats_req() argument
592 static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} bnx2x_vf_fill_fw_str() argument
593 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, bnx2x_vf_fill_fw_str() argument
595 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, bnx2x_vf_ustorm_prods_offset() argument
597 static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) bnx2x_sample_bulletin() argument
601 static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} bnx2x_timer_sriov() argument
603 static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) bnx2x_vf_doorbells() argument
608 static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} bnx2x_vf_pci_alloc() argument
609 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } bnx2x_pf_set_vfs_vlan() argument
610 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} bnx2x_sriov_configure() argument
612 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} bnx2x_iov_channel_down() argument
615 static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} bnx2x_iov_link_update() argument
616 static inline void bnx2x_iov_link_update(struct bnx2x *bp) {} bnx2x_iov_link_update_vf() argument
617 static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; } bnx2x_iov_link_update_vf() argument
625 static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; } argument
H A Dbnx2x_vfpf.c26 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
29 static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, bnx2x_add_tlv() argument
40 static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, bnx2x_vfpf_prep() argument
43 mutex_lock(&bp->vf2pf_mutex); bnx2x_vfpf_prep()
49 memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); bnx2x_vfpf_prep()
52 bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); bnx2x_vfpf_prep()
55 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); bnx2x_vfpf_prep()
59 static void bnx2x_vfpf_finalize(struct bnx2x *bp, bnx2x_vfpf_finalize() argument
65 mutex_unlock(&bp->vf2pf_mutex); bnx2x_vfpf_finalize()
69 static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, bnx2x_search_tlv_list() argument
93 static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) bnx2x_dp_tlv_list() argument
141 static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) bnx2x_send_msg2pf() argument
144 REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); bnx2x_send_msg2pf()
156 bnx2x_sample_bulletin(bp); bnx2x_send_msg2pf()
157 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { bnx2x_send_msg2pf()
194 static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) bnx2x_get_vf_id() argument
201 me_reg = readl(bp->doorbells); bnx2x_get_vf_id()
223 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bnx2x_vfpf_acquire() argument
226 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; bnx2x_vfpf_acquire()
227 struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; bnx2x_vfpf_acquire()
234 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); bnx2x_vfpf_acquire()
236 if (bnx2x_get_vf_id(bp, &vf_id)) { bnx2x_vfpf_acquire()
247 req->resc_request.num_sbs = bp->igu_sb_cnt; bnx2x_vfpf_acquire()
253 req->bulletin_addr = bp->pf2vf_bulletin_mapping; bnx2x_vfpf_acquire()
256 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, bnx2x_vfpf_acquire()
265 bnx2x_add_tlv(bp, req, bnx2x_vfpf_acquire()
271 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_acquire()
277 rc = bnx2x_send_msg2pf(bp, bnx2x_vfpf_acquire()
279 bp->vf2pf_mbox_mapping); bnx2x_vfpf_acquire()
285 /* copy acquire response from buffer to bp */ bnx2x_vfpf_acquire()
286 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); bnx2x_vfpf_acquire()
293 if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { bnx2x_vfpf_acquire()
296 } else if (bp->acquire_resp.hdr.status == bnx2x_vfpf_acquire()
305 bp->acquire_resp.resc.num_txqs); bnx2x_vfpf_acquire()
308 bp->acquire_resp.resc.num_rxqs); bnx2x_vfpf_acquire()
311 bp->acquire_resp.resc.num_sbs); bnx2x_vfpf_acquire()
314 bp->acquire_resp.resc.num_mac_filters); bnx2x_vfpf_acquire()
317 bp->acquire_resp.resc.num_vlan_filters); bnx2x_vfpf_acquire()
320 bp->acquire_resp.resc.num_mc_filters); bnx2x_vfpf_acquire()
323 memset(&bp->vf2pf_mbox->resp, 0, bnx2x_vfpf_acquire()
327 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire()
333 bp->acquire_resp.hdr.status); bnx2x_vfpf_acquire()
341 bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire()
344 memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); bnx2x_vfpf_acquire()
345 bp->flags |= HAS_PHYS_PORT_ID; bnx2x_vfpf_acquire()
352 fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, bnx2x_vfpf_acquire()
360 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire()
361 bnx2x_vfpf_release(bp); bnx2x_vfpf_acquire()
368 bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); bnx2x_vfpf_acquire()
369 bp->link_params.chip_id = bp->common.chip_id; bnx2x_vfpf_acquire()
370 bp->db_size = bp->acquire_resp.pfdev_info.db_size; bnx2x_vfpf_acquire()
371 bp->common.int_block = INT_BLOCK_IGU; bnx2x_vfpf_acquire()
372 bp->common.chip_port_mode = CHIP_2_PORT_MODE; bnx2x_vfpf_acquire()
373 bp->igu_dsb_id = -1; bnx2x_vfpf_acquire()
374 bp->mf_ov = 0; bnx2x_vfpf_acquire()
375 bp->mf_mode = 0; bnx2x_vfpf_acquire()
376 bp->common.flash_size = 0; bnx2x_vfpf_acquire()
377 bp->flags |= bnx2x_vfpf_acquire()
379 bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bnx2x_vfpf_acquire()
380 bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; bnx2x_vfpf_acquire()
381 bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters; bnx2x_vfpf_acquire()
383 strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, bnx2x_vfpf_acquire()
384 sizeof(bp->fw_ver)); bnx2x_vfpf_acquire()
386 if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) bnx2x_vfpf_acquire()
387 memcpy(bp->dev->dev_addr, bnx2x_vfpf_acquire()
388 bp->acquire_resp.resc.current_mac_addr, bnx2x_vfpf_acquire()
392 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire()
396 int bnx2x_vfpf_release(struct bnx2x *bp) bnx2x_vfpf_release() argument
398 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; bnx2x_vfpf_release()
399 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_release()
403 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); bnx2x_vfpf_release()
405 if (bnx2x_get_vf_id(bp, &vf_id)) { bnx2x_vfpf_release()
413 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_release()
417 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_release()
420 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_release()
437 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_release()
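
Every VF-to-PF request in this file follows the same five-step channel protocol just visible in bnx2x_vfpf_release() above. A condensed, hedged template; CHANNEL_TLV_RELEASE and the req/resp pointers stand in for any of the concrete messages.

	/* 1. take vf2pf_mutex, zero the mailbox, write the first TLV */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
	/* 2. terminate the TLV chain */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
	/* 3. dump the list when IOV debug messages are enabled */
	bnx2x_dp_tlv_list(bp, req);
	/* 4. ring the PF and wait for resp->hdr.status */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	/* 5. drop vf2pf_mutex; the next prep re-zeroes the mailbox */
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
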
443 int bnx2x_vfpf_init(struct bnx2x *bp) bnx2x_vfpf_init() argument
445 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; bnx2x_vfpf_init()
446 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_init()
450 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); bnx2x_vfpf_init()
453 for_each_eth_queue(bp, i) bnx2x_vfpf_init()
454 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, bnx2x_vfpf_init()
458 req->stats_addr = bp->fw_stats_data_mapping + bnx2x_vfpf_init()
464 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_init()
468 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_init()
470 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_init()
483 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_init()
489 void bnx2x_vfpf_close_vf(struct bnx2x *bp) bnx2x_vfpf_close_vf() argument
491 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; bnx2x_vfpf_close_vf()
492 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_close_vf()
499 if (bnx2x_get_vf_id(bp, &vf_id)) bnx2x_vfpf_close_vf()
503 for_each_queue(bp, i) bnx2x_vfpf_close_vf()
504 bnx2x_vfpf_teardown_queue(bp, i); bnx2x_vfpf_close_vf()
507 bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false); bnx2x_vfpf_close_vf()
510 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); bnx2x_vfpf_close_vf()
515 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_close_vf()
519 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_close_vf()
521 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_close_vf()
530 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_close_vf()
534 bnx2x_netif_stop(bp, 0); bnx2x_vfpf_close_vf()
536 bnx2x_del_all_napi(bp); bnx2x_vfpf_close_vf()
539 bnx2x_free_irq(bp); bnx2x_vfpf_close_vf()
542 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_leading_vfq_init() argument
549 bnx2x_init_mac_obj(bp, &q->mac_obj, bnx2x_leading_vfq_init()
551 bnx2x_vf_sp(bp, vf, mac_rdata), bnx2x_leading_vfq_init()
552 bnx2x_vf_sp_map(bp, vf, mac_rdata), bnx2x_leading_vfq_init()
558 bnx2x_init_vlan_obj(bp, &q->vlan_obj, bnx2x_leading_vfq_init()
560 bnx2x_vf_sp(bp, vf, vlan_rdata), bnx2x_leading_vfq_init()
561 bnx2x_vf_sp_map(bp, vf, vlan_rdata), bnx2x_leading_vfq_init()
567 bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj, bnx2x_leading_vfq_init()
569 bnx2x_vf_sp(bp, vf, vlan_mac_rdata), bnx2x_leading_vfq_init()
570 bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata), bnx2x_leading_vfq_init()
577 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, bnx2x_leading_vfq_init()
579 bnx2x_vf_sp(bp, vf, mcast_rdata), bnx2x_leading_vfq_init()
580 bnx2x_vf_sp_map(bp, vf, mcast_rdata), bnx2x_leading_vfq_init()
586 bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, bnx2x_leading_vfq_init()
588 bnx2x_vf_sp(bp, vf, rss_rdata), bnx2x_leading_vfq_init()
589 bnx2x_vf_sp_map(bp, vf, rss_rdata), bnx2x_leading_vfq_init()
600 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_vfpf_setup_q() argument
603 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; bnx2x_vfpf_setup_q()
604 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_setup_q()
610 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); bnx2x_vfpf_setup_q()
640 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; bnx2x_vfpf_setup_q()
641 req->rxq.mtu = bp->dev->mtu; bnx2x_vfpf_setup_q()
645 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; bnx2x_vfpf_setup_q()
657 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; bnx2x_vfpf_setup_q()
662 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_setup_q()
666 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_setup_q()
668 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_setup_q()
679 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_setup_q()
684 static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) bnx2x_vfpf_teardown_queue() argument
686 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; bnx2x_vfpf_teardown_queue()
687 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_teardown_queue()
691 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, bnx2x_vfpf_teardown_queue()
697 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_teardown_queue()
701 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_teardown_queue()
703 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_teardown_queue()
719 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_teardown_queue()
725 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) bnx2x_vfpf_config_mac() argument
727 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_config_mac()
728 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_config_mac()
729 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; bnx2x_vfpf_config_mac()
733 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_config_mac()
745 bnx2x_sample_bulletin(bp); bnx2x_vfpf_config_mac()
751 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_mac()
755 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_mac()
758 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_mac()
770 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); bnx2x_vfpf_config_mac()
773 if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { bnx2x_vfpf_config_mac()
775 memcpy(req->filters[0].mac, bp->dev->dev_addr, bnx2x_vfpf_config_mac()
779 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bnx2x_vfpf_config_mac()
780 bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_mac()
792 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_mac()
798 int bnx2x_vfpf_config_rss(struct bnx2x *bp, bnx2x_vfpf_config_rss() argument
801 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_config_rss()
802 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; bnx2x_vfpf_config_rss()
806 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, bnx2x_vfpf_config_rss()
810 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_rss()
842 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_rss()
845 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_config_rss()
861 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_rss()
868 struct bnx2x *bp = netdev_priv(dev); bnx2x_vfpf_set_mcast() local
869 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_set_mcast()
870 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_set_mcast()
874 if (bp->state != BNX2X_STATE_OPEN) { bnx2x_vfpf_set_mcast()
875 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); bnx2x_vfpf_set_mcast()
880 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_set_mcast()
908 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
912 bnx2x_dp_tlv_list(bp, req);
913 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
925 bnx2x_vfpf_finalize(bp, &req->first_tlv);
931 int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) bnx2x_vfpf_update_vlan() argument
933 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_update_vlan()
934 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_update_vlan()
937 if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) { bnx2x_vfpf_update_vlan()
943 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_update_vlan()
956 bnx2x_sample_bulletin(bp); bnx2x_vfpf_update_vlan()
958 if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { bnx2x_vfpf_update_vlan()
967 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_update_vlan()
971 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_update_vlan()
974 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_update_vlan()
986 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_update_vlan()
991 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) bnx2x_vfpf_storm_rx_mode() argument
993 int mode = bp->rx_mode; bnx2x_vfpf_storm_rx_mode()
994 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_storm_rx_mode()
995 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; bnx2x_vfpf_storm_rx_mode()
999 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_storm_rx_mode()
1018 if (bp->accept_any_vlan) bnx2x_vfpf_storm_rx_mode()
1025 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_storm_rx_mode()
1029 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_storm_rx_mode()
1031 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); bnx2x_vfpf_storm_rx_mode()
1040 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_storm_rx_mode()
1046 static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) storm_memset_vf_mbx_ack() argument
1051 REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); storm_memset_vf_mbx_ack()
1054 static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) storm_memset_vf_mbx_valid() argument
1059 REG_WR8(bp, addr, 1); storm_memset_vf_mbx_valid()
1063 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) bnx2x_vf_enable_mbx() argument
1065 bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); bnx2x_vf_enable_mbx()
1068 storm_memset_vf_mbx_ack(bp, abs_vfid); bnx2x_vf_enable_mbx()
1069 storm_memset_vf_mbx_valid(bp, abs_vfid); bnx2x_vf_enable_mbx()
1072 bnx2x_vf_enable_access(bp, abs_vfid); bnx2x_vf_enable_mbx()
1076 static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, bnx2x_copy32_vf_dmae() argument
1082 if (CHIP_IS_E1x(bp)) { bnx2x_copy32_vf_dmae()
1087 if (!bp->dmae_ready) { bnx2x_copy32_vf_dmae()
1093 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); bnx2x_copy32_vf_dmae()
1121 return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); bnx2x_copy32_vf_dmae()
1124 static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, bnx2x_vf_mbx_resp_single_tlv() argument
1127 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); bnx2x_vf_mbx_resp_single_tlv()
1135 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); bnx2x_vf_mbx_resp_single_tlv()
1136 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, bnx2x_vf_mbx_resp_single_tlv()
1140 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, bnx2x_vf_mbx_resp_send_msg() argument
1144 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); bnx2x_vf_mbx_resp_send_msg()
1150 bnx2x_dp_tlv_list(bp, resp); bnx2x_vf_mbx_resp_send_msg()
1167 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, bnx2x_vf_mbx_resp_send_msg()
1180 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); bnx2x_vf_mbx_resp_send_msg()
1186 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, bnx2x_vf_mbx_resp_send_msg()
1192 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_resp_send_msg()
1202 bnx2x_vf_release(bp, vf); bnx2x_vf_mbx_resp_send_msg()
1205 static void bnx2x_vf_mbx_resp(struct bnx2x *bp, bnx2x_vf_mbx_resp() argument
1209 bnx2x_vf_mbx_resp_single_tlv(bp, vf); bnx2x_vf_mbx_resp()
1210 bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); bnx2x_vf_mbx_resp()
1213 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, bnx2x_vf_mbx_resp_phys_port() argument
1220 if (!(bp->flags & HAS_PHYS_PORT_ID)) bnx2x_vf_mbx_resp_phys_port()
1223 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, bnx2x_vf_mbx_resp_phys_port()
1228 memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); bnx2x_vf_mbx_resp_phys_port()
1236 static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, bnx2x_vf_mbx_resp_fp_hsi_ver() argument
1243 bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, bnx2x_vf_mbx_resp_fp_hsi_ver()
1256 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_acquire_resp() argument
1268 resp->pfdev_info.chip_num = bp->common.chip_id; bnx2x_vf_mbx_acquire_resp()
1269 resp->pfdev_info.db_size = bp->db_size; bnx2x_vf_mbx_acquire_resp()
1275 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, bnx2x_vf_mbx_acquire_resp()
1284 bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_mbx_acquire_resp()
1286 bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_mbx_acquire_resp()
1295 BP_VF_BULLETIN(bp, vf->index); bnx2x_vf_mbx_acquire_resp()
1341 bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
1347 if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1349 bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
1355 bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length);
1357 bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
1361 bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
1364 static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp, bnx2x_vf_mbx_is_windows_vm() argument
1381 static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp, bnx2x_vf_mbx_acquire_chk_dorq() argument
1388 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, bnx2x_vf_mbx_acquire_chk_dorq()
1393 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire_chk_dorq()
1399 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_acquire() argument
1418 rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx); bnx2x_vf_mbx_acquire()
1429 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire()
1444 rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); bnx2x_vf_mbx_acquire()
1466 bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); bnx2x_vf_mbx_acquire()
1469 static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_init_vf() argument
1478 rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); bnx2x_vf_mbx_init_vf()
1486 bnx2x_iov_link_update_vf(bp, vf->index); bnx2x_vf_mbx_init_vf()
1489 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_init_vf()
1493 static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, bnx2x_vf_mbx_set_q_flags() argument
1516 if (IS_MF_SD(bp)) bnx2x_vf_mbx_set_q_flags()
1520 static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_setup_q() argument
1546 bnx2x_leading_vfq_init(bp, vf, q); bnx2x_vf_mbx_setup_q()
1570 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, bnx2x_vf_mbx_setup_q()
1574 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, bnx2x_vf_mbx_setup_q()
1584 bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, bnx2x_vf_mbx_setup_q()
1602 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q()
1606 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q()
1634 bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, bnx2x_vf_mbx_setup_q()
1638 bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); bnx2x_vf_mbx_setup_q()
1640 rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); bnx2x_vf_mbx_setup_q()
1645 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_setup_q()
1648 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, bnx2x_vf_mbx_macvlan_list() argument
1704 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, bnx2x_vf_mbx_dp_q_filter() argument
1715 static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, bnx2x_vf_mbx_dp_q_filters() argument
1722 bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, bnx2x_vf_mbx_dp_q_filters()
1737 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_vf_mbx_qfilters() argument
1742 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; bnx2x_vf_mbx_qfilters()
1749 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, bnx2x_vf_mbx_qfilters()
1757 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, bnx2x_vf_mbx_qfilters()
1767 rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, bnx2x_vf_mbx_qfilters()
1774 rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, bnx2x_vf_mbx_qfilters()
1786 BP_VF_BULLETIN(bp, vf->index); bnx2x_vf_mbx_qfilters()
1807 rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); bnx2x_vf_mbx_qfilters()
1814 rc = bnx2x_vf_mcast(bp, vf, msg->multicast, bnx2x_vf_mbx_qfilters()
1826 static int bnx2x_filters_validate_mac(struct bnx2x *bp, bnx2x_filters_validate_mac() argument
1830 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); bnx2x_filters_validate_mac()
1876 static int bnx2x_filters_validate_vlan(struct bnx2x *bp, bnx2x_filters_validate_vlan() argument
1880 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); bnx2x_filters_validate_vlan()
1906 static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, bnx2x_vf_mbx_set_q_filters() argument
1913 rc = bnx2x_filters_validate_mac(bp, vf, filters); bnx2x_vf_mbx_set_q_filters()
1917 rc = bnx2x_filters_validate_vlan(bp, vf, filters); bnx2x_vf_mbx_set_q_filters()
1926 bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); bnx2x_vf_mbx_set_q_filters()
1928 rc = bnx2x_vf_mbx_qfilters(bp, vf); bnx2x_vf_mbx_set_q_filters()
1930 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_set_q_filters()
1933 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_teardown_q() argument
1942 rc = bnx2x_vf_queue_teardown(bp, vf, qid); bnx2x_vf_mbx_teardown_q()
1943 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_teardown_q()
1946 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_close_vf() argument
1953 rc = bnx2x_vf_close(bp, vf); bnx2x_vf_mbx_close_vf()
1954 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_close_vf()
1957 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_release_vf() argument
1964 rc = bnx2x_vf_free(bp, vf); bnx2x_vf_mbx_release_vf()
1965 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_release_vf()
1968 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_update_rss() argument
2024 rc = bnx2x_vf_rss_update(bp, vf, &rss); bnx2x_vf_mbx_update_rss()
2026 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_update_rss()
2029 static int bnx2x_validate_tpa_params(struct bnx2x *bp, bnx2x_validate_tpa_params() argument
2042 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { bnx2x_validate_tpa_params()
2046 MAX_AGG_QS(bp)); bnx2x_validate_tpa_params()
2052 static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_update_tpa() argument
2061 if (bnx2x_validate_tpa_params(bp, tpa_tlv)) bnx2x_vf_mbx_update_tpa()
2087 rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); bnx2x_vf_mbx_update_tpa()
2090 bnx2x_vf_mbx_resp(bp, vf, rc); bnx2x_vf_mbx_update_tpa()
2094 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_request() argument
2104 bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_request()
2109 bnx2x_vf_mbx_acquire(bp, vf, mbx); bnx2x_vf_mbx_request()
2112 bnx2x_vf_mbx_init_vf(bp, vf, mbx); bnx2x_vf_mbx_request()
2115 bnx2x_vf_mbx_setup_q(bp, vf, mbx); bnx2x_vf_mbx_request()
2118 bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); bnx2x_vf_mbx_request()
2121 bnx2x_vf_mbx_teardown_q(bp, vf, mbx); bnx2x_vf_mbx_request()
2124 bnx2x_vf_mbx_close_vf(bp, vf, mbx); bnx2x_vf_mbx_request()
2127 bnx2x_vf_mbx_release_vf(bp, vf, mbx); bnx2x_vf_mbx_request()
2130 bnx2x_vf_mbx_update_rss(bp, vf, mbx); bnx2x_vf_mbx_request()
2133 bnx2x_vf_mbx_update_tpa(bp, vf, mbx); bnx2x_vf_mbx_request()
2155 bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); bnx2x_vf_mbx_request()
2161 storm_memset_vf_mbx_ack(bp, vf->abs_vfid); bnx2x_vf_mbx_request()
2164 bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); bnx2x_vf_mbx_request()
2168 void bnx2x_vf_mbx_schedule(struct bnx2x *bp, bnx2x_vf_mbx_schedule() argument
2179 if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > bnx2x_vf_mbx_schedule()
2180 BNX2X_NR_VIRTFN(bp)) { bnx2x_vf_mbx_schedule()
2182 vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); bnx2x_vf_mbx_schedule()
2186 vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); bnx2x_vf_mbx_schedule()
2189 mutex_lock(&BP_VFDB(bp)->event_mutex); bnx2x_vf_mbx_schedule()
2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; bnx2x_vf_mbx_schedule()
2191 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; bnx2x_vf_mbx_schedule()
2192 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); bnx2x_vf_mbx_schedule()
2193 mutex_unlock(&BP_VFDB(bp)->event_mutex); bnx2x_vf_mbx_schedule()
2195 bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); bnx2x_vf_mbx_schedule()
2199 void bnx2x_vf_mbx(struct bnx2x *bp) bnx2x_vf_mbx() argument
2201 struct bnx2x_vfdb *vfdb = BP_VFDB(bp); bnx2x_vf_mbx()
2214 for_each_vf(bp, vf_idx) { for_each_vf()
2215 struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); for_each_vf()
2216 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); for_each_vf()
2228 rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, for_each_vf()
2235 bnx2x_vf_release(bp, vf); for_each_vf()
2248 bnx2x_vf_mbx_request(bp, vf, mbx); for_each_vf()
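
The hand-off between bnx2x_vf_mbx_schedule() and bnx2x_vf_mbx() above is a latch-then-drain pattern: the event-queue path only records the VF's mailbox address and a pending bit under event_mutex, and the IOV task drains the bits later. A hedged reconstruction of the drain step (those lines never mention bp, so they do not appear in these search hits):

	u64 events;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		if (!(events & (1ULL << vf_idx)))
			continue;
		/* ... DMAE-copy the VF's request and dispatch it ... */
	}
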
2264 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) bnx2x_post_vf_bulletin() argument
2266 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); bnx2x_post_vf_bulletin()
2267 dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + bnx2x_post_vf_bulletin()
2269 dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); bnx2x_post_vf_bulletin()
2273 if (bnx2x_vf(bp, vf, state) != VF_ENABLED && bnx2x_post_vf_bulletin()
2274 bnx2x_vf(bp, vf, state) != VF_ACQUIRED) bnx2x_post_vf_bulletin()
2280 (bnx2x_vf(bp, vf, cfg_flags) & bnx2x_post_vf_bulletin()
2284 rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, bnx2x_post_vf_bulletin()
2285 bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), bnx2x_post_vf_bulletin()
H A Dbnx2x_link.c226 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) bnx2x_bits_en() argument
228 u32 val = REG_RD(bp, reg); bnx2x_bits_en()
231 REG_WR(bp, reg, val); bnx2x_bits_en()
235 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) bnx2x_bits_dis() argument
237 u32 val = REG_RD(bp, reg); bnx2x_bits_dis()
240 REG_WR(bp, reg, val); bnx2x_bits_dis()
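
bnx2x_bits_en()/bnx2x_bits_dis() are plain read-modify-write wrappers around REG_RD/REG_WR that return the updated register value. A hedged usage sketch; the register and bit names mirror ones used later in this file, but the pairing here is illustrative.

	/* enable RX flow control, disable it on the TX side */
	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
		      EMAC_RX_MODE_FLOW_EN);
	bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
		       EMAC_TX_MODE_FLOW_EN);
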
257 struct bnx2x *bp = params->bp; bnx2x_check_lfa() local
260 REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
268 REG_WR(bp, params->lfa_base + bnx2x_check_lfa()
275 link_status = REG_RD(bp, params->shmem_base + bnx2x_check_lfa()
304 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
313 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
322 saved_val = REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
332 cur_speed_cap_mask = REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
345 REG_RD(bp, params->lfa_base + bnx2x_check_lfa()
355 eee_status = REG_RD(bp, params->shmem2_base + bnx2x_check_lfa()
374 static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) bnx2x_get_epio() argument
386 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); bnx2x_get_epio()
387 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); bnx2x_get_epio()
389 *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; bnx2x_get_epio()
391 static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) bnx2x_set_epio() argument
403 gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); bnx2x_set_epio()
409 REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); bnx2x_set_epio()
412 gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); bnx2x_set_epio()
413 REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); bnx2x_set_epio()
416 static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) bnx2x_set_cfg_pin() argument
421 bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); bnx2x_set_cfg_pin()
425 bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); bnx2x_set_cfg_pin()
429 static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) bnx2x_get_cfg_pin() argument
434 bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); bnx2x_get_cfg_pin()
438 *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_get_cfg_pin()
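
The *_cfg_pin() helpers hide whether a board pin is wired through the MCP EPIO block or a regular GPIO: pin_cfg encodings from PIN_CFG_EPIO0 up take the EPIO path, everything else is decoded into a GPIO number and port. A hedged sketch; both pin_cfg variables are placeholders that would normally come from the port's NVRAM configuration.

	u32 present = 0;

	bnx2x_get_cfg_pin(bp, pin_cfg_mod_abs /* placeholder */, &present);
	if (present)
		bnx2x_set_cfg_pin(bp, pin_cfg_tx_dis /* placeholder */, 0);
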
449 struct bnx2x *bp = params->bp; bnx2x_ets_e2e3a0_disabled() local
460 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); bnx2x_ets_e2e3a0_disabled()
469 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); bnx2x_ets_e2e3a0_disabled()
471 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); bnx2x_ets_e2e3a0_disabled()
475 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_e2e3a0_disabled()
479 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); bnx2x_ets_e2e3a0_disabled()
480 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); bnx2x_ets_e2e3a0_disabled()
481 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); bnx2x_ets_e2e3a0_disabled()
483 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); bnx2x_ets_e2e3a0_disabled()
484 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); bnx2x_ets_e2e3a0_disabled()
485 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); bnx2x_ets_e2e3a0_disabled()
487 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); bnx2x_ets_e2e3a0_disabled()
491 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); bnx2x_ets_e2e3a0_disabled()
492 REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710); bnx2x_ets_e2e3a0_disabled()
494 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680); bnx2x_ets_e2e3a0_disabled()
495 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680); bnx2x_ets_e2e3a0_disabled()
497 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); bnx2x_ets_e2e3a0_disabled()
540 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_set_credit_upper_bound_nig() local
545 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
547 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
549 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
551 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
553 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
555 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : bnx2x_ets_e3b0_set_credit_upper_bound_nig()
559 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, bnx2x_ets_e3b0_set_credit_upper_bound_nig()
561 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, bnx2x_ets_e3b0_set_credit_upper_bound_nig()
563 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8, bnx2x_ets_e3b0_set_credit_upper_bound_nig()
578 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_nig_disabled() local
587 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); bnx2x_ets_e3b0_nig_disabled()
588 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); bnx2x_ets_e3b0_nig_disabled()
590 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); bnx2x_ets_e3b0_nig_disabled()
591 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); bnx2x_ets_e3b0_nig_disabled()
596 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : bnx2x_ets_e3b0_nig_disabled()
603 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); bnx2x_ets_e3b0_nig_disabled()
604 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); bnx2x_ets_e3b0_nig_disabled()
607 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, bnx2x_ets_e3b0_nig_disabled()
609 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); bnx2x_ets_e3b0_nig_disabled()
620 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); bnx2x_ets_e3b0_nig_disabled()
622 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); bnx2x_ets_e3b0_nig_disabled()
624 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : bnx2x_ets_e3b0_nig_disabled()
633 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : bnx2x_ets_e3b0_nig_disabled()
635 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : bnx2x_ets_e3b0_nig_disabled()
637 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : bnx2x_ets_e3b0_nig_disabled()
639 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : bnx2x_ets_e3b0_nig_disabled()
641 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : bnx2x_ets_e3b0_nig_disabled()
643 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : bnx2x_ets_e3b0_nig_disabled()
646 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); bnx2x_ets_e3b0_nig_disabled()
647 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); bnx2x_ets_e3b0_nig_disabled()
648 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); bnx2x_ets_e3b0_nig_disabled()
662 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_set_credit_upper_bound_pbf() local
681 REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound); bnx2x_ets_e3b0_set_credit_upper_bound_pbf()
694 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_pbf_disabled() local
707 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688); bnx2x_ets_e3b0_pbf_disabled()
710 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688); bnx2x_ets_e3b0_pbf_disabled()
715 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); bnx2x_ets_e3b0_pbf_disabled()
718 REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); bnx2x_ets_e3b0_pbf_disabled()
720 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : bnx2x_ets_e3b0_pbf_disabled()
724 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : bnx2x_ets_e3b0_pbf_disabled()
727 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : bnx2x_ets_e3b0_pbf_disabled()
741 REG_WR(bp, base_weight + (0x4 * i), 0); bnx2x_ets_e3b0_pbf_disabled()
753 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_disabled() local
755 if (!CHIP_IS_E3B0(bp)) { bnx2x_ets_e3b0_disabled()
776 struct bnx2x *bp = params->bp; bnx2x_ets_disabled() local
779 if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp))) bnx2x_ets_disabled()
781 else if (CHIP_IS_E3B0(bp)) bnx2x_ets_disabled()
801 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_cli_map() local
808 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT : bnx2x_ets_e3b0_cli_map()
811 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : bnx2x_ets_e3b0_cli_map()
814 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : bnx2x_ets_e3b0_cli_map()
818 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : bnx2x_ets_e3b0_cli_map()
830 static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, bnx2x_ets_e3b0_set_cos_bw() argument
891 REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig); bnx2x_ets_e3b0_set_cos_bw()
893 REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf); bnx2x_ets_e3b0_set_cos_bw()
907 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_get_total_bw() local
967 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_sp_pri_to_cos_set() local
1048 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_sp_set_pri_cli_reg() local
1115 REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg()
1118 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf); bnx2x_ets_e3b0_sp_set_pri_cli_reg()
1124 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg()
1126 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, bnx2x_ets_e3b0_sp_set_pri_cli_reg()
1129 REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf); bnx2x_ets_e3b0_sp_set_pri_cli_reg()
1142 struct bnx2x *bp = params->bp; bnx2x_ets_e3b0_config() local
1155 if (!CHIP_IS_E3B0(bp)) { bnx2x_ets_e3b0_config()
1193 bp, cos_entry, min_w_val_nig, min_w_val_pbf, bnx2x_ets_e3b0_config()
1243 struct bnx2x *bp = params->bp; bnx2x_ets_bw_limit_common() local
1249 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); bnx2x_ets_bw_limit_common()
1256 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); bnx2x_ets_bw_limit_common()
1258 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, bnx2x_ets_bw_limit_common()
1260 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, bnx2x_ets_bw_limit_common()
1264 REG_WR(bp, PBF_REG_ETS_ENABLED, 1); bnx2x_ets_bw_limit_common()
1267 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); bnx2x_ets_bw_limit_common()
1275 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); bnx2x_ets_bw_limit_common()
1278 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, bnx2x_ets_bw_limit_common()
1280 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, bnx2x_ets_bw_limit_common()
1288 struct bnx2x *bp = params->bp; bnx2x_ets_bw_limit() local
1309 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight); bnx2x_ets_bw_limit()
1310 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight); bnx2x_ets_bw_limit()
1312 REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight); bnx2x_ets_bw_limit()
1313 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); bnx2x_ets_bw_limit()
1319 struct bnx2x *bp = params->bp; bnx2x_ets_strict() local
1330 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); bnx2x_ets_strict()
1334 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_strict()
1336 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); bnx2x_ets_strict()
1338 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100); bnx2x_ets_strict()
1341 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); bnx2x_ets_strict()
1351 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); bnx2x_ets_strict()
1363 struct bnx2x *bp = params->bp; bnx2x_update_pfc_xmac() local
1394 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); bnx2x_update_pfc_xmac()
1395 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); bnx2x_update_pfc_xmac()
1396 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); bnx2x_update_pfc_xmac()
1402 REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); bnx2x_update_pfc_xmac()
1403 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); bnx2x_update_pfc_xmac()
1404 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); bnx2x_update_pfc_xmac()
1408 REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO, bnx2x_update_pfc_xmac()
1413 REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, bnx2x_update_pfc_xmac()
1423 static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, bnx2x_set_mdio_clk() argument
1431 cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); bnx2x_set_mdio_clk()
1433 if (USES_WARPCORE(bp)) bnx2x_set_mdio_clk()
1449 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); bnx2x_set_mdio_clk()
1453 static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp, bnx2x_set_mdio_emac_per_phy() argument
1460 bnx2x_set_mdio_clk(bp, params->chip_id, bnx2x_set_mdio_emac_per_phy()
1464 static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) bnx2x_is_4_port_mode() argument
1468 port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); bnx2x_is_4_port_mode()
1474 return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); bnx2x_is_4_port_mode()
1481 struct bnx2x *bp = params->bp; bnx2x_emac_init() local
1487 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_emac_init()
1490 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_emac_init()
1495 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_init()
1496 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); bnx2x_emac_init()
1500 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_init()
1509 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_emac_init()
1513 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); bnx2x_emac_init()
1519 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); bnx2x_emac_init()
1526 struct bnx2x *bp = params->bp; bnx2x_set_xumac_nig() local
1528 REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, bnx2x_set_xumac_nig()
1530 REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, bnx2x_set_xumac_nig()
1532 REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : bnx2x_set_xumac_nig()
1540 struct bnx2x *bp = params->bp; bnx2x_set_umac_rxtx() local
1541 if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_umac_rxtx()
1544 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); bnx2x_set_umac_rxtx()
1552 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_set_umac_rxtx()
1560 struct bnx2x *bp = params->bp; bnx2x_umac_enable() local
1562 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_umac_enable()
1566 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_umac_enable()
1572 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); bnx2x_umac_enable()
1605 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable()
1611 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, bnx2x_umac_enable()
1613 REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); bnx2x_umac_enable()
1615 REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); bnx2x_umac_enable()
1619 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, bnx2x_umac_enable()
1624 REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, bnx2x_umac_enable()
1632 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable()
1641 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); bnx2x_umac_enable()
1646 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); bnx2x_umac_enable()
1656 struct bnx2x *bp = params->bp; bnx2x_xmac_init() local
1657 u32 is_port4mode = bnx2x_is_4_port_mode(bp); bnx2x_xmac_init()
1665 if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || bnx2x_xmac_init()
1666 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || bnx2x_xmac_init()
1667 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) && bnx2x_xmac_init()
1669 (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_xmac_init()
1677 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_xmac_init()
1681 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_xmac_init()
1687 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); bnx2x_xmac_init()
1690 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); bnx2x_xmac_init()
1693 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); bnx2x_xmac_init()
1698 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); bnx2x_xmac_init()
1703 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); bnx2x_xmac_init()
1707 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_xmac_init()
1711 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_xmac_init()
1719 struct bnx2x *bp = params->bp; bnx2x_set_xmac_rxtx() local
1723 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_xmac_rxtx()
1729 pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); bnx2x_set_xmac_rxtx()
1730 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, bnx2x_set_xmac_rxtx()
1732 REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, bnx2x_set_xmac_rxtx()
1735 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); bnx2x_set_xmac_rxtx()
1740 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); bnx2x_set_xmac_rxtx()
1748 struct bnx2x *bp = params->bp; bnx2x_xmac_enable() local
1762 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); bnx2x_xmac_enable()
1768 REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL, bnx2x_xmac_enable()
1771 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); bnx2x_xmac_enable()
1772 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, bnx2x_xmac_enable()
1777 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); bnx2x_xmac_enable()
1780 REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); bnx2x_xmac_enable()
1787 REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); bnx2x_xmac_enable()
1788 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1); bnx2x_xmac_enable()
1790 REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); bnx2x_xmac_enable()
1805 REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); bnx2x_xmac_enable()
1817 struct bnx2x *bp = params->bp; bnx2x_emac_enable() local
1825 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_emac_enable()
1829 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); bnx2x_emac_enable()
1839 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); bnx2x_emac_enable()
1841 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); bnx2x_emac_enable()
1846 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); bnx2x_emac_enable()
1849 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, bnx2x_emac_enable()
1851 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable()
1855 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, bnx2x_emac_enable()
1858 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable()
1864 bnx2x_bits_en(bp, emac_base + bnx2x_emac_enable()
1869 bnx2x_bits_en(bp, emac_base + bnx2x_emac_enable()
1874 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, bnx2x_emac_enable()
1878 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); bnx2x_emac_enable()
1888 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); bnx2x_emac_enable()
1892 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, bnx2x_emac_enable()
1897 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, bnx2x_emac_enable()
1904 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); bnx2x_emac_enable()
1907 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); bnx2x_emac_enable()
1912 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); bnx2x_emac_enable()
1915 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); bnx2x_emac_enable()
1918 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, bnx2x_emac_enable()
1923 REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); bnx2x_emac_enable()
1926 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); bnx2x_emac_enable()
1927 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); bnx2x_emac_enable()
1928 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); bnx2x_emac_enable()
1931 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); bnx2x_emac_enable()
1938 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); bnx2x_emac_enable()
1939 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); bnx2x_emac_enable()
1941 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); bnx2x_emac_enable()
1951 struct bnx2x *bp = params->bp; bnx2x_update_pfc_bmac1() local
1963 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac1()
1973 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac1()
1984 struct bnx2x *bp = params->bp; bnx2x_update_pfc_bmac2() local
1996 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2()
2007 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2()
2019 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, bnx2x_update_pfc_bmac2()
2030 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2()
2043 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, bnx2x_update_pfc_bmac2()
2058 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_update_pfc_bmac2()
2066 static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, bnx2x_pfc_nig_rx_priority_mask() argument
2105 REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); bnx2x_pfc_nig_rx_priority_mask()
2111 struct bnx2x *bp = params->bp; bnx2x_update_mng() local
2113 REG_WR(bp, params->shmem_base + bnx2x_update_mng()
2120 struct bnx2x *bp = params->bp; bnx2x_update_link_attr() local
2122 if (SHMEM2_HAS(bp, link_attr_sync)) bnx2x_update_link_attr()
2123 REG_WR(bp, params->shmem2_base + bnx2x_update_link_attr()
2135 struct bnx2x *bp = params->bp; bnx2x_update_pfc_nig() local
2146 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : bnx2x_update_pfc_nig()
2155 if (CHIP_IS_E3(bp)) bnx2x_update_pfc_nig()
2176 if (CHIP_IS_E3(bp)) bnx2x_update_pfc_nig()
2177 REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : bnx2x_update_pfc_nig()
2179 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : bnx2x_update_pfc_nig()
2181 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : bnx2x_update_pfc_nig()
2183 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 : bnx2x_update_pfc_nig()
2186 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 : bnx2x_update_pfc_nig()
2189 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK : bnx2x_update_pfc_nig()
2192 REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : bnx2x_update_pfc_nig()
2196 REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : bnx2x_update_pfc_nig()
2200 REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE : bnx2x_update_pfc_nig()
2208 bnx2x_pfc_nig_rx_priority_mask(bp, i, bnx2x_update_pfc_nig()
2211 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : bnx2x_update_pfc_nig()
2215 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : bnx2x_update_pfc_nig()
2219 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : bnx2x_update_pfc_nig()
2233 struct bnx2x *bp = params->bp; bnx2x_update_pfc() local
2251 if (CHIP_IS_E3(bp)) { bnx2x_update_pfc()
2255 val = REG_RD(bp, MISC_REG_RESET_REG_2); bnx2x_update_pfc()
2263 if (CHIP_IS_E2(bp)) bnx2x_update_pfc()
2273 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); bnx2x_update_pfc()
2282 struct bnx2x *bp = params->bp; bnx2x_bmac1_enable() local
2294 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, bnx2x_bmac1_enable()
2304 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); bnx2x_bmac1_enable()
2314 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_bmac1_enable()
2319 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable()
2326 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable()
2331 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); bnx2x_bmac1_enable()
2336 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, bnx2x_bmac1_enable()
2346 struct bnx2x *bp = params->bp; bnx2x_bmac2_enable() local
2356 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); bnx2x_bmac2_enable()
2362 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, bnx2x_bmac2_enable()
2374 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, bnx2x_bmac2_enable()
2382 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, bnx2x_bmac2_enable()
2389 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable()
2395 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable()
2400 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); bnx2x_bmac2_enable()
2413 struct bnx2x *bp = params->bp; bnx2x_bmac_enable() local
2417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_bmac_enable()
2422 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, bnx2x_bmac_enable()
2426 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); bnx2x_bmac_enable()
2429 if (CHIP_IS_E2(bp)) bnx2x_bmac_enable()
2433 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); bnx2x_bmac_enable()
2434 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); bnx2x_bmac_enable()
2435 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); bnx2x_bmac_enable()
2441 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); bnx2x_bmac_enable()
2442 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); bnx2x_bmac_enable()
2443 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); bnx2x_bmac_enable()
2444 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); bnx2x_bmac_enable()
2445 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); bnx2x_bmac_enable()
2446 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); bnx2x_bmac_enable()
2452 static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) bnx2x_set_bmac_rx() argument
2457 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); bnx2x_set_bmac_rx()
2459 if (CHIP_IS_E2(bp)) bnx2x_set_bmac_rx()
2464 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_set_bmac_rx()
2468 REG_RD_DMAE(bp, bmac_addr, wb_data, 2); bnx2x_set_bmac_rx()
2473 REG_WR_DMAE(bp, bmac_addr, wb_data, 2); bnx2x_set_bmac_rx()
2481 struct bnx2x *bp = params->bp; bnx2x_pbf_update() local
2487 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); bnx2x_pbf_update()
2490 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); bnx2x_pbf_update()
2491 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update()
2496 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update()
2499 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); bnx2x_pbf_update()
2511 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); bnx2x_pbf_update()
2513 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); bnx2x_pbf_update()
2520 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); bnx2x_pbf_update()
2522 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); bnx2x_pbf_update()
2534 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd); bnx2x_pbf_update()
2539 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); bnx2x_pbf_update()
2541 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); bnx2x_pbf_update()
2544 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); bnx2x_pbf_update()
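
bnx2x_pbf_update() is a drain-reconfigure-restart sequence: block new tasks, wait until the credit counter climbs back to its init value (meaning all in-flight packets have drained), switch between pause mode and threshold mode, then pulse the port init. A condensed sketch of the drain loop (the poll bound and sleep interval are assumptions):

	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);

	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
	count = 1000;				/* assumed bound */
	while ((init_crd != crd) && count) {
		msleep(5);			/* let in-flight packets drain */
		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
		count--;
	}

	/* ... program pause enable or arbiter threshold, per the
	 * flow-control mode, as the hits above show ... */

	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);	/* pulse init */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
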
2551 * @bp: driver handle
2563 static u32 bnx2x_get_emac_base(struct bnx2x *bp, bnx2x_get_emac_base() argument
2571 if (REG_RD(bp, NIG_REG_PORT_SWAP)) bnx2x_get_emac_base()
2577 if (REG_RD(bp, NIG_REG_PORT_SWAP)) bnx2x_get_emac_base()
2598 static int bnx2x_cl22_write(struct bnx2x *bp, bnx2x_cl22_write() argument
2606 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); bnx2x_cl22_write()
2607 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, bnx2x_cl22_write()
2614 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl22_write()
2619 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl22_write()
2629 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); bnx2x_cl22_write()
2633 static int bnx2x_cl22_read(struct bnx2x *bp, bnx2x_cl22_read() argument
2642 mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); bnx2x_cl22_read()
2643 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, bnx2x_cl22_read()
2650 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl22_read()
2655 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl22_read()
2668 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); bnx2x_cl22_read()
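
bnx2x_cl22_write() and bnx2x_cl22_read() above share one shape: save EMAC_REG_EMAC_MDIO_MODE, drop the clause-45 bit, kick a COMM transaction, spin on its busy flag, then restore the saved mode. A minimal sketch of the write leg (COMM bit-field layout and the 50 x 10us poll are taken from the driver's usual pattern):

	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);	/* clause-22 framing */

	tmp = ((phy->addr << 21) | (reg << 16) | val |
	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {
		udelay(10);
		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); /* restore */
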
2675 static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_read() argument
2683 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_cl45_read()
2684 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_cl45_read()
2685 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); bnx2x_cl45_read()
2689 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_read()
2695 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl45_read()
2700 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl45_read()
2708 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_read()
2716 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); bnx2x_cl45_read()
2721 val = REG_RD(bp, phy->mdio_ctrl + bnx2x_cl45_read()
2730 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_read()
2740 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); bnx2x_cl45_read()
2745 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_read()
2750 static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_write() argument
2758 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_cl45_write()
2759 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_cl45_write()
2760 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); bnx2x_cl45_write()
2764 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_write()
2771 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl45_write()
2776 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); bnx2x_cl45_write()
2784 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_write()
2791 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); bnx2x_cl45_write()
2796 tmp = REG_RD(bp, phy->mdio_ctrl + bnx2x_cl45_write()
2805 netdev_err(bp->dev, "MDC/MDIO access timeout\n"); bnx2x_cl45_write()
2814 bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); bnx2x_cl45_write()
2818 bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, bnx2x_cl45_write()
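
The clause-45 pair is the same handshake run twice per access: an ADDRESS cycle that latches the register number inside the addressed device, then a READ or WRITE cycle for the data, each polled on the same busy flag. On timeout the code logs "MDC/MDIO access timeout" and, as the hits above show, issues a dummy read of register 0xf to recover the state machine. A condensed sketch of the write path (command macro names and poll details assumed):

	/* Phase 1: latch the register address in the target devad. */
	tmp = ((phy->addr << 21) | (devad << 16) | reg |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
	/* ... poll EMAC_MDIO_COMM_START_BUSY as in the cl22 sketch ... */

	/* Phase 2: the data cycle proper. */
	tmp = ((phy->addr << 21) | (devad << 16) | val |
	       EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
	/* ... poll again; on timeout, netdev_err() plus a dummy
	 * bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val) works around
	 * a wedged MDC/MDIO block, per the fragments above ... */
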
2828 struct bnx2x *bp = params->bp; bnx2x_eee_has_cap() local
2830 if (REG_RD(bp, params->shmem2_base) <= bnx2x_eee_has_cap()
2880 struct bnx2x *bp = params->bp; bnx2x_eee_calc_timer() local
2895 eee_mode = ((REG_RD(bp, params->shmem_base + bnx2x_eee_calc_timer()
2913 struct bnx2x *bp = params->bp; bnx2x_eee_set_timers() local
2918 REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), bnx2x_eee_set_timers()
2965 struct bnx2x *bp = params->bp; bnx2x_eee_disable() local
2968 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); bnx2x_eee_disable()
2970 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); bnx2x_eee_disable()
2981 struct bnx2x *bp = params->bp; bnx2x_eee_advertise() local
2985 REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); bnx2x_eee_advertise()
2996 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); bnx2x_eee_advertise()
3006 struct bnx2x *bp = params->bp; bnx2x_update_mng_eee() local
3009 REG_WR(bp, params->shmem2_base + bnx2x_update_mng_eee()
3018 struct bnx2x *bp = params->bp; bnx2x_eee_an_resolve() local
3023 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); bnx2x_eee_an_resolve()
3024 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); bnx2x_eee_an_resolve()
3069 struct bnx2x *bp = params->bp; bnx2x_bsc_module_sel() local
3072 board_cfg = REG_RD(bp, params->shmem_base + bnx2x_bsc_module_sel()
3080 sfp_ctrl = REG_RD(bp, params->shmem_base + bnx2x_bsc_module_sel()
3087 bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); bnx2x_bsc_module_sel()
3091 struct bnx2x *bp, bnx2x_bsc_read()
3111 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read()
3113 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read()
3117 REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); bnx2x_bsc_read()
3124 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read()
3128 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read()
3131 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read()
3148 REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); bnx2x_bsc_read()
3152 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read()
3155 val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); bnx2x_bsc_read()
3166 data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); bnx2x_bsc_read()
3177 static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_read_or_write() argument
3181 bnx2x_cl45_read(bp, phy, devad, reg, &val); bnx2x_cl45_read_or_write()
3182 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); bnx2x_cl45_read_or_write()
3185 static void bnx2x_cl45_read_and_write(struct bnx2x *bp, bnx2x_cl45_read_and_write() argument
3190 bnx2x_cl45_read(bp, phy, devad, reg, &val); bnx2x_cl45_read_and_write()
3191 bnx2x_cl45_write(bp, phy, devad, reg, val & and_val); bnx2x_cl45_read_and_write()
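
The two read-modify-write helpers are short enough that the hits above reconstruct them almost verbatim; pieced together (only the parameter lines not shown and the local declarations are filled in):

	static void bnx2x_cl45_read_or_write(struct bnx2x *bp,
					     struct bnx2x_phy *phy,
					     u8 devad, u16 reg, u16 or_val)
	{
		u16 val;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
	}

	static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
					      struct bnx2x_phy *phy,
					      u8 devad, u16 reg, u16 and_val)
	{
		u16 val;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
	}
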
3203 return bnx2x_cl45_read(params->bp, bnx2x_phy_read()
3220 return bnx2x_cl45_write(params->bp, bnx2x_phy_write()
3231 struct bnx2x *bp = params->bp; bnx2x_get_warpcore_lane() local
3235 path = BP_PATH(bp); bnx2x_get_warpcore_lane()
3238 if (bnx2x_is_4_port_mode(bp)) { bnx2x_get_warpcore_lane()
3242 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); bnx2x_get_warpcore_lane()
3246 path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); bnx2x_get_warpcore_lane()
3252 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); bnx2x_get_warpcore_lane()
3256 port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); bnx2x_get_warpcore_lane()
3266 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); bnx2x_get_warpcore_lane()
3271 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); bnx2x_get_warpcore_lane()
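
bnx2x_get_warpcore_lane() repeats one selection idiom for both path swap and port swap: an override register decides via its low bit whether its own value or the hardware strap register is used. A hedged sketch of that idiom for the path-swap case (bit positions follow the override convention visible in the hits):

	/* Bit 0 gates the override; bit 1 supplies the value.
	 * Otherwise fall back to the strap register. */
	path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
	if (path_swap_ovr & 0x1)
		path_swap = (path_swap_ovr & 0x2);
	else
		path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
	if (path_swap)
		path = path ^ 1;	/* lanes belong to the other path */
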
3286 struct bnx2x *bp = params->bp; bnx2x_set_aer_mmd() local
3294 if (USES_WARPCORE(bp)) { bnx2x_set_aer_mmd()
3304 } else if (CHIP_IS_E2(bp)) bnx2x_set_aer_mmd()
3309 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_set_aer_mmd()
3318 static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port) bnx2x_set_serdes_access() argument
3323 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); bnx2x_set_serdes_access()
3324 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); bnx2x_set_serdes_access()
3326 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); bnx2x_set_serdes_access()
3329 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); bnx2x_set_serdes_access()
3332 static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) bnx2x_serdes_deassert() argument
3341 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); bnx2x_serdes_deassert()
3343 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); bnx2x_serdes_deassert()
3345 bnx2x_set_serdes_access(bp, port); bnx2x_serdes_deassert()
3347 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, bnx2x_serdes_deassert()
3355 struct bnx2x *bp = params->bp; bnx2x_xgxs_specific_func() local
3359 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); bnx2x_xgxs_specific_func()
3360 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, bnx2x_xgxs_specific_func()
3368 struct bnx2x *bp = params->bp; bnx2x_xgxs_deassert() local
3377 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); bnx2x_xgxs_deassert()
3379 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); bnx2x_xgxs_deassert()
3387 struct bnx2x *bp = params->bp; bnx2x_calc_ieee_aneg_adv() local
3428 struct bnx2x *bp = params->bp; set_phy_vars() local
3471 struct bnx2x *bp = params->bp; bnx2x_ext_phy_set_pause() local
3473 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); bnx2x_ext_phy_set_pause()
3490 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); bnx2x_ext_phy_set_pause()
3498 struct bnx2x *bp = params->bp; bnx2x_pause_resolve() local
3547 struct bnx2x *bp = params->bp; bnx2x_ext_phy_update_adv_fc() local
3549 bnx2x_cl22_read(bp, phy, 0x4, &ld_pause); bnx2x_ext_phy_update_adv_fc()
3550 bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); bnx2x_ext_phy_update_adv_fc()
3551 } else if (CHIP_IS_E3(bp) && bnx2x_ext_phy_update_adv_fc()
3555 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc()
3562 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc()
3564 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc()
3567 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc()
3569 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_ext_phy_update_adv_fc()
3579 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc()
3582 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_update_adv_fc()
3638 struct bnx2x *bp = params->bp; bnx2x_warpcore_enable_AN_KR2() local
3661 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR2()
3665 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_enable_AN_KR2()
3677 struct bnx2x *bp = params->bp; bnx2x_disable_kr2() local
3700 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_disable_kr2()
3711 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_lpi_passthrough() local
3714 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_lpi_passthrough()
3716 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_lpi_passthrough()
3724 struct bnx2x *bp = params->bp; bnx2x_warpcore_restart_AN_KR() local
3726 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_restart_AN_KR()
3728 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_restart_AN_KR()
3740 struct bnx2x *bp = params->bp; bnx2x_warpcore_enable_AN_KR() local
3754 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_enable_AN_KR()
3757 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3761 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3772 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); bnx2x_warpcore_enable_AN_KR()
3781 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_enable_AN_KR()
3784 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR()
3792 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3797 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3800 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3803 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3808 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR()
3812 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_enable_AN_KR()
3818 if (REG_RD(bp, params->shmem_base + bnx2x_warpcore_enable_AN_KR()
3822 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3831 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3835 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3842 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_enable_AN_KR()
3845 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3849 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3856 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3858 wc_lane_config = REG_RD(bp, params->shmem_base + bnx2x_warpcore_enable_AN_KR()
3861 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3876 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_enable_AN_KR()
3891 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_10G_KR() local
3907 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_warpcore_set_10G_KR()
3912 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_10G_KR()
3915 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3918 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3921 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3924 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3929 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_10G_KR()
3932 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_10G_KR()
3936 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3940 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3944 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3948 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3950 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_KR()
3959 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_10G_XFI() local
3965 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3969 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3973 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); bnx2x_warpcore_set_10G_XFI()
3976 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3980 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3984 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3988 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3993 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
3995 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4000 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4010 cfg_tap_val = REG_RD(bp, params->shmem_base + bnx2x_warpcore_set_10G_XFI()
4056 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4061 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4064 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4069 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4073 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4079 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4083 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4087 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_10G_XFI()
4095 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_20G_force_KR2() local
4097 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_20G_force_KR2()
4101 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4106 bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4108 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4111 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4115 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4119 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4122 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4125 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4129 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4131 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4135 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_set_20G_force_KR2()
4138 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_force_KR2()
4144 static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, bnx2x_warpcore_set_20G_DXGXS() argument
4149 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4153 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4156 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4159 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4162 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4165 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4168 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4171 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4174 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4177 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4181 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4185 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4189 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4193 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_20G_DXGXS()
4203 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_sgmii_speed() local
4207 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4214 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4219 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4240 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4245 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4251 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4258 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4263 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4265 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4270 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4275 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_sgmii_speed()
4280 static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, bnx2x_warpcore_reset_lane() argument
4286 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane()
4292 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane()
4294 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_reset_lane()
4302 struct bnx2x *bp = params->bp; bnx2x_warpcore_clear_regs() local
4321 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_clear_regs()
4325 bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, bnx2x_warpcore_clear_regs()
4329 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_clear_regs()
4334 static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, bnx2x_get_mod_abs_int_cfg() argument
4342 if (CHIP_IS_E3(bp)) { bnx2x_get_mod_abs_int_cfg()
4343 cfg_pin = (REG_RD(bp, shmem_base + bnx2x_get_mod_abs_int_cfg()
4376 struct bnx2x *bp = params->bp; bnx2x_is_sfp_module_plugged() local
4379 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, bnx2x_is_sfp_module_plugged()
4383 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_is_sfp_module_plugged()
4395 struct bnx2x *bp = params->bp; bnx2x_warpcore_get_sigdet() local
4399 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, bnx2x_warpcore_get_sigdet()
4409 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_runtime() local
4420 serdes_net_if = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_config_runtime()
4428 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, bnx2x_warpcore_config_runtime()
4438 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_runtime()
4439 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_config_runtime()
4442 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_config_runtime()
4462 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_sfi() local
4479 struct bnx2x *bp = params->bp; bnx2x_sfp_e3_set_transmitter() local
4483 cfg_pin = REG_RD(bp, params->shmem_base + bnx2x_sfp_e3_set_transmitter()
4491 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); bnx2x_sfp_e3_set_transmitter()
4493 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); bnx2x_sfp_e3_set_transmitter()
4500 struct bnx2x *bp = params->bp; bnx2x_warpcore_config_init() local
4504 serdes_net_if = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_config_init()
4512 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_config_init()
4578 bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); bnx2x_warpcore_config_init()
4600 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_config_init()
4607 struct bnx2x *bp = params->bp; bnx2x_warpcore_link_reset() local
4610 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_warpcore_link_reset()
4613 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_link_reset()
4617 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4620 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4624 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_warpcore_link_reset()
4627 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4631 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4635 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4640 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4643 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4652 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_link_reset()
4662 struct bnx2x *bp = params->bp; bnx2x_set_warpcore_loopback() local
4673 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_set_warpcore_loopback()
4676 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback()
4681 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback()
4686 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback()
4694 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback()
4697 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, bnx2x_set_warpcore_loopback()
4707 struct bnx2x *bp = params->bp; bnx2x_sync_link() local
4775 USES_WARPCORE(bp) && bnx2x_sync_link()
4782 if (USES_WARPCORE(bp)) bnx2x_sync_link()
4787 if (USES_WARPCORE(bp)) bnx2x_sync_link()
4813 struct bnx2x *bp = params->bp; bnx2x_link_status_update() local
4819 vars->link_status = REG_RD(bp, params->shmem_base + bnx2x_link_status_update()
4829 vars->eee_status = REG_RD(bp, params->shmem2_base + bnx2x_link_status_update()
4839 media_types = REG_RD(bp, sync_offset); bnx2x_link_status_update()
4857 vars->aeu_int_mask = REG_RD(bp, sync_offset); bnx2x_link_status_update()
4867 if (SHMEM2_HAS(bp, link_attr_sync)) bnx2x_link_status_update()
4868 params->link_attr_sync = SHMEM2_RD(bp, bnx2x_link_status_update()
4880 struct bnx2x *bp = params->bp; bnx2x_set_master_ln() local
4887 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_master_ln()
4892 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_master_ln()
4902 struct bnx2x *bp = params->bp; bnx2x_reset_unicore() local
4905 CL22_RD_OVER_CL45(bp, phy, bnx2x_reset_unicore()
4910 CL22_WR_OVER_CL45(bp, phy, bnx2x_reset_unicore()
4916 bnx2x_set_serdes_access(bp, params->port); bnx2x_reset_unicore()
4923 CL22_RD_OVER_CL45(bp, phy, bnx2x_reset_unicore()
4934 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_reset_unicore()
4945 struct bnx2x *bp = params->bp; bnx2x_set_swap_lanes() local
4959 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes()
4966 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes()
4972 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes()
4978 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_swap_lanes()
4987 struct bnx2x *bp = params->bp; bnx2x_set_parallel_detection() local
4989 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
4999 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
5009 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
5014 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
5023 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
5029 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_parallel_detection()
5042 struct bnx2x *bp = params->bp; bnx2x_set_autoneg() local
5046 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5057 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5063 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5074 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5079 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5092 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5099 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5105 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5113 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5124 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5135 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_autoneg()
5145 struct bnx2x *bp = params->bp; bnx2x_program_serdes() local
5149 CL22_RD_OVER_CL45(bp, phy, bnx2x_program_serdes()
5157 CL22_WR_OVER_CL45(bp, phy, bnx2x_program_serdes()
5164 CL22_RD_OVER_CL45(bp, phy, bnx2x_program_serdes()
5184 CL22_WR_OVER_CL45(bp, phy, bnx2x_program_serdes()
5193 struct bnx2x *bp = params->bp; bnx2x_set_brcm_cl37_advertisement() local
5201 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_brcm_cl37_advertisement()
5205 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_brcm_cl37_advertisement()
5214 struct bnx2x *bp = params->bp; bnx2x_set_ieee_aneg_advertisement() local
5218 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement()
5221 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement()
5226 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_ieee_aneg_advertisement()
5235 struct bnx2x *bp = params->bp; bnx2x_restart_autoneg() local
5242 CL22_RD_OVER_CL45(bp, phy, bnx2x_restart_autoneg()
5247 CL22_WR_OVER_CL45(bp, phy, bnx2x_restart_autoneg()
5255 CL22_RD_OVER_CL45(bp, phy, bnx2x_restart_autoneg()
5262 CL22_WR_OVER_CL45(bp, phy, bnx2x_restart_autoneg()
5275 struct bnx2x *bp = params->bp; bnx2x_initialize_sgmii_process() local
5280 CL22_RD_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process()
5289 CL22_WR_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process()
5299 CL22_RD_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process()
5330 CL22_WR_OVER_CL45(bp, phy, bnx2x_initialize_sgmii_process()
5346 struct bnx2x *bp = params->bp; bnx2x_direct_parallel_detect_used() local
5350 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used()
5354 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used()
5364 CL22_RD_OVER_CL45(bp, phy, bnx2x_direct_parallel_detect_used()
5385 struct bnx2x *bp = params->bp; bnx2x_update_adv_fc() local
5392 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc()
5396 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc()
5406 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc()
5410 CL22_RD_OVER_CL45(bp, phy, bnx2x_update_adv_fc()
5429 struct bnx2x *bp = params->bp; bnx2x_flow_ctrl_resolve() local
5455 struct bnx2x *bp = params->bp; bnx2x_check_fallback_to_cl37() local
5459 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37()
5467 CL22_WR_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37()
5474 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37()
5490 CL22_RD_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37()
5511 CL22_WR_OVER_CL45(bp, phy, bnx2x_check_fallback_to_cl37()
5540 struct bnx2x *bp = params->bp; bnx2x_get_link_speed_duplex() local
5628 struct bnx2x *bp = params->bp; bnx2x_link_settings_status() local
5634 CL22_RD_OVER_CL45(bp, phy, bnx2x_link_settings_status()
5671 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1, bnx2x_link_settings_status()
5682 CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, bnx2x_link_settings_status()
5702 struct bnx2x *bp = params->bp; bnx2x_warpcore_read_status() local
5710 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5712 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5718 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5720 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5728 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5739 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status()
5741 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status()
5749 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5757 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5773 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_warpcore_read_status()
5784 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5798 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5801 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_read_status()
5825 struct bnx2x *bp = params->bp; bnx2x_set_gmii_tx_driver() local
5832 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver()
5846 CL22_RD_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver()
5855 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_gmii_tx_driver()
5865 struct bnx2x *bp = params->bp; bnx2x_emac_program() local
5870 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + bnx2x_emac_program()
5901 bnx2x_bits_en(bp, bnx2x_emac_program()
5914 struct bnx2x *bp = params->bp; bnx2x_set_preemphasis() local
5918 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_preemphasis()
5926 CL22_WR_OVER_CL45(bp, phy, bnx2x_set_preemphasis()
5937 struct bnx2x *bp = params->bp; bnx2x_xgxs_config_init() local
6021 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, bnx2x_wait_reset_complete() argument
6029 bnx2x_cl22_read(bp, phy, bnx2x_wait_reset_complete()
6032 bnx2x_cl45_read(bp, phy, bnx2x_wait_reset_complete()
6041 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_wait_reset_complete()
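
bnx2x_wait_reset_complete() polls the IEEE self-clearing soft-reset bit in the PHY control register, going through clause 22 or clause 45 depending on the PHY type. A sketch of the clause-45 leg (the 1000-iteration bound, delay, and the tail of the error string are assumptions):

	u16 cnt, ctrl = 0;

	for (cnt = 0; cnt < 1000; cnt++) {
		udelay(50);
		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
				MDIO_PMA_REG_CTRL, &ctrl);
		if (!(ctrl & (1 << 15)))	/* soft reset self-cleared */
			break;
	}
	if (cnt == 1000)
		netdev_err(bp->dev,
			   "Warning: PHY was not initialized, Port %d\n",
			   params->port);
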
6052 struct bnx2x *bp = params->bp; bnx2x_link_int_enable() local
6055 if (CHIP_IS_E3(bp)) { bnx2x_link_int_enable()
6080 bnx2x_bits_en(bp, bnx2x_link_int_enable()
6086 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); bnx2x_link_int_enable()
6088 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bnx2x_link_int_enable()
6089 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), bnx2x_link_int_enable()
6090 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); bnx2x_link_int_enable()
6092 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), bnx2x_link_int_enable()
6093 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); bnx2x_link_int_enable()
6096 static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, bnx2x_rearm_latch_signal() argument
6106 latch_status = REG_RD(bp, bnx2x_rearm_latch_signal()
6111 bnx2x_bits_en(bp, bnx2x_rearm_latch_signal()
6116 bnx2x_bits_dis(bp, bnx2x_rearm_latch_signal()
6124 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, bnx2x_rearm_latch_signal()
6133 struct bnx2x *bp = params->bp; bnx2x_link_int_ack() local
6139 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, bnx2x_link_int_ack()
6144 if (USES_WARPCORE(bp)) bnx2x_link_int_ack()
6164 bnx2x_bits_en(bp, bnx2x_link_int_ack()
6219 struct bnx2x *bp; bnx2x_get_ext_phy_fw_version() local
6226 bp = params->bp; bnx2x_get_ext_phy_fw_version()
6230 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); bnx2x_get_ext_phy_fw_version()
6240 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); bnx2x_get_ext_phy_fw_version()
6260 struct bnx2x *bp = params->bp; bnx2x_set_xgxs_loopback() local
6267 if (!CHIP_IS_E3(bp)) { bnx2x_set_xgxs_loopback()
6269 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + bnx2x_set_xgxs_loopback()
6272 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, bnx2x_set_xgxs_loopback()
6276 bnx2x_cl45_write(bp, phy, bnx2x_set_xgxs_loopback()
6282 bnx2x_cl45_write(bp, phy, bnx2x_set_xgxs_loopback()
6291 if (!CHIP_IS_E3(bp)) { bnx2x_set_xgxs_loopback()
6293 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, bnx2x_set_xgxs_loopback()
6299 bnx2x_cl45_read(bp, phy, 5, bnx2x_set_xgxs_loopback()
6303 bnx2x_cl45_write(bp, phy, 5, bnx2x_set_xgxs_loopback()
6320 struct bnx2x *bp = params->bp; bnx2x_set_led() local
6335 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); bnx2x_set_led()
6336 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6339 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6348 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp); bnx2x_set_led()
6362 CHIP_IS_E2(bp) && params->num_phys == 2) { bnx2x_set_led()
6366 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6367 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); bnx2x_set_led()
6369 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6370 EMAC_WR(bp, EMAC_REG_EMAC_LED, bnx2x_set_led()
6384 if ((!CHIP_IS_E3(bp)) || bnx2x_set_led()
6385 (CHIP_IS_E3(bp) && bnx2x_set_led()
6387 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); bnx2x_set_led()
6389 if (CHIP_IS_E1x(bp) || bnx2x_set_led()
6390 CHIP_IS_E2(bp) || bnx2x_set_led()
6392 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6394 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6399 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); bnx2x_set_led()
6400 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6401 EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | bnx2x_set_led()
6413 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, bnx2x_set_led()
6417 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); bnx2x_set_led()
6419 if (CHIP_IS_E3(bp)) bnx2x_set_led()
6420 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, bnx2x_set_led()
6423 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, bnx2x_set_led()
6425 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + bnx2x_set_led()
6427 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); bnx2x_set_led()
6428 EMAC_WR(bp, EMAC_REG_EMAC_LED, bnx2x_set_led()
6431 if (CHIP_IS_E1(bp) && bnx2x_set_led()
6437 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 bnx2x_set_led()
6439 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + bnx2x_set_led()
6441 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + bnx2x_set_led()
6462 struct bnx2x *bp = params->bp; bnx2x_test_link() local
6468 if (CHIP_IS_E3(bp)) { bnx2x_test_link()
6473 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6475 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6481 bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, bnx2x_test_link()
6491 CL22_RD_OVER_CL45(bp, int_phy, bnx2x_test_link()
6544 struct bnx2x *bp = params->bp; bnx2x_link_initialize() local
6556 if (!USES_WARPCORE(bp)) bnx2x_link_initialize()
6567 (CHIP_IS_E1x(bp) || bnx2x_link_initialize()
6568 CHIP_IS_E2(bp))) bnx2x_link_initialize()
6609 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + bnx2x_link_initialize()
6622 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, bnx2x_int_link_reset()
6629 struct bnx2x *bp = params->bp; bnx2x_common_ext_link_reset() local
6632 if (CHIP_IS_E2(bp)) bnx2x_common_ext_link_reset()
6633 gpio_port = BP_PATH(bp); bnx2x_common_ext_link_reset()
6636 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_common_ext_link_reset()
6639 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_common_ext_link_reset()
6648 struct bnx2x *bp = params->bp; bnx2x_update_link_down() local
6663 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); bnx2x_update_link_down()
6666 if (!CHIP_IS_E3(bp)) bnx2x_update_link_down()
6667 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_update_link_down()
6671 if (CHIP_IS_E1x(bp) || bnx2x_update_link_down()
6672 CHIP_IS_E2(bp)) bnx2x_update_link_down()
6673 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); bnx2x_update_link_down()
6675 if (CHIP_IS_E3(bp)) { bnx2x_update_link_down()
6677 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), bnx2x_update_link_down()
6679 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), bnx2x_update_link_down()
6696 struct bnx2x *bp = params->bp; bnx2x_update_link_up() local
6711 if (USES_WARPCORE(bp)) { bnx2x_update_link_up()
6728 REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + bnx2x_update_link_up()
6730 REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); bnx2x_update_link_up()
6731 REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + bnx2x_update_link_up()
6735 if ((CHIP_IS_E1x(bp) || bnx2x_update_link_up()
6736 CHIP_IS_E2(bp))) { bnx2x_update_link_up()
6762 if (CHIP_IS_E1x(bp)) bnx2x_update_link_up()
6767 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); bnx2x_update_link_up()
6785 struct bnx2x *bp = params->bp; bnx2x_chng_link_count() local
6789 if (!(SHMEM2_HAS(bp, link_change_count))) bnx2x_chng_link_count()
6797 val = REG_RD(bp, addr) + 1; bnx2x_chng_link_count()
6798 REG_WR(bp, addr, val); bnx2x_chng_link_count()
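
bnx2x_chng_link_count() is nearly fully visible in the hits: bail out if the running management firmware's shmem2 lacks the field, then clear or increment the per-port counter. Pieced together (the clear parameter and the offset arithmetic are assumptions):

	static void bnx2x_chng_link_count(struct link_params *params, bool clear)
	{
		struct bnx2x *bp = params->bp;
		u32 addr, val;

		/* Older MFW images do not expose the counter. */
		if (!(SHMEM2_HAS(bp, link_change_count)))
			return;

		addr = params->shmem2_base +
		       offsetof(struct shmem2_region,
				link_change_count[params->port]);
		if (clear)
			val = 0;
		else
			val = REG_RD(bp, addr) + 1;
		REG_WR(bp, addr, val);
	}
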
6815 struct bnx2x *bp = params->bp; bnx2x_link_update() local
6840 if (USES_WARPCORE(bp)) bnx2x_link_update()
6845 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); bnx2x_link_update()
6847 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + bnx2x_link_update()
6850 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bnx2x_link_update()
6852 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); bnx2x_link_update()
6855 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), bnx2x_link_update()
6856 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); bnx2x_link_update()
6859 if (!CHIP_IS_E3(bp)) bnx2x_link_update()
6860 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_link_update()
6978 bnx2x_rearm_latch_signal(bp, port, bnx2x_link_update()
7001 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, bnx2x_link_update()
7064 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); bnx2x_link_update()
7072 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) bnx2x_ext_phy_hw_reset() argument
7074 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_ext_phy_hw_reset()
7077 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_ext_phy_hw_reset()
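
bnx2x_ext_phy_hw_reset() is just a GPIO pulse; pieced from the two hits above (only the delay between the edges is an assumption):

	void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
	{
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
		msleep(1);		/* assumed settle time */
		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
			       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
	}
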
7081 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, bnx2x_save_spirom_version() argument
7088 REG_WR(bp, ver_addr, spirom_ver); bnx2x_save_spirom_version()
7091 static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, bnx2x_save_bcm_spirom_ver() argument
7097 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_save_bcm_spirom_ver()
7099 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_save_bcm_spirom_ver()
7101 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), bnx2x_save_bcm_spirom_ver()
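
bnx2x_save_bcm_spirom_ver() packs two 16-bit ROM-version reads into the 32-bit word that bnx2x_save_spirom_version() writes back to shmem. A sketch (the PMA register names and the final argument are assumptions from the surrounding hits):

	u16 fw_ver1, fw_ver2;

	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_ROM_VER2, &fw_ver2);
	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1 << 16 | fw_ver2),
				  phy->ver_addr);
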
7105 static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, bnx2x_ext_phy_10G_an_resolve() argument
7110 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_10G_an_resolve()
7113 bnx2x_cl45_read(bp, phy, bnx2x_ext_phy_10G_an_resolve()
7129 struct bnx2x *bp = params->bp; bnx2x_8073_resolve_fc() local
7141 bnx2x_cl45_read(bp, phy, bnx2x_8073_resolve_fc()
7145 bnx2x_cl45_read(bp, phy, bnx2x_8073_resolve_fc()
7158 static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, bnx2x_8073_8727_external_rom_boot() argument
7168 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7174 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7179 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7184 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7190 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7210 bnx2x_cl45_read(bp, phy, bnx2x_8073_8727_external_rom_boot()
7213 bnx2x_cl45_read(bp, phy, bnx2x_8073_8727_external_rom_boot()
7223 bnx2x_cl45_write(bp, phy, bnx2x_8073_8727_external_rom_boot()
7226 bnx2x_save_bcm_spirom_ver(bp, phy, port); bnx2x_8073_8727_external_rom_boot()
7239 static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_8073_is_snr_needed() argument
7245 bnx2x_cl45_read(bp, phy, bnx2x_8073_is_snr_needed()
7254 bnx2x_cl45_read(bp, phy, bnx2x_8073_is_snr_needed()
7265 static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_8073_xaui_wa() argument
7269 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7284 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7303 bnx2x_cl45_read(bp, phy, bnx2x_8073_xaui_wa()
7321 static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_807x_force_10G() argument
7324 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7326 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7328 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7330 bnx2x_cl45_write(bp, phy, bnx2x_807x_force_10G()
7339 struct bnx2x *bp = params->bp; bnx2x_8073_set_pause_cl37() local
7340 bnx2x_cl45_read(bp, phy, bnx2x_8073_set_pause_cl37()
7364 bnx2x_cl45_write(bp, phy, bnx2x_8073_set_pause_cl37()
7373 struct bnx2x *bp = params->bp; bnx2x_8073_specific_func() local
7377 bnx2x_cl45_write(bp, phy, bnx2x_8073_specific_func()
7379 bnx2x_cl45_write(bp, phy, bnx2x_8073_specific_func()
7389 struct bnx2x *bp = params->bp; bnx2x_8073_config_init() local
7394 if (CHIP_IS_E2(bp)) bnx2x_8073_config_init()
7395 gpio_port = BP_PATH(bp); bnx2x_8073_config_init()
7399 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_config_init()
7402 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_8073_config_init()
7408 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7411 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7421 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7424 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7432 if (REG_RD(bp, params->shmem_base + bnx2x_8073_config_init()
7437 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7440 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7446 bnx2x_807x_force_10G(bp, phy); bnx2x_8073_config_init()
7450 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7477 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); bnx2x_8073_config_init()
7478 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); bnx2x_8073_config_init()
7485 bnx2x_cl45_read(bp, phy, bnx2x_8073_config_init()
7498 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); bnx2x_8073_config_init()
7501 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); bnx2x_8073_config_init()
7502 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, bnx2x_8073_config_init()
7507 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_8073_config_init()
7513 if (bnx2x_8073_is_snr_needed(bp, phy)) bnx2x_8073_config_init()
7514 bnx2x_cl45_write(bp, phy, bnx2x_8073_config_init()
7519 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); bnx2x_8073_config_init()
7521 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); bnx2x_8073_config_init()
7527 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); bnx2x_8073_config_init()
7537 struct bnx2x *bp = params->bp; bnx2x_8073_read_status() local
7543 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7549 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7551 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7555 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7559 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7565 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7569 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7571 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7578 if (bnx2x_8073_xaui_wa(bp, phy) != 0) bnx2x_8073_read_status()
7581 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7583 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7587 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7589 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7595 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { bnx2x_8073_read_status()
7600 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7605 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7609 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7640 bnx2x_cl45_read(bp, phy, bnx2x_8073_read_status()
7653 bnx2x_cl45_write(bp, phy, bnx2x_8073_read_status()
7658 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_8073_read_status()
7664 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_8073_read_status()
7681 struct bnx2x *bp = params->bp; bnx2x_8073_link_reset() local
7683 if (CHIP_IS_E2(bp)) bnx2x_8073_link_reset()
7684 gpio_port = BP_PATH(bp); bnx2x_8073_link_reset()
7689 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_link_reset()
7701 struct bnx2x *bp = params->bp; bnx2x_8705_config_init() local
7704 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8705_config_init()
7707 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8705_config_init()
7708 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); bnx2x_8705_config_init()
7709 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8705_config_init()
7711 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7713 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7715 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7717 bnx2x_cl45_write(bp, phy, bnx2x_8705_config_init()
7720 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); bnx2x_8705_config_init()
7730 struct bnx2x *bp = params->bp; bnx2x_8705_read_status() local
7732 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7736 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7740 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7743 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7745 bnx2x_cl45_read(bp, phy, bnx2x_8705_read_status()
7764 struct bnx2x *bp = params->bp; bnx2x_set_disable_pmd_transmit() local
7778 bnx2x_cl45_write(bp, phy, bnx2x_set_disable_pmd_transmit()
7787 struct bnx2x *bp = params->bp; bnx2x_get_gpio_port() local
7788 if (CHIP_IS_E2(bp)) bnx2x_get_gpio_port()
7789 gpio_port = BP_PATH(bp); bnx2x_get_gpio_port()
7792 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_get_gpio_port()
7793 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_get_gpio_port()
7803 struct bnx2x *bp = params->bp; bnx2x_sfp_e1e2_set_transmitter() local
7807 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_sfp_e1e2_set_transmitter()
7816 bnx2x_cl45_read(bp, phy, bnx2x_sfp_e1e2_set_transmitter()
7826 bnx2x_cl45_write(bp, phy, bnx2x_sfp_e1e2_set_transmitter()
7845 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); bnx2x_sfp_e1e2_set_transmitter()
7858 struct bnx2x *bp = params->bp; bnx2x_sfp_set_transmitter() local
7860 if (CHIP_IS_E3(bp)) bnx2x_sfp_set_transmitter()
7871 struct bnx2x *bp = params->bp; bnx2x_8726_read_sfp_module_eeprom() local
7880 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7885 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7890 bnx2x_cl45_write(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7896 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7915 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7922 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_sfp_module_eeprom()
7937 struct bnx2x *bp = params->bp; bnx2x_warpcore_power_module() local
7939 pin_cfg = (REG_RD(bp, params->shmem_base + bnx2x_warpcore_power_module()
7952 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); bnx2x_warpcore_power_module()
7964 struct bnx2x *bp = params->bp; bnx2x_warpcore_read_sfp_module_eeprom() local
7981 rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, bnx2x_warpcore_read_sfp_module_eeprom()
8000 struct bnx2x *bp = params->bp; bnx2x_8727_read_sfp_module_eeprom() local
8013 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8019 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8025 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8031 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8036 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8042 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8053 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8072 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8079 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_sfp_module_eeprom()
8095 struct bnx2x *bp = params->bp; bnx2x_read_sfp_module_eeprom() local
8136 struct bnx2x *bp = params->bp; bnx2x_get_edc_mode() local
8203 if (!CHIP_IS_E1x(bp)) { bnx2x_get_edc_mode()
8204 gport = BP_PATH(bp) + bnx2x_get_edc_mode()
8207 netdev_err(bp->dev, bnx2x_get_edc_mode()
8238 media_types = REG_RD(bp, sync_offset); bnx2x_get_edc_mode()
8250 REG_WR(bp, sync_offset, media_types); bnx2x_get_edc_mode()
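
Note: the EEPROM hits above all feed bnx2x_get_edc_mode(), which reads the first bytes of the module's SFF-8472 ID page and derives an EDC mode from the connector type. Below is a minimal, self-contained sketch of that classification; the offset and connector values come from the SFF spec, and the constant and enum names are illustrative rather than the driver's own.

#include <stdio.h>
#include <stdint.h>

/* SFF-8472 A0h page offsets/values (from the spec; the driver's
 * SFP_EEPROM_* constants follow the same layout). */
#define SFP_EEPROM_CON_TYPE_ADDR	2
#define SFP_CON_TYPE_LC			0x07	/* optical, LC connector */
#define SFP_CON_TYPE_COPPER		0x21	/* copper pigtail (DAC) */

enum edc_mode { EDC_MODE_LIMITING, EDC_MODE_PASSIVE_DAC };

/* Model of the decision made after bnx2x_read_sfp_module_eeprom()
 * returns the first bytes of the module ID page. */
static enum edc_mode classify_module(const uint8_t *eeprom)
{
	if (eeprom[SFP_EEPROM_CON_TYPE_ADDR] == SFP_CON_TYPE_COPPER)
		return EDC_MODE_PASSIVE_DAC;
	return EDC_MODE_LIMITING;
}

int main(void)
{
	uint8_t eeprom[16] = { 0 };

	eeprom[SFP_EEPROM_CON_TYPE_ADDR] = SFP_CON_TYPE_LC;
	printf("edc mode = %d\n", classify_module(eeprom));	/* 0 = limiting */
	return 0;
}

The real function goes further, telling passive from active copper via the transceiver-technology byte and writing the detected media type back to shmem (the REG_WR(bp, sync_offset, media_types) hit above).
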
8277 struct bnx2x *bp = params->bp; bnx2x_verify_sfp_module() local
8283 val = REG_RD(bp, params->shmem_base + bnx2x_verify_sfp_module()
8313 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); bnx2x_verify_sfp_module()
8339 netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," bnx2x_verify_sfp_module()
8354 struct bnx2x *bp = params->bp; bnx2x_wait_for_sfp_module_initialized() local
8382 static void bnx2x_8727_power_module(struct bnx2x *bp, bnx2x_8727_power_module() argument
8408 bnx2x_cl45_write(bp, phy, bnx2x_8727_power_module()
8414 static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, bnx2x_8726_set_limiting_mode() argument
8420 bnx2x_cl45_read(bp, phy, bnx2x_8726_set_limiting_mode()
8429 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8443 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8447 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8451 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8455 bnx2x_cl45_write(bp, phy, bnx2x_8726_set_limiting_mode()
8463 static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, bnx2x_8727_set_limiting_mode() argument
8469 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_limiting_mode()
8474 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8479 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_limiting_mode()
8484 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8489 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_limiting_mode()
8501 struct bnx2x *bp = params->bp; bnx2x_8727_specific_func() local
8512 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8515 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8518 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8521 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_specific_func()
8532 bnx2x_cl45_write(bp, phy, bnx2x_8727_specific_func()
8546 struct bnx2x *bp = params->bp; bnx2x_set_e1e2_module_fault_led() local
8548 u32 fault_led_gpio = REG_RD(bp, params->shmem_base + bnx2x_set_e1e2_module_fault_led()
8566 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); bnx2x_set_e1e2_module_fault_led()
8580 struct bnx2x *bp = params->bp; bnx2x_set_e3_module_fault_led() local
8581 pin_cfg = (REG_RD(bp, params->shmem_base + bnx2x_set_e3_module_fault_led()
8588 bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); bnx2x_set_e3_module_fault_led()
8594 struct bnx2x *bp = params->bp; bnx2x_set_sfp_module_fault_led() local
8596 if (CHIP_IS_E3(bp)) { bnx2x_set_sfp_module_fault_led()
8608 struct bnx2x *bp = params->bp; bnx2x_warpcore_hw_reset() local
8611 REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); bnx2x_warpcore_hw_reset()
8614 REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); bnx2x_warpcore_hw_reset()
8615 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); bnx2x_warpcore_hw_reset()
8616 REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); bnx2x_warpcore_hw_reset()
8623 struct bnx2x *bp = params->bp; bnx2x_power_sfp_module() local
8629 bnx2x_8727_power_module(params->bp, phy, power); bnx2x_power_sfp_module()
8644 struct bnx2x *bp = params->bp; bnx2x_warpcore_set_limiting_mode() local
8648 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8666 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8669 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_warpcore_set_limiting_mode()
8673 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_set_limiting_mode()
8674 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_warpcore_set_limiting_mode()
8684 bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); bnx2x_set_limiting_mode()
8688 bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); bnx2x_set_limiting_mode()
8699 struct bnx2x *bp = params->bp; bnx2x_sfp_module_detection() local
8703 u32 val = REG_RD(bp, params->shmem_base + bnx2x_sfp_module_detection()
8753 struct bnx2x *bp = params->bp; bnx2x_handle_module_detect_int() local
8757 if (CHIP_IS_E3(bp)) { bnx2x_handle_module_detect_int()
8764 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, bnx2x_handle_module_detect_int()
8775 gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); bnx2x_handle_module_detect_int()
8779 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_handle_module_detect_int()
8783 bnx2x_set_gpio_int(bp, gpio_num, bnx2x_handle_module_detect_int()
8788 if (CHIP_IS_E3(bp)) { bnx2x_handle_module_detect_int()
8794 bnx2x_cl45_read(bp, phy, bnx2x_handle_module_detect_int()
8801 bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_handle_module_detect_int()
8803 bnx2x_warpcore_reset_lane(bp, phy, 0); bnx2x_handle_module_detect_int()
8810 bnx2x_set_gpio_int(bp, gpio_num, bnx2x_handle_module_detect_int()
8823 static void bnx2x_sfp_mask_fault(struct bnx2x *bp, bnx2x_sfp_mask_fault() argument
8829 bnx2x_cl45_read(bp, phy, bnx2x_sfp_mask_fault()
8832 bnx2x_cl45_read(bp, phy, bnx2x_sfp_mask_fault()
8836 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); bnx2x_sfp_mask_fault()
8841 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); bnx2x_sfp_mask_fault()
8852 struct bnx2x *bp = params->bp; bnx2x_8706_8726_read_status() local
8855 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8858 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, bnx2x_8706_8726_read_status()
8862 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8864 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8868 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8870 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8872 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8874 bnx2x_cl45_read(bp, phy, bnx2x_8706_8726_read_status()
8894 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8706_8726_read_status()
8896 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8706_8726_read_status()
8914 struct bnx2x *bp = params->bp; bnx2x_8706_config_init() local
8916 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8706_config_init()
8919 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8706_config_init()
8920 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); bnx2x_8706_config_init()
8921 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8706_config_init()
8925 bnx2x_cl45_read(bp, phy, bnx2x_8706_config_init()
8940 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); bnx2x_8706_config_init()
8947 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); bnx2x_8706_config_init()
8954 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8957 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8961 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8968 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8972 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8975 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8978 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8982 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8984 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8987 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
8991 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); bnx2x_8706_config_init()
8997 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_8706_config_init()
9004 bnx2x_cl45_read(bp, phy, bnx2x_8706_config_init()
9007 bnx2x_cl45_write(bp, phy, bnx2x_8706_config_init()
9027 struct bnx2x *bp = params->bp; bnx2x_8726_config_loopback() local
9029 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); bnx2x_8726_config_loopback()
9035 struct bnx2x *bp = params->bp; bnx2x_8726_external_rom_boot() local
9040 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9044 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9049 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9053 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9062 bnx2x_cl45_write(bp, phy, bnx2x_8726_external_rom_boot()
9067 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); bnx2x_8726_external_rom_boot()
9074 struct bnx2x *bp = params->bp; bnx2x_8726_read_status() local
9078 bnx2x_cl45_read(bp, phy, bnx2x_8726_read_status()
9095 struct bnx2x *bp = params->bp; bnx2x_8726_config_init() local
9098 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_8726_config_init()
9099 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8726_config_init()
9112 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9114 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9116 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9118 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9130 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9132 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9134 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9136 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9138 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9143 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9145 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9150 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9161 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9166 bnx2x_cl45_write(bp, phy, bnx2x_8726_config_init()
9179 struct bnx2x *bp = params->bp; bnx2x_8726_link_reset() local
9182 bnx2x_cl45_write(bp, phy, bnx2x_8726_link_reset()
9194 struct bnx2x *bp = params->bp; bnx2x_8727_set_link_led() local
9216 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_link_led()
9222 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_link_led()
9226 bnx2x_cl45_read(bp, phy, bnx2x_8727_set_link_led()
9232 bnx2x_cl45_write(bp, phy, bnx2x_8727_set_link_led()
9244 struct bnx2x *bp = params->bp; bnx2x_8727_hw_reset() local
9245 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8727_hw_reset()
9246 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8727_hw_reset()
9248 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, bnx2x_8727_hw_reset()
9255 struct bnx2x *bp = params->bp; bnx2x_8727_config_speed() local
9261 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9263 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9265 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_speed()
9272 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_speed()
9276 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9288 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9290 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9296 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9299 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9301 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9303 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_speed()
9315 struct bnx2x *bp = params->bp; bnx2x_8727_config_init() local
9318 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8727_config_init()
9326 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9335 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9341 bnx2x_8727_power_module(bp, phy, 1); bnx2x_8727_config_init()
9343 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9346 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9358 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9362 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9370 tx_en_mode = REG_RD(bp, params->shmem_base + bnx2x_8727_config_init()
9378 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9382 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9384 bnx2x_cl45_read(bp, phy, bnx2x_8727_config_init()
9387 bnx2x_cl45_write(bp, phy, bnx2x_8727_config_init()
9398 struct bnx2x *bp = params->bp; bnx2x_8727_handle_mod_abs() local
9400 u32 val = REG_RD(bp, params->shmem_base + bnx2x_8727_handle_mod_abs()
9404 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9423 bnx2x_cl45_write(bp, phy, bnx2x_8727_handle_mod_abs()
9430 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9448 bnx2x_cl45_write(bp, phy, bnx2x_8727_handle_mod_abs()
9457 bnx2x_cl45_read(bp, phy, bnx2x_8727_handle_mod_abs()
9485 struct bnx2x *bp = params->bp; bnx2x_8727_read_status() local
9491 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9498 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9504 bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, bnx2x_8727_read_status()
9507 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9513 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9521 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9526 if (!CHIP_IS_E1x(bp)) bnx2x_8727_read_status()
9527 oc_port = BP_PATH(bp) + (params->port << 1); bnx2x_8727_read_status()
9531 netdev_err(bp->dev, "Error: Power fault on Port %d has " bnx2x_8727_read_status()
9540 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9544 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9549 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9553 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9556 bnx2x_8727_power_module(params->bp, phy, 0); bnx2x_8727_read_status()
9565 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9578 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9603 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_read_status()
9606 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, bnx2x_8727_read_status()
9622 bnx2x_cl45_read(bp, phy, bnx2x_8727_read_status()
9632 bnx2x_cl45_write(bp, phy, bnx2x_8727_read_status()
9642 struct bnx2x *bp = params->bp; bnx2x_8727_link_reset() local
9650 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); bnx2x_8727_link_reset()
9665 struct bnx2x *bp, bnx2x_save_848xx_spirom_version()
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); bnx2x_save_848xx_spirom_version()
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, bnx2x_save_848xx_spirom_version()
9686 bnx2x_cl45_write(bp, phy, reg_set[i].devad, bnx2x_save_848xx_spirom_version()
9690 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); bnx2x_save_848xx_spirom_version()
9698 bnx2x_save_spirom_version(bp, port, 0, bnx2x_save_848xx_spirom_version()
9705 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); bnx2x_save_848xx_spirom_version()
9706 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); bnx2x_save_848xx_spirom_version()
9707 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); bnx2x_save_848xx_spirom_version()
9709 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); bnx2x_save_848xx_spirom_version()
9717 bnx2x_save_spirom_version(bp, port, 0, bnx2x_save_848xx_spirom_version()
9723 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); bnx2x_save_848xx_spirom_version()
9725 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); bnx2x_save_848xx_spirom_version()
9727 bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, bnx2x_save_848xx_spirom_version()
9732 static void bnx2x_848xx_set_led(struct bnx2x *bp, bnx2x_848xx_set_led() argument
9746 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_led()
9752 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_led()
9757 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, bnx2x_848xx_set_led()
9766 bnx2x_cl45_read_or_write(bp, phy, bnx2x_848xx_set_led()
9775 struct bnx2x *bp = params->bp; bnx2x_848xx_specific_func() local
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); bnx2x_848xx_specific_func()
9786 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, bnx2x_848xx_specific_func()
9789 bnx2x_848xx_set_led(bp, phy); bnx2x_848xx_specific_func()
9798 struct bnx2x *bp = params->bp; bnx2x_848xx_cmn_config_init() local
9802 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9806 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9811 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9815 bnx2x_cl45_read(bp, phy, bnx2x_848xx_cmn_config_init()
9834 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9882 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9894 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9900 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9912 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9924 bp, phy, bnx2x_848xx_cmn_config_init()
9928 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9932 bnx2x_cl45_write(bp, phy, bnx2x_848xx_cmn_config_init()
9944 struct bnx2x *bp = params->bp; bnx2x_8481_config_init() local
9946 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8481_config_init()
9950 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_8481_config_init()
9951 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8481_config_init()
9953 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_8481_config_init()
9967 struct bnx2x *bp = params->bp; bnx2x_84858_cmd_hdlr() local
9977 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84858_cmd_hdlr()
9994 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84858_cmd_hdlr()
10002 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84858_cmd_hdlr()
10011 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84858_cmd_hdlr()
10029 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84858_cmd_hdlr()
10043 struct bnx2x *bp = params->bp; bnx2x_84833_cmd_hdlr() local
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10069 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_84833_cmd_hdlr()
10098 struct bnx2x *bp = params->bp; bnx2x_848xx_cmd_hdlr() local
10101 (REG_RD(bp, params->shmem2_base + bnx2x_848xx_cmd_hdlr()
10120 struct bnx2x *bp = params->bp; bnx2x_848xx_pair_swap_cfg() local
10123 pair_swap = REG_RD(bp, params->shmem_base + bnx2x_848xx_pair_swap_cfg()
10143 static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, bnx2x_84833_get_reset_gpios() argument
10150 if (CHIP_IS_E3(bp)) { bnx2x_84833_get_reset_gpios()
10154 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + bnx2x_84833_get_reset_gpios()
10167 reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + bnx2x_84833_get_reset_gpios()
10184 struct bnx2x *bp = params->bp; bnx2x_84833_hw_reset_phy() local
10186 u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + bnx2x_84833_hw_reset_phy()
10193 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_84833_hw_reset_phy()
10196 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_84833_hw_reset_phy()
10203 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, bnx2x_84833_hw_reset_phy()
10206 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); bnx2x_84833_hw_reset_phy()
10219 struct bnx2x *bp = params->bp; bnx2x_8483x_disable_eee() local
10240 struct bnx2x *bp = params->bp; bnx2x_8483x_enable_eee() local
10258 struct bnx2x *bp = params->bp; bnx2x_848x3_config_init() local
10267 if (!(CHIP_IS_E1x(bp))) bnx2x_848x3_config_init()
10268 port = BP_PATH(bp); bnx2x_848x3_config_init()
10273 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, bnx2x_848x3_config_init()
10278 bnx2x_cl45_write(bp, phy, bnx2x_848x3_config_init()
10283 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_848x3_config_init()
10302 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848x3_config_init()
10311 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10319 if (CHIP_IS_E3(bp)) { bnx2x_848x3_config_init()
10350 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10372 bnx2x_save_848xx_spirom_version(phy, bp, params->port); bnx2x_848x3_config_init()
10375 u32 cms_enable = REG_RD(bp, params->shmem_base + bnx2x_848x3_config_init()
10380 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10386 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10390 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, bnx2x_848x3_config_init()
10421 bnx2x_cl45_read_and_write(bp, phy, bnx2x_848x3_config_init()
10433 struct bnx2x *bp = params->bp; bnx2x_848xx_read_status() local
10440 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10442 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10452 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_848xx_read_status()
10457 bnx2x_cl45_write(bp, phy, bnx2x_848xx_read_status()
10462 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10493 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10500 bnx2x_cl45_read(bp, phy, bnx2x_848xx_read_status()
10515 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10533 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10543 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_848xx_read_status()
10570 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_8481_hw_reset()
10572 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_8481_hw_reset()
10579 bnx2x_cl45_write(params->bp, phy, bnx2x_8481_link_reset()
10581 bnx2x_cl45_write(params->bp, phy, bnx2x_8481_link_reset()
10588 struct bnx2x *bp = params->bp; bnx2x_848x3_link_reset() local
10592 if (!(CHIP_IS_E1x(bp))) bnx2x_848x3_link_reset()
10593 port = BP_PATH(bp); bnx2x_848x3_link_reset()
10598 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, bnx2x_848x3_link_reset()
10602 bnx2x_cl45_read(bp, phy, bnx2x_848x3_link_reset()
10606 bnx2x_cl45_write(bp, phy, bnx2x_848x3_link_reset()
10615 struct bnx2x *bp = params->bp; bnx2x_848xx_set_link_led() local
10619 if (!(CHIP_IS_E1x(bp))) bnx2x_848xx_set_link_led()
10620 port = BP_PATH(bp); bnx2x_848xx_set_link_led()
10633 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10638 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10643 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10648 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10654 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10669 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10674 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10679 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10684 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10690 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10699 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_848xx_set_link_led()
10706 bp, bnx2x_848xx_set_link_led()
10711 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10725 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10732 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10738 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10743 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10748 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10753 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10758 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10767 if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_848xx_set_link_led()
10774 bp, bnx2x_848xx_set_link_led()
10779 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10795 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10804 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10811 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10816 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10821 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10826 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10840 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10846 bnx2x_cl45_read(bp, phy, bnx2x_848xx_set_link_led()
10852 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10861 bnx2x_cl45_write(bp, phy, bnx2x_848xx_set_link_led()
10879 if (CHIP_IS_E3(bp)) { bnx2x_848xx_set_link_led()
10880 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, bnx2x_848xx_set_link_led()
10892 struct bnx2x *bp = params->bp; bnx2x_54618se_specific_func() local
10898 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10901 bnx2x_cl22_read(bp, phy, bnx2x_54618se_specific_func()
10906 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10910 bnx2x_cl22_write(bp, phy, bnx2x_54618se_specific_func()
10921 struct bnx2x *bp = params->bp; bnx2x_54618se_config_init() local
10934 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_54618se_config_init()
10941 bnx2x_set_cfg_pin(bp, cfg_pin, 1); bnx2x_54618se_config_init()
10947 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10949 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_54618se_config_init()
10957 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10960 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10964 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
10981 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10985 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
10989 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
11010 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
11013 bnx2x_cl22_read(bp, phy, bnx2x_54618se_config_init()
11049 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
11056 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
11065 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, bnx2x_54618se_config_init()
11068 bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); bnx2x_54618se_config_init()
11070 bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); bnx2x_54618se_config_init()
11105 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, bnx2x_54618se_config_init()
11110 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
11117 bnx2x_cl22_write(bp, phy, bnx2x_54618se_config_init()
11127 struct bnx2x *bp = params->bp; bnx2x_5461x_set_link_led() local
11130 bnx2x_cl22_write(bp, phy, bnx2x_5461x_set_link_led()
11133 bnx2x_cl22_read(bp, phy, bnx2x_5461x_set_link_led()
11153 bnx2x_cl22_write(bp, phy, bnx2x_5461x_set_link_led()
11163 struct bnx2x *bp = params->bp; bnx2x_54618se_link_reset() local
11170 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); bnx2x_54618se_link_reset()
11175 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_54618se_link_reset()
11182 bnx2x_set_cfg_pin(bp, cfg_pin, 0); bnx2x_54618se_link_reset()
11189 struct bnx2x *bp = params->bp; bnx2x_54618se_read_status() local
11195 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11201 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11238 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11244 bnx2x_cl22_read(bp, phy, bnx2x_54618se_read_status()
11258 bnx2x_cl22_read(bp, phy, 0x5, &val); bnx2x_54618se_read_status()
11276 bnx2x_cl22_read(bp, phy, 0xa, &val); bnx2x_54618se_read_status()
11295 struct bnx2x *bp = params->bp; bnx2x_54618se_config_loopback() local
11303 bnx2x_cl22_write(bp, phy, 0x09, 3<<11); bnx2x_54618se_config_loopback()
11310 bnx2x_cl22_read(bp, phy, 0x00, &val); bnx2x_54618se_config_loopback()
11313 bnx2x_cl22_write(bp, phy, 0x00, val); bnx2x_54618se_config_loopback()
11319 bnx2x_cl22_write(bp, phy, 0x18, 7); bnx2x_54618se_config_loopback()
11320 bnx2x_cl22_read(bp, phy, 0x18, &val); bnx2x_54618se_config_loopback()
11321 bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); bnx2x_54618se_config_loopback()
11324 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); bnx2x_54618se_config_loopback()
11329 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); bnx2x_54618se_config_loopback()
11338 struct bnx2x *bp = params->bp; bnx2x_7101_config_loopback() local
11340 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_loopback()
11349 struct bnx2x *bp = params->bp; bnx2x_7101_config_init() local
11353 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_7101_config_init()
11356 bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_7101_config_init()
11357 bnx2x_wait_reset_complete(bp, phy, params); bnx2x_7101_config_init()
11359 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11362 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11367 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11370 bnx2x_cl45_write(bp, phy, bnx2x_7101_config_init()
11374 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11377 bnx2x_cl45_read(bp, phy, bnx2x_7101_config_init()
11379 bnx2x_save_spirom_version(bp, params->port, bnx2x_7101_config_init()
11388 struct bnx2x *bp = params->bp; bnx2x_7101_read_status() local
11391 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11393 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11397 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11399 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11406 bnx2x_cl45_read(bp, phy, bnx2x_7101_read_status()
11413 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_7101_read_status()
11437 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) bnx2x_sfx7101_sp_sw_reset() argument
11441 bnx2x_cl45_read(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11448 bnx2x_cl45_write(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11453 bnx2x_cl45_read(bp, phy, bnx2x_sfx7101_sp_sw_reset()
11465 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, bnx2x_7101_hw_reset()
11468 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, bnx2x_7101_hw_reset()
11476 struct bnx2x *bp = params->bp; bnx2x_7101_set_link_led() local
11489 bnx2x_cl45_write(bp, phy, bnx2x_7101_set_link_led()
12030 static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, bnx2x_populate_preemphasis() argument
12042 rx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
12046 tx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
12050 rx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
12054 tx = REG_RD(bp, shmem_base + bnx2x_populate_preemphasis()
12067 static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, bnx2x_get_ext_phy_config() argument
12073 ext_phy_config = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_config()
12078 ext_phy_config = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_config()
12089 static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, bnx2x_populate_int_phy() argument
12094 u32 switch_cfg = (REG_RD(bp, shmem_base + bnx2x_populate_int_phy()
12098 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | bnx2x_populate_int_phy()
12099 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); bnx2x_populate_int_phy()
12102 if (USES_WARPCORE(bp)) { bnx2x_populate_int_phy()
12104 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
12107 if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) bnx2x_populate_int_phy()
12112 serdes_net_if = (REG_RD(bp, shmem_base + bnx2x_populate_int_phy()
12187 if (CHIP_REV(bp) == CHIP_REV_Ax) bnx2x_populate_int_phy()
12194 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
12200 phy_addr = REG_RD(bp, bnx2x_populate_int_phy()
12211 phy->mdio_ctrl = bnx2x_get_emac_base(bp, bnx2x_populate_int_phy()
12214 if (CHIP_IS_E2(bp)) bnx2x_populate_int_phy()
12222 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); bnx2x_populate_int_phy()
12226 static int bnx2x_populate_ext_phy(struct bnx2x *bp, bnx2x_populate_ext_phy() argument
12235 ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, bnx2x_populate_ext_phy()
12302 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); bnx2x_populate_ext_phy()
12308 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, bnx2x_populate_ext_phy()
12319 u32 size = REG_RD(bp, shmem2_base); bnx2x_populate_ext_phy()
12334 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); bnx2x_populate_ext_phy()
12340 u32 raw_ver = REG_RD(bp, phy->ver_addr); bnx2x_populate_ext_phy()
12354 static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, bnx2x_populate_phy() argument
12360 return bnx2x_populate_int_phy(bp, shmem_base, port, phy); bnx2x_populate_phy()
12361 status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_populate_phy()
12370 struct bnx2x *bp = params->bp; bnx2x_phy_def_cfg() local
12374 link_config = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12377 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12382 link_config = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12385 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + bnx2x_phy_def_cfg()
12475 struct bnx2x *bp = params->bp; bnx2x_phy_probe() local
12495 if (bnx2x_populate_phy(bp, phy_index, params->shmem_base, bnx2x_phy_probe()
12521 media_types = REG_RD(bp, sync_offset); bnx2x_phy_probe()
12535 REG_WR(bp, sync_offset, media_types); bnx2x_phy_probe()
12548 struct bnx2x *bp = params->bp; bnx2x_init_bmac_loopback() local
12562 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_bmac_loopback()
12568 struct bnx2x *bp = params->bp; bnx2x_init_emac_loopback() local
12581 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_emac_loopback()
12587 struct bnx2x *bp = params->bp; bnx2x_init_xmac_loopback() local
12601 bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0); bnx2x_init_xmac_loopback()
12607 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_xmac_loopback()
12613 struct bnx2x *bp = params->bp; bnx2x_init_umac_loopback() local
12622 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_umac_loopback()
12628 struct bnx2x *bp = params->bp; bnx2x_init_xgxs_loopback() local
12641 if (!USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12646 if (USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12653 if (USES_WARPCORE(bp)) bnx2x_init_xgxs_loopback()
12672 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_init_xgxs_loopback()
12679 struct bnx2x *bp = params->bp; bnx2x_set_rx_filter() local
12683 if (!CHIP_IS_E1x(bp)) bnx2x_set_rx_filter()
12685 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); bnx2x_set_rx_filter()
12687 if (!CHIP_IS_E1(bp)) { bnx2x_set_rx_filter()
12688 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, bnx2x_set_rx_filter()
12692 REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : bnx2x_set_rx_filter()
12700 struct bnx2x *bp = params->bp; bnx2x_avoid_link_flap() local
12702 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_avoid_link_flap()
12722 lfa_sts = REG_RD(bp, params->lfa_base + bnx2x_avoid_link_flap()
12729 if (CHIP_IS_E3(bp)) { bnx2x_avoid_link_flap()
12731 REG_WR(bp, GRCBASE_MISC + bnx2x_avoid_link_flap()
12735 REG_WR(bp, GRCBASE_MISC + bnx2x_avoid_link_flap()
12759 REG_WR(bp, params->lfa_base + bnx2x_avoid_link_flap()
12763 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_avoid_link_flap()
12775 struct bnx2x *bp = params->bp; bnx2x_cannot_avoid_link_flap() local
12782 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12786 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12790 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12795 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12801 tmp_val = REG_RD(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12806 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12809 lfa_sts = REG_RD(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
12825 REG_WR(bp, params->lfa_base + bnx2x_cannot_avoid_link_flap()
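
Note: bnx2x_avoid_link_flap() and bnx2x_cannot_avoid_link_flap() (source lines 12700-12825 above) decide whether a link reconfiguration may skip the full PHY reset by comparing the requested settings against those saved in the LFA (link flap avoidance) block, recording the mismatch reasons into an lfa_sts word otherwise. A toy version of that compare-and-record step, with an invented field set and reason encoding:

#include <stdio.h>

struct link_req { unsigned int speed, duplex, flow_ctrl; };

enum {
	LFA_OK                 = 0,
	LFA_SPEED_MISMATCH     = 1 << 0,
	LFA_DUPLEX_MISMATCH    = 1 << 1,
	LFA_FLOW_CTRL_MISMATCH = 1 << 2,
};

/* Model: compare the new request against the saved one and build a
 * mismatch bitmap, the way the driver fills lfa_sts in shmem. */
static unsigned int lfa_compare(const struct link_req *saved,
				const struct link_req *req)
{
	unsigned int sts = LFA_OK;

	if (saved->speed != req->speed)
		sts |= LFA_SPEED_MISMATCH;
	if (saved->duplex != req->duplex)
		sts |= LFA_DUPLEX_MISMATCH;
	if (saved->flow_ctrl != req->flow_ctrl)
		sts |= LFA_FLOW_CTRL_MISMATCH;
	return sts;
}

int main(void)
{
	struct link_req saved = { 10000, 1, 0 };
	struct link_req req = saved;

	req.flow_ctrl = 1;
	printf("lfa status = 0x%x\n", lfa_compare(&saved, &req));	/* 0x4 */
	return 0;
}

A zero status means the flap can be avoided; any set bit forces the full reset path.
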
12833 struct bnx2x *bp = params->bp; bnx2x_phy_init() local
12866 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, bnx2x_phy_init()
12902 if (!CHIP_IS_E3(bp)) { bnx2x_phy_init()
12906 bnx2x_serdes_deassert(bp, params->port); bnx2x_phy_init()
12922 struct bnx2x *bp = params->bp; bnx2x_link_reset() local
12932 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, bnx2x_link_reset()
12939 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); bnx2x_link_reset()
12942 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12943 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); bnx2x_link_reset()
12944 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); bnx2x_link_reset()
12947 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12948 bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); bnx2x_link_reset()
12954 if (!CHIP_IS_E3(bp)) bnx2x_link_reset()
12955 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); bnx2x_link_reset()
12962 bnx2x_set_mdio_emac_per_phy(bp, params); bnx2x_link_reset()
12983 bnx2x_rearm_latch_signal(bp, port, 0); bnx2x_link_reset()
12984 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, bnx2x_link_reset()
12992 if (!CHIP_IS_E3(bp)) { bnx2x_link_reset()
12994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, bnx2x_link_reset()
12996 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); bnx2x_link_reset()
12997 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); bnx2x_link_reset()
13001 if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_link_reset()
13003 REG_WR(bp, xmac_base + XMAC_REG_CTRL, bnx2x_link_reset()
13013 struct bnx2x *bp = params->bp; bnx2x_lfa_reset() local
13023 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); bnx2x_lfa_reset()
13029 if (!CHIP_IS_E3(bp)) bnx2x_lfa_reset()
13030 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); bnx2x_lfa_reset()
13032 if (CHIP_IS_E3(bp)) { bnx2x_lfa_reset()
13050 if (!CHIP_IS_E3(bp)) bnx2x_lfa_reset()
13051 bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); bnx2x_lfa_reset()
13053 if (CHIP_IS_E3(bp)) { bnx2x_lfa_reset()
13058 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_lfa_reset()
13065 static int bnx2x_8073_common_init_phy(struct bnx2x *bp, bnx2x_8073_common_init_phy() argument
13076 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8073_common_init_phy()
13077 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8073_common_init_phy()
13079 bnx2x_ext_phy_hw_reset(bp, port); bnx2x_8073_common_init_phy()
13084 if (CHIP_IS_E1x(bp)) { bnx2x_8073_common_init_phy()
13095 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8073_common_init_phy()
13102 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_8073_common_init_phy()
13112 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_common_init_phy()
13117 bnx2x_cl45_write(bp, &phy[port], bnx2x_8073_common_init_phy()
13136 if (CHIP_IS_E1x(bp)) bnx2x_8073_common_init_phy()
13143 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13148 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13153 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13168 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13172 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13178 bnx2x_cl45_read(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13181 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8073_common_init_phy()
13186 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, bnx2x_8073_common_init_phy()
13191 static int bnx2x_8726_common_init_phy(struct bnx2x *bp, bnx2x_8726_common_init_phy() argument
13201 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); bnx2x_8726_common_init_phy()
13204 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_8726_common_init_phy()
13206 bnx2x_ext_phy_hw_reset(bp, 0); bnx2x_8726_common_init_phy()
13212 if (CHIP_IS_E1x(bp)) { bnx2x_8726_common_init_phy()
13220 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8726_common_init_phy()
13228 bnx2x_cl45_write(bp, &phy, bnx2x_8726_common_init_phy()
13233 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, bnx2x_8726_common_init_phy()
13240 static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base, bnx2x_get_ext_phy_reset_gpio() argument
13244 u32 phy_gpio_reset = REG_RD(bp, shmem_base + bnx2x_get_ext_phy_reset_gpio()
13286 static int bnx2x_8727_common_init_phy(struct bnx2x *bp, bnx2x_8727_common_init_phy() argument
13296 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_8727_common_init_phy()
13297 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_8727_common_init_phy()
13305 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], bnx2x_8727_common_init_phy()
13312 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, bnx2x_8727_common_init_phy()
13315 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, bnx2x_8727_common_init_phy()
13325 if (CHIP_IS_E1x(bp)) { bnx2x_8727_common_init_phy()
13336 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_8727_common_init_phy()
13343 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + bnx2x_8727_common_init_phy()
13352 bnx2x_cl45_write(bp, &phy[port], bnx2x_8727_common_init_phy()
13367 if (CHIP_IS_E1x(bp)) bnx2x_8727_common_init_phy()
13373 if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], bnx2x_8727_common_init_phy()
13377 bnx2x_cl45_write(bp, phy_blk[port], bnx2x_8727_common_init_phy()
13385 static int bnx2x_84833_common_init_phy(struct bnx2x *bp, bnx2x_84833_common_init_phy() argument
13392 reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); bnx2x_84833_common_init_phy()
13393 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); bnx2x_84833_common_init_phy()
13395 bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); bnx2x_84833_common_init_phy()
13401 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], bnx2x_ext_phy_common_init() argument
13409 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13416 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13425 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13435 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, bnx2x_ext_phy_common_init()
13450 netdev_err(bp->dev, "Warning: PHY was not initialized," bnx2x_ext_phy_common_init()
13456 int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], bnx2x_common_init_phy() argument
13464 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0); bnx2x_common_init_phy()
13465 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1); bnx2x_common_init_phy()
13467 if (CHIP_IS_E3(bp)) { bnx2x_common_init_phy()
13469 val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); bnx2x_common_init_phy()
13470 REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); bnx2x_common_init_phy()
13473 phy_ver = REG_RD(bp, shmem_base_path[0] + bnx2x_common_init_phy()
13485 ext_phy_config = bnx2x_get_ext_phy_config(bp, bnx2x_common_init_phy()
13489 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path, bnx2x_common_init_phy()
13500 struct bnx2x *bp = params->bp; bnx2x_check_over_curr() local
13505 cfg_pin = (REG_RD(bp, params->shmem_base + bnx2x_check_over_curr()
13512 if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) bnx2x_check_over_curr()
13517 netdev_err(bp->dev, "Error: Power fault on Port %d has" bnx2x_check_over_curr()
13537 struct bnx2x *bp = params->bp; bnx2x_analyze_link_error() local
13573 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); bnx2x_analyze_link_error()
13586 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_analyze_link_error()
13598 bnx2x_notify_link_changed(bp); bnx2x_analyze_link_error()
13616 struct bnx2x *bp = params->bp; bnx2x_check_half_open_conn() local
13621 (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4))) bnx2x_check_half_open_conn()
13624 if (CHIP_IS_E3(bp) && bnx2x_check_half_open_conn()
13625 (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_check_half_open_conn()
13635 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); bnx2x_check_half_open_conn()
13636 REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, bnx2x_check_half_open_conn()
13639 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) bnx2x_check_half_open_conn()
13645 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & bnx2x_check_half_open_conn()
13653 if (CHIP_IS_E2(bp)) bnx2x_check_half_open_conn()
13658 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); bnx2x_check_half_open_conn()
13671 struct bnx2x *bp = params->bp; bnx2x_sfp_tx_fault_detection() local
13676 cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, bnx2x_sfp_tx_fault_detection()
13681 if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) { bnx2x_sfp_tx_fault_detection()
13714 struct bnx2x *bp = params->bp; bnx2x_kr2_recovery() local
13724 struct bnx2x *bp = params->bp; bnx2x_check_kr2_wa() local
13748 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, bnx2x_check_kr2_wa()
13750 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_check_kr2_wa()
13752 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, bnx2x_check_kr2_wa()
13796 struct bnx2x *bp = params->bp; bnx2x_period_func() local
13807 if (CHIP_IS_E3(bp)) { bnx2x_period_func()
13817 if ((REG_RD(bp, params->shmem_base + bnx2x_period_func()
13836 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, bnx2x_fan_failure_det_req() argument
13845 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, bnx2x_fan_failure_det_req()
13860 struct bnx2x *bp = params->bp; bnx2x_hw_reset_phy() local
13862 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, bnx2x_hw_reset_phy()
13879 void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, bnx2x_init_mod_abs_int() argument
13886 if (CHIP_IS_E3(bp)) { bnx2x_init_mod_abs_int()
13887 if (bnx2x_get_mod_abs_int_cfg(bp, chip_id, bnx2x_init_mod_abs_int()
13897 if (bnx2x_populate_phy(bp, phy_index, shmem_base, bnx2x_init_mod_abs_int()
13915 bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port); bnx2x_init_mod_abs_int()
13917 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); bnx2x_init_mod_abs_int()
13918 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); bnx2x_init_mod_abs_int()
13927 REG_WR(bp, sync_offset, vars->aeu_int_mask); bnx2x_init_mod_abs_int()
13938 aeu_mask = REG_RD(bp, offset); bnx2x_init_mod_abs_int()
13940 REG_WR(bp, offset, aeu_mask); bnx2x_init_mod_abs_int()
13943 val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); bnx2x_init_mod_abs_int()
13945 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_init_mod_abs_int()
3090 bnx2x_bsc_read(struct link_params *params, struct bnx2x *bp, u8 sl_devid, u16 sl_addr, u8 lc_addr, u8 xfer_cnt, u32 *data_array) bnx2x_bsc_read() argument
9664 bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) bnx2x_save_848xx_spirom_version() argument
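
Note: nearly every hit in the link-code results above is one of two helpers, bnx2x_cl45_read() and bnx2x_cl45_write(), applied as a read-modify-write against a clause-45 MDIO register. A standalone model of that idiom follows; an array stands in for the PHY register space, and rmw() is an illustrative helper, not a driver function.

#include <stdio.h>
#include <stdint.h>

/* Mock register file standing in for the PHY's clause-45 address space. */
static uint16_t phy_regs[0x10000];

/* Illustrative stand-ins for the driver's bnx2x_cl45_read()/_write(). */
static void cl45_read(uint16_t reg, uint16_t *val)  { *val = phy_regs[reg]; }
static void cl45_write(uint16_t reg, uint16_t val)  { phy_regs[reg] = val; }

/* The read-modify-write idiom behind most hits above: fetch, mask, set,
 * write back. */
static void rmw(uint16_t reg, uint16_t clear_mask, uint16_t set_mask)
{
	uint16_t val;

	cl45_read(reg, &val);
	val = (uint16_t)((val & ~clear_mask) | set_mask);
	cl45_write(reg, val);
}

int main(void)
{
	phy_regs[0x8000] = 0x00f0;
	rmw(0x8000, 0x00c0, 0x0001);	/* clear bits 7:6, set bit 0 */
	printf("reg 0x8000 = 0x%04x\n", phy_regs[0x8000]);	/* 0x0031 */
	return 0;
}

The same shape recurs whether the target is a LASI control register, a limiting-mode selector, or an LED register; only the device address, register, and masks change.
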
H A Dbnx2x_dcb.h73 #define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
74 (bp)->dcbx_port_params.ets.enabled)
155 #define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
156 ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
157 #define DCBX_PFC_PRI_PAUSE_MASK(bp) \
158 ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
159 #define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
160 ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
161 #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
162 (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
163 #define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
164 (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
165 #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
166 (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
167 #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
168 ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
169 #define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
170 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
171 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
187 void bnx2x_dcbx_init_params(struct bnx2x *bp);
188 void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
196 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
197 void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
201 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
204 int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
205 int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
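
Note: the bnx2x_dcb.h macros above partition the eight PFC priorities using a single non-pauseable mask; every GET/ONLY/MIX macro reduces to a bit test against it. The same logic as a self-contained demo (the mask value and names below are made up for the demo):

#include <stdio.h>
#include <stdint.h>

/* Model of DCBX_PFC_PRI_NON_PAUSE_MASK(bp): priorities 0,1 not pauseable. */
static const uint8_t non_pause_mask = 0x03;

#define PRI_PAUSE_MASK		((uint8_t)~non_pause_mask)
#define PRI_GET_PAUSE(pg)	((pg) & PRI_PAUSE_MASK)
#define PRI_GET_NON_PAUSE(pg)	((pg) & non_pause_mask)
#define IS_ONLY_PAUSE(pg)	((pg) == PRI_GET_PAUSE(pg))
#define IS_ONLY_NON_PAUSE(pg)	((pg) == PRI_GET_NON_PAUSE(pg))
#define IS_MIX_PAUSE(pg)	(!(IS_ONLY_PAUSE(pg) || IS_ONLY_NON_PAUSE(pg)))

int main(void)
{
	uint8_t pg_pri = 0x0c | 0x01;	/* priorities 2,3 (pauseable) + 0 */

	printf("pause bits:     0x%02x\n", PRI_GET_PAUSE(pg_pri));	/* 0x0c */
	printf("non-pause bits: 0x%02x\n", PRI_GET_NON_PAUSE(pg_pri));	/* 0x01 */
	printf("mixed group:    %d\n", IS_MIX_PAUSE(pg_pri));		/* 1 */
	return 0;
}
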
H A Dbnx2x_sp.c48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp, bnx2x_exe_queue_init() argument
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, bnx2x_exe_queue_free_elem() argument
104 * @bp: driver handle
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp, bnx2x_exe_queue_add() argument
122 rc = o->optimize(bp, o->owner, elem); bnx2x_exe_queue_add()
127 rc = o->validate(bp, o->owner, elem); bnx2x_exe_queue_add()
142 bnx2x_exe_queue_free_elem(bp, elem); bnx2x_exe_queue_add()
150 struct bnx2x *bp, __bnx2x_exe_queue_reset_pending()
160 bnx2x_exe_queue_free_elem(bp, elem); __bnx2x_exe_queue_reset_pending()
167 * @bp: driver handle
173 static inline int bnx2x_exe_queue_step(struct bnx2x *bp, bnx2x_exe_queue_step() argument
191 __bnx2x_exe_queue_reset_pending(bp, o); bnx2x_exe_queue_step()
223 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); bnx2x_exe_queue_step()
233 __bnx2x_exe_queue_reset_pending(bp, o); bnx2x_exe_queue_step()
249 struct bnx2x *bp) bnx2x_exe_queue_alloc_elem()
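
Note: the bnx2x_exe_queue_* hits (source lines 48-276 above) implement the classification command queue: bnx2x_exe_queue_add() runs the owner's optimize and validate callbacks before enqueueing, and bnx2x_exe_queue_step() drains pending elements through the execute callback. A toy single-threaded model of that add/step shape follows; the real step() processes commands in bounded batches and waits for ramrod completion, which this sketch omits.

#include <stdio.h>
#include <stdlib.h>

struct exe_elem {
	int cmd;
	struct exe_elem *next;
};

struct exe_queue {
	struct exe_elem *head, *tail;
	int (*validate)(int cmd);	/* model of o->validate() */
	void (*execute)(int cmd);	/* model of o->execute() */
};

/* Model of bnx2x_exe_queue_add(): validate, then append. */
static int exe_queue_add(struct exe_queue *q, int cmd)
{
	struct exe_elem *e;

	if (q->validate(cmd))
		return -1;		/* rejected */
	e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->cmd = cmd;
	e->next = NULL;
	if (q->tail)
		q->tail->next = e;
	else
		q->head = e;
	q->tail = e;
	return 0;
}

/* Model of bnx2x_exe_queue_step(): pop and execute pending commands. */
static void exe_queue_step(struct exe_queue *q)
{
	while (q->head) {
		struct exe_elem *e = q->head;

		q->head = e->next;
		if (!q->head)
			q->tail = NULL;
		q->execute(e->cmd);
		free(e);
	}
}

static int  always_ok(int cmd) { (void)cmd; return 0; }
static void run_cmd(int cmd)   { printf("exec cmd %d\n", cmd); }

int main(void)
{
	struct exe_queue q = { NULL, NULL, always_ok, run_cmd };

	exe_queue_add(&q, 1);
	exe_queue_add(&q, 2);
	exe_queue_step(&q);
	return 0;
}
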
278 * @bp: device handle
283 static inline int bnx2x_state_wait(struct bnx2x *bp, int state, bnx2x_state_wait() argument
289 if (CHIP_REV_IS_EMUL(bp)) bnx2x_state_wait()
305 if (bp->panic) bnx2x_state_wait()
318 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) bnx2x_raw_wait() argument
320 return bnx2x_state_wait(bp, raw->state, raw->pstate); bnx2x_raw_wait()
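
Note: bnx2x_state_wait() (source line 283 above), wrapped by bnx2x_raw_wait(), is a bounded poll on a pending-state bit. A userspace model of the same loop, with usleep() standing in for the kernel's delay and an arbitrary retry budget:

#include <stdio.h>
#include <unistd.h>

#define WAIT_ATTEMPTS	5000		/* arbitrary retry budget */

/* Model of bnx2x_state_wait(): poll until the pending bit clears. */
static int state_wait(int state, const unsigned long *pstate)
{
	int cnt = WAIT_ATTEMPTS;

	while (cnt--) {
		if (!(*pstate & (1UL << state)))
			return 0;	/* completion arrived */
		usleep(1000);		/* stands in for the kernel delay */
	}
	return -1;			/* timed out, like -EBUSY */
}

int main(void)
{
	unsigned long pending = 0;	/* nothing pending: returns at once */

	printf("wait rc = %d\n", state_wait(0, &pending));
	return 0;
}

In the driver the bit is cleared from the ramrod completion path, so the poll ends as soon as the firmware acknowledges the command.
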
424 * @bp: device handle
430 static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, __bnx2x_vlan_mac_h_write_trylock() argument
445 * @bp: device handle
451 static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, __bnx2x_vlan_mac_h_exec_pending() argument
461 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); __bnx2x_vlan_mac_h_exec_pending()
474 * @bp: device handle
480 static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, __bnx2x_vlan_mac_h_pend() argument
493 * @bp: device handle
500 static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, __bnx2x_vlan_mac_h_write_unlock() argument
508 __bnx2x_vlan_mac_h_exec_pending(bp, o); __bnx2x_vlan_mac_h_write_unlock()
516 * @bp: device handle
522 static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, __bnx2x_vlan_mac_h_read_lock() argument
536 * @bp: device handle
541 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, bnx2x_vlan_mac_h_read_lock() argument
547 rc = __bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_vlan_mac_h_read_lock()
556 * @bp: device handle
563 static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, __bnx2x_vlan_mac_h_read_unlock() argument
584 __bnx2x_vlan_mac_h_write_unlock(bp, o); __bnx2x_vlan_mac_h_read_unlock()
591 * @bp: device handle
598 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, bnx2x_vlan_mac_h_read_unlock() argument
602 __bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_h_read_unlock()
606 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, bnx2x_get_n_elements() argument
615 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_get_n_elements()
632 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_get_n_elements()
639 static int bnx2x_check_mac_add(struct bnx2x *bp, bnx2x_check_mac_add() argument
659 static int bnx2x_check_vlan_add(struct bnx2x *bp, bnx2x_check_vlan_add() argument
674 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, bnx2x_check_vlan_mac_add() argument
696 bnx2x_check_mac_del(struct bnx2x *bp, bnx2x_check_mac_del() argument
713 bnx2x_check_vlan_del(struct bnx2x *bp, bnx2x_check_vlan_del() argument
729 bnx2x_check_vlan_mac_del(struct bnx2x *bp, bnx2x_check_vlan_mac_del() argument
750 static bool bnx2x_check_move(struct bnx2x *bp, bnx2x_check_move() argument
761 pos = src_o->check_del(bp, src_o, data); bnx2x_check_move()
764 rc = dst_o->check_add(bp, dst_o, data); bnx2x_check_move()
776 struct bnx2x *bp, bnx2x_check_move_always_err()
800 static void bnx2x_set_mac_in_nig(struct bnx2x *bp, bnx2x_set_mac_in_nig() argument
804 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : bnx2x_set_mac_in_nig()
807 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) bnx2x_set_mac_in_nig()
824 REG_WR_DMAE(bp, reg_offset, wb_data, 2); bnx2x_set_mac_in_nig()
827 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : bnx2x_set_mac_in_nig()
834 * @bp: device handle
841 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, bnx2x_vlan_mac_set_cmd_hdr_e2() argument
881 static void bnx2x_set_one_mac_e2(struct bnx2x *bp, bnx2x_set_one_mac_e2() argument
914 bnx2x_set_mac_in_nig(bp, add, mac, bnx2x_set_one_mac_e2()
917 bnx2x_set_mac_in_nig(bp, add, mac, bnx2x_set_one_mac_e2()
926 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, bnx2x_set_one_mac_e2()
945 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, bnx2x_set_one_mac_e2()
969 * @bp: device handle
977 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_rdata_hdr_e1x() argument
990 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_cfg_entry_e1x() argument
1016 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, bnx2x_vlan_mac_set_rdata_e1x() argument
1023 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, bnx2x_vlan_mac_set_rdata_e1x()
1025 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, bnx2x_vlan_mac_set_rdata_e1x()
1036 * @bp: device handle
1042 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, bnx2x_set_one_mac_e1x() argument
1059 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, bnx2x_set_one_mac_e1x()
1065 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, bnx2x_set_one_vlan_e2() argument
1084 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, bnx2x_set_one_vlan_e2()
1099 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, bnx2x_set_one_vlan_e2()
1115 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, bnx2x_set_one_vlan_mac_e2() argument
1136 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, bnx2x_set_one_vlan_mac_e2()
1155 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj, bnx2x_set_one_vlan_mac_e2()
1175 * @bp: device handle
1181 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, bnx2x_set_one_vlan_mac_e1h() argument
1198 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, bnx2x_set_one_vlan_mac_e1h()
1208 * @bp: device handle
1224 static int bnx2x_vlan_mac_restore(struct bnx2x *bp, bnx2x_vlan_mac_restore() argument
1263 return bnx2x_config_vlan_mac(bp, p); bnx2x_vlan_mac_restore()
1326 * @bp: device handle
1336 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, bnx2x_validate_vlan_mac_add() argument
1345 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_validate_vlan_mac_add()
1375 * @bp: device handle
1384 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, bnx2x_validate_vlan_mac_del() argument
1396 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_validate_vlan_mac_del()
1434 * @bp: device handle
1443 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, bnx2x_validate_vlan_mac_move() argument
1456 if (!src_o->check_move(bp, src_o, dest_o, bnx2x_validate_vlan_mac_move()
1505 static int bnx2x_validate_vlan_mac(struct bnx2x *bp, bnx2x_validate_vlan_mac() argument
1511 return bnx2x_validate_vlan_mac_add(bp, qo, elem); bnx2x_validate_vlan_mac()
1513 return bnx2x_validate_vlan_mac_del(bp, qo, elem); bnx2x_validate_vlan_mac()
1515 return bnx2x_validate_vlan_mac_move(bp, qo, elem); bnx2x_validate_vlan_mac()
1521 static int bnx2x_remove_vlan_mac(struct bnx2x *bp, bnx2x_remove_vlan_mac() argument
1553 * @bp: device handle
1557 static int bnx2x_wait_vlan_mac(struct bnx2x *bp, bnx2x_wait_vlan_mac() argument
1566 rc = raw->wait_comp(bp, raw); bnx2x_wait_vlan_mac()
1580 static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, __bnx2x_vlan_mac_execute_step() argument
1589 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); __bnx2x_vlan_mac_execute_step()
1592 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); __bnx2x_vlan_mac_execute_step()
1599 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); __bnx2x_vlan_mac_execute_step()
1609 * @bp: device handle
1615 static int bnx2x_complete_vlan_mac(struct bnx2x *bp, bnx2x_complete_vlan_mac() argument
1629 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); bnx2x_complete_vlan_mac()
1642 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); bnx2x_complete_vlan_mac()
1658 * @bp: device handle
1662 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, bnx2x_optimize_vlan_mac() argument
1706 bnx2x_exe_queue_free_elem(bp, pos); bnx2x_optimize_vlan_mac()
1716 * @bp: device handle
1725 struct bnx2x *bp, bnx2x_vlan_mac_get_registry_elem()
1761 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); bnx2x_vlan_mac_get_registry_elem()
1770 * @bp: device handle
1777 static int bnx2x_execute_vlan_mac(struct bnx2x *bp, bnx2x_execute_vlan_mac() argument
1811 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, list_for_each_entry()
1826 o->set_one_rule(bp, o, elem, idx, list_for_each_entry()
1843 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1856 reg_elem = o->check_del(bp, o, list_for_each_entry()
1888 reg_elem = o->check_del(bp, cam_obj, list_for_each_entry()
1901 struct bnx2x *bp, bnx2x_vlan_mac_push_new_cmd()
1909 elem = bnx2x_exe_queue_alloc_elem(bp); bnx2x_vlan_mac_push_new_cmd()
1926 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); bnx2x_vlan_mac_push_new_cmd()
1932 * @bp: device handle
1936 int bnx2x_config_vlan_mac(struct bnx2x *bp, bnx2x_config_vlan_mac() argument
1949 rc = bnx2x_vlan_mac_push_new_cmd(bp, p); bnx2x_config_vlan_mac()
1968 rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, bnx2x_config_vlan_mac()
1987 rc = raw->wait_comp(bp, raw); bnx2x_config_vlan_mac()
1992 rc = __bnx2x_vlan_mac_execute_step(bp, bnx2x_config_vlan_mac()
2008 * @bp: device handle
2018 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, bnx2x_vlan_mac_del_all() argument
2039 rc = exeq->remove(bp, exeq->owner, exeq_pos); bnx2x_vlan_mac_del_all()
2046 bnx2x_exe_queue_free_elem(bp, exeq_pos); bnx2x_vlan_mac_del_all()
2066 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); bnx2x_vlan_mac_del_all()
2076 rc = bnx2x_config_vlan_mac(bp, &p); bnx2x_vlan_mac_del_all()
2079 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_del_all()
2086 bnx2x_vlan_mac_h_read_unlock(bp, o); bnx2x_vlan_mac_del_all()
2091 return bnx2x_config_vlan_mac(bp, &p); bnx2x_vlan_mac_del_all()
2135 void bnx2x_init_mac_obj(struct bnx2x *bp, bnx2x_init_mac_obj() argument
2154 if (CHIP_IS_E1x(bp)) { bnx2x_init_mac_obj()
2162 bnx2x_exe_queue_init(bp, bnx2x_init_mac_obj()
2179 bnx2x_exe_queue_init(bp, bnx2x_init_mac_obj()
2189 void bnx2x_init_vlan_obj(struct bnx2x *bp, bnx2x_init_vlan_obj() argument
2207 if (CHIP_IS_E1x(bp)) { bnx2x_init_vlan_obj()
2220 bnx2x_exe_queue_init(bp, bnx2x_init_vlan_obj()
2230 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, bnx2x_init_vlan_mac_obj() argument
2255 if (CHIP_IS_E1(bp)) { bnx2x_init_vlan_mac_obj()
2258 } else if (CHIP_IS_E1H(bp)) { bnx2x_init_vlan_mac_obj()
2266 bnx2x_exe_queue_init(bp, bnx2x_init_vlan_mac_obj()
2282 bnx2x_exe_queue_init(bp, bnx2x_init_vlan_mac_obj()
2293 static inline void __storm_memset_mac_filters(struct bnx2x *bp, __storm_memset_mac_filters() argument
2302 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); __storm_memset_mac_filters()
2305 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, bnx2x_set_rx_mode_e1x() argument
2308 /* update the bp MAC filter structure */ bnx2x_set_rx_mode_e1x()
2377 __storm_memset_mac_filters(bp, mac_filters, p->func_id); bnx2x_set_rx_mode_e1x()
2395 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, bnx2x_rx_mode_set_cmd_state_e2() argument
2444 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, bnx2x_set_rx_mode_e2() argument
2464 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, bnx2x_set_rx_mode_e2()
2477 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, bnx2x_set_rx_mode_e2()
2491 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); bnx2x_set_rx_mode_e2()
2497 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags, bnx2x_set_rx_mode_e2()
2505 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id); bnx2x_set_rx_mode_e2()
2511 bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags, bnx2x_set_rx_mode_e2()
2535 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, bnx2x_set_rx_mode_e2()
2546 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, bnx2x_wait_rx_mode_comp_e2() argument
2549 return bnx2x_state_wait(bp, p->state, p->pstate); bnx2x_wait_rx_mode_comp_e2()
2552 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, bnx2x_empty_rx_mode_wait() argument
2559 int bnx2x_config_rx_mode(struct bnx2x *bp, bnx2x_config_rx_mode() argument
2565 rc = p->rx_mode_obj->config_rx_mode(bp, p); bnx2x_config_rx_mode()
2571 rc = p->rx_mode_obj->wait_comp(bp, p); bnx2x_config_rx_mode()
2579 void bnx2x_init_rx_mode_obj(struct bnx2x *bp, bnx2x_init_rx_mode_obj() argument
2582 if (CHIP_IS_E1x(bp)) { bnx2x_init_rx_mode_obj()
2620 static int bnx2x_mcast_wait(struct bnx2x *bp, bnx2x_mcast_wait() argument
2623 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || bnx2x_mcast_wait()
2624 o->raw.wait_comp(bp, &o->raw)) bnx2x_mcast_wait()
2630 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, bnx2x_mcast_enqueue_cmd() argument
2762 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, bnx2x_mcast_set_one_rule_e2() argument
2817 * @bp: device handle
2825 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, bnx2x_mcast_handle_restore_cmd_e2()
2836 o->set_one_rule(bp, o, cnt, &cfg_data, bnx2x_mcast_handle_restore_cmd_e2()
2855 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_add_e2() argument
2867 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); bnx2x_mcast_hdl_pending_add_e2()
2890 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_del_e2() argument
2897 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); bnx2x_mcast_hdl_pending_del_e2()
2920 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, bnx2x_mcast_hdl_pending_restore_e2() argument
2924 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, bnx2x_mcast_hdl_pending_restore_e2()
2935 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, bnx2x_mcast_handle_pending_cmds_e2() argument
2946 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); bnx2x_mcast_handle_pending_cmds_e2()
2950 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); bnx2x_mcast_handle_pending_cmds_e2()
2954 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, bnx2x_mcast_handle_pending_cmds_e2()
2979 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, bnx2x_mcast_hdl_add() argument
2989 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); bnx2x_mcast_hdl_add()
3000 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, bnx2x_mcast_hdl_del() argument
3007 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); bnx2x_mcast_hdl_del()
3021 * @bp: device handle
3030 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, bnx2x_mcast_handle_current_cmd() argument
3042 bnx2x_mcast_hdl_add(bp, o, p, &cnt); bnx2x_mcast_handle_current_cmd()
3046 bnx2x_mcast_hdl_del(bp, o, p, &cnt); bnx2x_mcast_handle_current_cmd()
3050 o->hdl_restore(bp, o, 0, &cnt); bnx2x_mcast_handle_current_cmd()
3064 static int bnx2x_mcast_validate_e2(struct bnx2x *bp, bnx2x_mcast_validate_e2() argument
3109 static void bnx2x_mcast_revert_e2(struct bnx2x *bp, bnx2x_mcast_revert_e2() argument
3122 * @bp: device handle
3126 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, bnx2x_mcast_set_rdata_hdr_e2() argument
3143 * @bp: device handle
3151 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e2() argument
3168 static int bnx2x_mcast_setup_e2(struct bnx2x *bp, bnx2x_mcast_setup_e2() argument
3181 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); bnx2x_mcast_setup_e2()
3194 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); bnx2x_mcast_setup_e2()
3205 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); bnx2x_mcast_setup_e2()
3223 bnx2x_mcast_refresh_registry_e2(bp, o); bnx2x_mcast_setup_e2()
3240 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, bnx2x_mcast_setup_e2()
3252 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, bnx2x_mcast_validate_e1h() argument
3263 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, bnx2x_mcast_revert_e1h() argument
3275 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, bnx2x_mcast_hdl_add_e1h() argument
3296 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, bnx2x_mcast_hdl_restore_e1h() argument
3314 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, bnx2x_mcast_setup_e1h() argument
3333 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); bnx2x_mcast_setup_e1h()
3346 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); bnx2x_mcast_setup_e1h()
3356 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); bnx2x_mcast_setup_e1h()
3368 static int bnx2x_mcast_validate_e1(struct bnx2x *bp, bnx2x_mcast_validate_e1() argument
3423 static void bnx2x_mcast_revert_e1(struct bnx2x *bp, bnx2x_mcast_revert_e1() argument
3439 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, bnx2x_mcast_set_one_rule_e1() argument
3469 * @bp: device handle
3473 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, bnx2x_mcast_set_rdata_hdr_e1() argument
3481 u8 offset = (CHIP_REV_IS_SLOW(bp) ? bnx2x_mcast_set_rdata_hdr_e1()
3496 * @bp: device handle
3507 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, bnx2x_mcast_handle_restore_cmd_e1()
3517 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); bnx2x_mcast_handle_restore_cmd_e1()
3531 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) bnx2x_mcast_handle_pending_cmds_e1()
3551 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); bnx2x_mcast_handle_pending_cmds_e1()
3566 o->hdl_restore(bp, o, 0, &cnt); bnx2x_mcast_handle_pending_cmds_e1()
3602 * @bp: device handle
3610 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, bnx2x_mcast_refresh_registry_e1() argument
3657 static int bnx2x_mcast_setup_e1(struct bnx2x *bp, bnx2x_mcast_setup_e1() argument
3677 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); bnx2x_mcast_setup_e1()
3685 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); bnx2x_mcast_setup_e1()
3697 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); bnx2x_mcast_setup_e1()
3705 rc = bnx2x_mcast_refresh_registry_e1(bp, o); bnx2x_mcast_setup_e1()
3724 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, bnx2x_mcast_setup_e1()
3758 int bnx2x_config_mcast(struct bnx2x *bp, bnx2x_config_mcast() argument
3772 rc = o->validate(bp, p, cmd); bnx2x_config_mcast()
3788 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); bnx2x_config_mcast()
3804 rc = o->config_mcast(bp, p, cmd); bnx2x_config_mcast()
3810 rc = o->wait_comp(bp, o); bnx2x_config_mcast()
3819 o->revert(bp, p, old_reg_size); bnx2x_config_mcast()
3848 void bnx2x_init_mcast_obj(struct bnx2x *bp, bnx2x_init_mcast_obj() argument
3868 if (CHIP_IS_E1(bp)) { bnx2x_init_mcast_obj()
3875 if (CHIP_REV_IS_SLOW(bp)) bnx2x_init_mcast_obj()
3894 } else if (CHIP_IS_E1H(bp)) { bnx2x_init_mcast_obj()
4146 void bnx2x_init_mac_credit_pool(struct bnx2x *bp, bnx2x_init_mac_credit_pool() argument
4155 if (CHIP_IS_E1(bp)) { bnx2x_init_mac_credit_pool()
4157 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool()
4164 } else if (CHIP_IS_E1H(bp)) { bnx2x_init_mac_credit_pool()
4169 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool()
4185 if (!CHIP_REV_IS_SLOW(bp)) bnx2x_init_mac_credit_pool()
4186 cam_sz = PF_MAC_CREDIT_E2(bp, func_num); bnx2x_init_mac_credit_pool()
4201 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, bnx2x_init_vlan_credit_pool() argument
4206 if (CHIP_IS_E1x(bp)) { bnx2x_init_vlan_credit_pool()
4216 int credit = PF_VLAN_CREDIT_E2(bp, func_num); bnx2x_init_vlan_credit_pool()
4229 * @bp: driver handle
4234 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, bnx2x_debug_print_ind_table() argument
4258 * @bp: device handle
4263 static int bnx2x_setup_rss(struct bnx2x *bp, bnx2x_setup_rss() argument
4353 if (netif_msg_ifup(bp)) bnx2x_setup_rss()
4354 bnx2x_debug_print_ind_table(bp, p); bnx2x_setup_rss()
4364 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, bnx2x_setup_rss()
4381 int bnx2x_config_rss(struct bnx2x *bp, bnx2x_config_rss() argument
4397 rc = o->config_rss(bp, p); bnx2x_config_rss()
4404 rc = r->wait_comp(bp, r); bnx2x_config_rss()
4409 void bnx2x_init_rss_config_obj(struct bnx2x *bp, bnx2x_init_rss_config_obj() argument
4428 * @bp: device handle
4437 int bnx2x_queue_state_change(struct bnx2x *bp, bnx2x_queue_state_change() argument
4445 rc = o->check_transition(bp, o, params); bnx2x_queue_state_change()
4458 o->complete_cmd(bp, o, pending_bit); bnx2x_queue_state_change()
4461 rc = o->send_cmd(bp, params); bnx2x_queue_state_change()
4470 rc = o->wait_comp(bp, o, pending_bit); bnx2x_queue_state_change()
4499 static int bnx2x_queue_wait_comp(struct bnx2x *bp, bnx2x_queue_wait_comp() argument
4503 return bnx2x_state_wait(bp, cmd, &o->pending); bnx2x_queue_wait_comp()
4509 * @bp: device handle
4515 static int bnx2x_queue_comp_cmd(struct bnx2x *bp, bnx2x_queue_comp_cmd() argument
4558 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, bnx2x_q_fill_setup_data_e2() argument
4571 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, bnx2x_q_fill_init_general_data() argument
4727 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, bnx2x_q_fill_setup_data_cmn() argument
4731 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, bnx2x_q_fill_setup_data_cmn()
4752 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, bnx2x_q_fill_setup_tx_only() argument
4756 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, bnx2x_q_fill_setup_tx_only()
4775 * @bp: device handle
4783 static inline int bnx2x_q_init(struct bnx2x *bp, bnx2x_q_init() argument
4796 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, bnx2x_q_init()
4807 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, bnx2x_q_init()
4818 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); bnx2x_q_init()
4822 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); bnx2x_q_init()
4830 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, bnx2x_q_send_setup_e1x() argument
4843 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_send_setup_e1x()
4851 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], bnx2x_q_send_setup_e1x()
4856 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, bnx2x_q_send_setup_e2() argument
4869 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); bnx2x_q_send_setup_e2()
4870 bnx2x_q_fill_setup_data_e2(bp, params, rdata); bnx2x_q_send_setup_e2()
4878 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], bnx2x_q_send_setup_e2()
4883 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, bnx2x_q_send_setup_tx_only() argument
4909 bnx2x_q_fill_setup_tx_only(bp, params, rdata); bnx2x_q_send_setup_tx_only()
4921 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], bnx2x_q_send_setup_tx_only()
4926 static void bnx2x_q_fill_update_data(struct bnx2x *bp, bnx2x_q_fill_update_data() argument
4998 static inline int bnx2x_q_send_update(struct bnx2x *bp, bnx2x_q_send_update() argument
5019 bnx2x_q_fill_update_data(bp, o, update_params, rdata); bnx2x_q_send_update()
5027 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, bnx2x_q_send_update()
5035 * @bp: device handle
5040 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, bnx2x_q_send_deactivate() argument
5049 return bnx2x_q_send_update(bp, params); bnx2x_q_send_deactivate()
5055 * @bp: device handle
5060 static inline int bnx2x_q_send_activate(struct bnx2x *bp, bnx2x_q_send_activate() argument
5070 return bnx2x_q_send_update(bp, params); bnx2x_q_send_activate()
5073 static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, bnx2x_q_fill_update_tpa_data() argument
5095 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, bnx2x_q_send_update_tpa() argument
5110 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); bnx2x_q_send_update_tpa()
5125 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, bnx2x_q_send_update_tpa()
5131 static inline int bnx2x_q_send_halt(struct bnx2x *bp, bnx2x_q_send_halt() argument
5136 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, bnx2x_q_send_halt()
5141 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, bnx2x_q_send_cfc_del() argument
5153 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, bnx2x_q_send_cfc_del()
5157 static inline int bnx2x_q_send_terminate(struct bnx2x *bp, bnx2x_q_send_terminate() argument
5169 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, bnx2x_q_send_terminate()
5173 static inline int bnx2x_q_send_empty(struct bnx2x *bp, bnx2x_q_send_empty() argument
5178 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, bnx2x_q_send_empty()
5183 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, bnx2x_queue_send_cmd_cmn() argument
5188 return bnx2x_q_init(bp, params); bnx2x_queue_send_cmd_cmn()
5190 return bnx2x_q_send_setup_tx_only(bp, params); bnx2x_queue_send_cmd_cmn()
5192 return bnx2x_q_send_deactivate(bp, params); bnx2x_queue_send_cmd_cmn()
5194 return bnx2x_q_send_activate(bp, params); bnx2x_queue_send_cmd_cmn()
5196 return bnx2x_q_send_update(bp, params); bnx2x_queue_send_cmd_cmn()
5198 return bnx2x_q_send_update_tpa(bp, params); bnx2x_queue_send_cmd_cmn()
5200 return bnx2x_q_send_halt(bp, params); bnx2x_queue_send_cmd_cmn()
5202 return bnx2x_q_send_cfc_del(bp, params); bnx2x_queue_send_cmd_cmn()
5204 return bnx2x_q_send_terminate(bp, params); bnx2x_queue_send_cmd_cmn()
5206 return bnx2x_q_send_empty(bp, params); bnx2x_queue_send_cmd_cmn()
5213 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, bnx2x_queue_send_cmd_e1x() argument
5218 return bnx2x_q_send_setup_e1x(bp, params); bnx2x_queue_send_cmd_e1x()
5229 return bnx2x_queue_send_cmd_cmn(bp, params); bnx2x_queue_send_cmd_e1x()
5236 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, bnx2x_queue_send_cmd_e2() argument
5241 return bnx2x_q_send_setup_e2(bp, params); bnx2x_queue_send_cmd_e2()
5252 return bnx2x_queue_send_cmd_cmn(bp, params); bnx2x_queue_send_cmd_e2()
5262 * @bp: device handle
5275 static int bnx2x_queue_chk_transition(struct bnx2x *bp, bnx2x_queue_chk_transition() argument
5441 void bnx2x_init_queue_obj(struct bnx2x *bp, bnx2x_init_queue_obj() argument
5461 if (CHIP_IS_E1x(bp)) bnx2x_init_queue_obj()
5474 int bnx2x_get_q_logical_state(struct bnx2x *bp, bnx2x_get_q_logical_state() argument
5495 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, bnx2x_func_get_state() argument
5510 static int bnx2x_func_wait_comp(struct bnx2x *bp, bnx2x_func_wait_comp() argument
5514 return bnx2x_state_wait(bp, cmd, &o->pending); bnx2x_func_wait_comp()
5520 * @bp: device handle
5527 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, bnx2x_func_state_change_comp() argument
5535 cmd, BP_FUNC(bp), o->state, bnx2x_func_state_change_comp()
5542 cmd, BP_FUNC(bp), o->next_state); bnx2x_func_state_change_comp()
5561 * @bp: device handle
5567 static int bnx2x_func_comp_cmd(struct bnx2x *bp, bnx2x_func_comp_cmd() argument
5574 int rc = bnx2x_func_state_change_comp(bp, o, cmd); bnx2x_func_comp_cmd()
5581 * @bp: device handle
5593 static int bnx2x_func_chk_transition(struct bnx2x *bp, bnx2x_func_chk_transition() argument
5692 * @bp: device handle
5699 static inline int bnx2x_func_init_func(struct bnx2x *bp, bnx2x_func_init_func() argument
5702 return drv->init_hw_func(bp); bnx2x_func_init_func()
5708 * @bp: device handle
5716 static inline int bnx2x_func_init_port(struct bnx2x *bp, bnx2x_func_init_port() argument
5719 int rc = drv->init_hw_port(bp); bnx2x_func_init_port()
5723 return bnx2x_func_init_func(bp, drv); bnx2x_func_init_port()
5729 * @bp: device handle
5736 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, bnx2x_func_init_cmn_chip() argument
5739 int rc = drv->init_hw_cmn_chip(bp); bnx2x_func_init_cmn_chip()
5743 return bnx2x_func_init_port(bp, drv); bnx2x_func_init_cmn_chip()
5749 * @bp: device handle
5756 static inline int bnx2x_func_init_cmn(struct bnx2x *bp, bnx2x_func_init_cmn() argument
5759 int rc = drv->init_hw_cmn(bp); bnx2x_func_init_cmn()
5763 return bnx2x_func_init_port(bp, drv); bnx2x_func_init_cmn()
5766 static int bnx2x_func_hw_init(struct bnx2x *bp, bnx2x_func_hw_init() argument
5775 BP_ABS_FUNC(bp), load_code); bnx2x_func_hw_init()
5778 rc = drv->gunzip_init(bp); bnx2x_func_hw_init()
5783 rc = drv->init_fw(bp); bnx2x_func_hw_init()
5792 rc = bnx2x_func_init_cmn_chip(bp, drv); bnx2x_func_hw_init()
5798 rc = bnx2x_func_init_cmn(bp, drv); bnx2x_func_hw_init()
5804 rc = bnx2x_func_init_port(bp, drv); bnx2x_func_hw_init()
5810 rc = bnx2x_func_init_func(bp, drv); bnx2x_func_hw_init()
5821 drv->gunzip_end(bp); bnx2x_func_hw_init()
5827 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); bnx2x_func_hw_init()
5835 * @bp: device handle
5841 static inline void bnx2x_func_reset_func(struct bnx2x *bp, bnx2x_func_reset_func() argument
5844 drv->reset_hw_func(bp); bnx2x_func_reset_func()
5850 * @bp: device handle
5862 static inline void bnx2x_func_reset_port(struct bnx2x *bp, bnx2x_func_reset_port() argument
5865 drv->reset_hw_port(bp); bnx2x_func_reset_port()
5866 bnx2x_func_reset_func(bp, drv); bnx2x_func_reset_port()
5872 * @bp: device handle
5879 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, bnx2x_func_reset_cmn() argument
5882 bnx2x_func_reset_port(bp, drv); bnx2x_func_reset_cmn()
5883 drv->reset_hw_cmn(bp); bnx2x_func_reset_cmn()
5886 static inline int bnx2x_func_hw_reset(struct bnx2x *bp, bnx2x_func_hw_reset() argument
5893 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), bnx2x_func_hw_reset()
5898 bnx2x_func_reset_cmn(bp, drv); bnx2x_func_hw_reset()
5901 bnx2x_func_reset_port(bp, drv); bnx2x_func_hw_reset()
5904 bnx2x_func_reset_func(bp, drv); bnx2x_func_hw_reset()
5913 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); bnx2x_func_hw_reset()
5918 static inline int bnx2x_func_send_start(struct bnx2x *bp, bnx2x_func_send_start() argument
5932 rdata->path_id = BP_PATH(bp); bnx2x_func_send_start()
5974 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, bnx2x_func_send_start()
5979 static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, bnx2x_func_send_switch_update() argument
6053 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, bnx2x_func_send_switch_update()
6058 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, bnx2x_func_send_afex_update() argument
6091 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, bnx2x_func_send_afex_update()
6097 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, bnx2x_func_send_afex_viflists() argument
6130 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, bnx2x_func_send_afex_viflists()
6135 static inline int bnx2x_func_send_stop(struct bnx2x *bp, bnx2x_func_send_stop() argument
6138 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, bnx2x_func_send_stop()
6142 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, bnx2x_func_send_tx_stop() argument
6145 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, bnx2x_func_send_tx_stop()
6148 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, bnx2x_func_send_tx_start() argument
6177 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, bnx2x_func_send_tx_start()
6183 int bnx2x_func_send_set_timesync(struct bnx2x *bp, bnx2x_func_send_set_timesync() argument
6213 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, bnx2x_func_send_set_timesync()
6218 static int bnx2x_func_send_cmd(struct bnx2x *bp, bnx2x_func_send_cmd() argument
6223 return bnx2x_func_hw_init(bp, params); bnx2x_func_send_cmd()
6225 return bnx2x_func_send_start(bp, params); bnx2x_func_send_cmd()
6227 return bnx2x_func_send_stop(bp, params); bnx2x_func_send_cmd()
6229 return bnx2x_func_hw_reset(bp, params); bnx2x_func_send_cmd()
6231 return bnx2x_func_send_afex_update(bp, params); bnx2x_func_send_cmd()
6233 return bnx2x_func_send_afex_viflists(bp, params); bnx2x_func_send_cmd()
6235 return bnx2x_func_send_tx_stop(bp, params); bnx2x_func_send_cmd()
6237 return bnx2x_func_send_tx_start(bp, params); bnx2x_func_send_cmd()
6239 return bnx2x_func_send_switch_update(bp, params); bnx2x_func_send_cmd()
6241 return bnx2x_func_send_set_timesync(bp, params); bnx2x_func_send_cmd()
6248 void bnx2x_init_func_obj(struct bnx2x *bp, bnx2x_init_func_obj() argument
6273 * @bp: device handle
6283 int bnx2x_func_state_change(struct bnx2x *bp, bnx2x_func_state_change() argument
6294 rc = o->check_transition(bp, o, params); bnx2x_func_state_change()
6301 rc = o->check_transition(bp, o, params); bnx2x_func_state_change()
6318 bnx2x_func_state_change_comp(bp, o, cmd); bnx2x_func_state_change()
6322 rc = o->send_cmd(bp, params); bnx2x_func_state_change()
6334 rc = o->wait_comp(bp, o, cmd); bnx2x_func_state_change()
149 __bnx2x_exe_queue_reset_pending( struct bnx2x *bp, struct bnx2x_exe_queue_obj *o) __bnx2x_exe_queue_reset_pending() argument
248 bnx2x_exe_queue_alloc_elem( struct bnx2x *bp) bnx2x_exe_queue_alloc_elem() argument
775 bnx2x_check_move_always_err( struct bnx2x *bp, struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *dst_o, union bnx2x_classification_ramrod_data *data) bnx2x_check_move_always_err() argument
1724 bnx2x_vlan_mac_get_registry_elem( struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, struct bnx2x_exeq_elem *elem, bool restore, struct bnx2x_vlan_mac_registry_elem **re) bnx2x_vlan_mac_get_registry_elem() argument
1900 bnx2x_vlan_mac_push_new_cmd( struct bnx2x *bp, struct bnx2x_vlan_mac_ramrod_params *p) bnx2x_vlan_mac_push_new_cmd() argument
2824 bnx2x_mcast_handle_restore_cmd_e2( struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, int *rdata_idx) bnx2x_mcast_handle_restore_cmd_e2() argument
3506 bnx2x_mcast_handle_restore_cmd_e1( struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, int *rdata_idx) bnx2x_mcast_handle_restore_cmd_e1() argument
3530 bnx2x_mcast_handle_pending_cmds_e1( struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) bnx2x_mcast_handle_pending_cmds_e1() argument
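The classification API sampled above is parameter-block driven: callers never invoke the per-chip set_one_rule()/check_add() callbacks directly; they fill a struct bnx2x_vlan_mac_ramrod_params and hand it to bnx2x_config_vlan_mac(), which validates, enqueues and executes the command through the exe_queue machinery. A minimal sketch of adding one MAC address, modelled on bnx2x_set_mac_one() in bnx2x_main.c (the object pointer and error handling are illustrative assumptions):

	static int example_add_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *obj, const u8 *mac)
	{
		struct bnx2x_vlan_mac_ramrod_params p;

		memset(&p, 0, sizeof(p));
		p.vlan_mac_obj = obj;
		/* Wait for completion instead of returning with the
		 * command still pending on the exe queue. */
		__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

		memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
		p.user_req.cmd = BNX2X_VLAN_MAC_ADD;

		return bnx2x_config_vlan_mac(bp, &p);
	}
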
H A Dbnx2x_init.h207 static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) bnx2x_map_q_cos() argument
210 u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); bnx2x_map_q_cos()
218 if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { bnx2x_map_q_cos()
220 if (BP_PORT(bp)) { bnx2x_map_q_cos()
229 BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); bnx2x_map_q_cos()
233 REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); bnx2x_map_q_cos()
237 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos()
238 REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); bnx2x_map_q_cos()
242 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos()
243 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); bnx2x_map_q_cos()
248 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { bnx2x_map_q_cos()
250 reg_bit_map = REG_RD(bp, reg_addr); bnx2x_map_q_cos()
255 REG_WR(bp, reg_addr, reg_bit_map); bnx2x_map_q_cos()
262 static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, bnx2x_dcb_config_qm() argument
265 bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, bnx2x_dcb_config_qm()
267 bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, bnx2x_dcb_config_qm()
269 bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, bnx2x_dcb_config_qm()
273 bnx2x_map_q_cos(bp, BNX2X_ETH_Q, bnx2x_dcb_config_qm()
275 bnx2x_map_q_cos(bp, BNX2X_TOE_Q, bnx2x_dcb_config_qm()
277 bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, bnx2x_dcb_config_qm()
576 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
578 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
580 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
581 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
582 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
676 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) bnx2x_set_mcp_parity() argument
682 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); bnx2x_set_mcp_parity()
689 REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); bnx2x_set_mcp_parity()
693 static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) bnx2x_parity_reg_mask() argument
695 if (CHIP_IS_E1(bp)) bnx2x_parity_reg_mask()
697 else if (CHIP_IS_E1H(bp)) bnx2x_parity_reg_mask()
699 else if (CHIP_IS_E2(bp)) bnx2x_parity_reg_mask()
705 static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) bnx2x_disable_blocks_parity() argument
710 u32 dis_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_disable_blocks_parity()
713 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, bnx2x_disable_blocks_parity()
722 bnx2x_set_mcp_parity(bp, false); bnx2x_disable_blocks_parity()
726 static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) bnx2x_clear_blocks_parity() argument
736 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity()
737 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity()
738 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity()
739 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); bnx2x_clear_blocks_parity()
742 u32 reg_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_clear_blocks_parity()
745 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. bnx2x_clear_blocks_parity()
756 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); bnx2x_clear_blocks_parity()
767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); bnx2x_clear_blocks_parity()
770 static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) bnx2x_enable_blocks_parity() argument
775 u32 reg_mask = bnx2x_parity_reg_mask(bp, i); bnx2x_enable_blocks_parity()
778 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, bnx2x_enable_blocks_parity()
783 bnx2x_set_mcp_parity(bp, true); bnx2x_enable_blocks_parity()
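The bnx2x_init.h parity helpers above are intended to bracket hardware init: block parity reporting is masked before the blocks are configured, and only re-armed after attentions left over from the init sequence have been cleared. A rough sketch of the ordering (the surrounding init call is illustrative; see the bnx2x_init_hw_*() paths in bnx2x_main.c for the real placement):

	bnx2x_disable_blocks_parity(bp);  /* mask per-block parity sources */

	/* ... chip/port/function hardware init runs here ... */

	bnx2x_clear_blocks_parity(bp);    /* drop parity status raised by init */
	bnx2x_enable_blocks_parity(bp);   /* re-arm reporting for runtime */
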
H A Dbnx2x_sp.h93 int (*wait_comp)(struct bnx2x *bp,
170 typedef int (*exe_q_validate)(struct bnx2x *bp,
174 typedef int (*exe_q_remove)(struct bnx2x *bp,
181 typedef int (*exe_q_optimize)(struct bnx2x *bp,
184 typedef int (*exe_q_execute)(struct bnx2x *bp,
326 int (*get_n_elements)(struct bnx2x *bp,
336 int (*check_add)(struct bnx2x *bp,
346 (*check_del)(struct bnx2x *bp,
355 bool (*check_move)(struct bnx2x *bp,
372 void (*set_one_rule)(struct bnx2x *bp,
385 * @param bp
394 int (*delete_all)(struct bnx2x *bp,
403 * @param bp
414 int (*restore)(struct bnx2x *bp,
421 * @param bp
435 int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
444 int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
497 int (*config_rx_mode)(struct bnx2x *bp,
500 int (*wait_comp)(struct bnx2x *bp,
582 int (*config_mcast)(struct bnx2x *bp,
589 * @param bp
597 int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
600 int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
604 void (*set_one_rule)(struct bnx2x *bp,
622 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
629 int (*validate)(struct bnx2x *bp,
636 void (*revert)(struct bnx2x *bp,
757 int (*config_rss)(struct bnx2x *bp,
1074 int (*send_cmd)(struct bnx2x *bp,
1086 int (*check_transition)(struct bnx2x *bp,
1093 int (*complete_cmd)(struct bnx2x *bp,
1097 int (*wait_comp)(struct bnx2x *bp,
1298 int (*init_hw_cmn_chip)(struct bnx2x *bp);
1299 int (*init_hw_cmn)(struct bnx2x *bp);
1300 int (*init_hw_port)(struct bnx2x *bp);
1301 int (*init_hw_func)(struct bnx2x *bp);
1304 void (*reset_hw_cmn)(struct bnx2x *bp);
1305 void (*reset_hw_port)(struct bnx2x *bp);
1306 void (*reset_hw_func)(struct bnx2x *bp);
1309 int (*gunzip_init)(struct bnx2x *bp);
1310 void (*gunzip_end)(struct bnx2x *bp);
1313 int (*init_fw)(struct bnx2x *bp);
1314 void (*release_fw)(struct bnx2x *bp);
1351 int (*send_cmd)(struct bnx2x *bp,
1357 int (*check_transition)(struct bnx2x *bp,
1364 int (*complete_cmd)(struct bnx2x *bp,
1368 int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
1378 void bnx2x_init_func_obj(struct bnx2x *bp,
1384 int bnx2x_func_state_change(struct bnx2x *bp,
1387 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
1390 void bnx2x_init_queue_obj(struct bnx2x *bp,
1395 int bnx2x_queue_state_change(struct bnx2x *bp,
1398 int bnx2x_get_q_logical_state(struct bnx2x *bp,
1402 void bnx2x_init_mac_obj(struct bnx2x *bp,
1409 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1416 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1424 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
1426 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
1428 int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
1430 int bnx2x_config_vlan_mac(struct bnx2x *bp,
1433 int bnx2x_vlan_mac_move(struct bnx2x *bp,
1439 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
1451 int bnx2x_config_rx_mode(struct bnx2x *bp,
1456 void bnx2x_init_mcast_obj(struct bnx2x *bp,
1483 int bnx2x_config_mcast(struct bnx2x *bp,
1488 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
1491 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
1498 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
1510 int bnx2x_config_rss(struct bnx2x *bp,
1523 #define PF_MAC_CREDIT_E2(bp, func_num) \
1524 ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
1525 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
1527 #define PF_VLAN_CREDIT_E2(bp, func_num) \
1528 ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
1529 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
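bnx2x_sp.h shows why nearly every hit in this file takes bp as its first argument: the state objects are tables of function pointers (validate/remove/optimize/execute for the exe queue; send_cmd/check_transition/complete_cmd/wait_comp for the queue and function objects), and bp is threaded through each callback rather than stored in the object. Drivers therefore only touch the bnx2x_config_*() wrappers with a filled parameter block. A sketch for the RSS case, loosely following bnx2x_config_rss_params() in bnx2x_cmn.c (flag selection and the indirection table are illustrative):

	static int example_config_rss(struct bnx2x *bp,
				      struct bnx2x_rss_config_obj *rss_obj,
				      const u8 *ind_table) /* T_ETH_INDIRECTION_TABLE_SIZE bytes */
	{
		struct bnx2x_config_rss_params params = {NULL};

		params.rss_obj = rss_obj;
		__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		params.rss_result_mask = MULTI_MASK;
		memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

		/* Dispatches through rss_obj->config_rss and optionally waits. */
		return bnx2x_config_rss(bp, &params);
	}
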
H A Dbnx2x_stats.h541 void bnx2x_memset_stats(struct bnx2x *bp);
542 void bnx2x_stats_init(struct bnx2x *bp);
543 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
544 int bnx2x_stats_safe_exec(struct bnx2x *bp,
551 * @bp: driver handle
553 void bnx2x_save_statistics(struct bnx2x *bp);
555 void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
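The stats interface above is event driven rather than polled: callers push events into a small state machine and the handler decides what to DMA from the chip. Typical trigger points (illustrative; see bnx2x_main.c for the actual call sites):

	bnx2x_stats_init(bp);                          /* once, at load */
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);   /* on link change */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);    /* periodic tick */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);      /* at unload */
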
H A Dbnx2x_link.h321 struct bnx2x *bp; member in struct:link_params
427 int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
431 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
434 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
450 u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
540 void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
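Note the "member in struct:link_params" hit at the top of the bnx2x_link.h results: link_params carries a struct bnx2x *bp back-pointer, which lets the PHY code in bnx2x_link.c be written against link_params alone. Practically every function there opens with the same idiom:

	struct bnx2x *bp = params->bp;  /* recover the device handle */
	/* ... register and MDIO access go through bp from here on ... */
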
/linux-4.4.14/fs/xfs/
H A Dxfs_buf.c48 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
52 # define XB_SET_OWNER(bp) do { } while (0)
53 # define XB_CLEAR_OWNER(bp) do { } while (0)
54 # define XB_GET_OWNER(bp) do { } while (0)
63 struct xfs_buf *bp) xfs_buf_is_vmapped()
70 * to be both for b_addr and bp->b_page_count > 1. xfs_buf_is_vmapped()
72 return bp->b_addr && bp->b_page_count > 1; xfs_buf_is_vmapped()
77 struct xfs_buf *bp) xfs_buf_vmap_len()
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; xfs_buf_vmap_len()
92 struct xfs_buf *bp) xfs_buf_stale()
94 ASSERT(xfs_buf_islocked(bp)); xfs_buf_stale()
96 bp->b_flags |= XBF_STALE; xfs_buf_stale()
103 bp->b_flags &= ~_XBF_DELWRI_Q; xfs_buf_stale()
105 spin_lock(&bp->b_lock); xfs_buf_stale()
106 atomic_set(&bp->b_lru_ref, 0); xfs_buf_stale()
107 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && xfs_buf_stale()
108 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) xfs_buf_stale()
109 atomic_dec(&bp->b_hold); xfs_buf_stale()
111 ASSERT(atomic_read(&bp->b_hold) >= 1); xfs_buf_stale()
112 spin_unlock(&bp->b_lock); xfs_buf_stale()
117 struct xfs_buf *bp, xfs_buf_get_maps()
120 ASSERT(bp->b_maps == NULL); xfs_buf_get_maps()
121 bp->b_map_count = map_count; xfs_buf_get_maps()
124 bp->b_maps = &bp->__b_map; xfs_buf_get_maps()
128 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), xfs_buf_get_maps()
130 if (!bp->b_maps) xfs_buf_get_maps()
140 struct xfs_buf *bp) xfs_buf_free_maps()
142 if (bp->b_maps != &bp->__b_map) { xfs_buf_free_maps()
143 kmem_free(bp->b_maps); xfs_buf_free_maps()
144 bp->b_maps = NULL; xfs_buf_free_maps()
155 struct xfs_buf *bp; _xfs_buf_alloc() local
159 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); _xfs_buf_alloc()
160 if (unlikely(!bp)) _xfs_buf_alloc()
169 atomic_set(&bp->b_hold, 1); _xfs_buf_alloc()
170 atomic_set(&bp->b_lru_ref, 1); _xfs_buf_alloc()
171 init_completion(&bp->b_iowait); _xfs_buf_alloc()
172 INIT_LIST_HEAD(&bp->b_lru); _xfs_buf_alloc()
173 INIT_LIST_HEAD(&bp->b_list); _xfs_buf_alloc()
174 RB_CLEAR_NODE(&bp->b_rbnode); _xfs_buf_alloc()
175 sema_init(&bp->b_sema, 0); /* held, no waiters */ _xfs_buf_alloc()
176 spin_lock_init(&bp->b_lock); _xfs_buf_alloc()
177 XB_SET_OWNER(bp); _xfs_buf_alloc()
178 bp->b_target = target; _xfs_buf_alloc()
179 bp->b_flags = flags; _xfs_buf_alloc()
186 error = xfs_buf_get_maps(bp, nmaps); _xfs_buf_alloc()
188 kmem_zone_free(xfs_buf_zone, bp); _xfs_buf_alloc()
192 bp->b_bn = map[0].bm_bn; _xfs_buf_alloc()
193 bp->b_length = 0; _xfs_buf_alloc()
195 bp->b_maps[i].bm_bn = map[i].bm_bn; _xfs_buf_alloc()
196 bp->b_maps[i].bm_len = map[i].bm_len; _xfs_buf_alloc()
197 bp->b_length += map[i].bm_len; _xfs_buf_alloc()
199 bp->b_io_length = bp->b_length; _xfs_buf_alloc()
201 atomic_set(&bp->b_pin_count, 0); _xfs_buf_alloc()
202 init_waitqueue_head(&bp->b_waiters); _xfs_buf_alloc()
205 trace_xfs_buf_init(bp, _RET_IP_); _xfs_buf_alloc()
207 return bp; _xfs_buf_alloc()
216 xfs_buf_t *bp, _xfs_buf_get_pages()
220 if (bp->b_pages == NULL) { _xfs_buf_get_pages()
221 bp->b_page_count = page_count; _xfs_buf_get_pages()
223 bp->b_pages = bp->b_page_array; _xfs_buf_get_pages()
225 bp->b_pages = kmem_alloc(sizeof(struct page *) * _xfs_buf_get_pages()
227 if (bp->b_pages == NULL) _xfs_buf_get_pages()
230 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); _xfs_buf_get_pages()
240 xfs_buf_t *bp) _xfs_buf_free_pages()
242 if (bp->b_pages != bp->b_page_array) { _xfs_buf_free_pages()
243 kmem_free(bp->b_pages); _xfs_buf_free_pages()
244 bp->b_pages = NULL; _xfs_buf_free_pages()
257 xfs_buf_t *bp) xfs_buf_free()
259 trace_xfs_buf_free(bp, _RET_IP_); xfs_buf_free()
261 ASSERT(list_empty(&bp->b_lru)); xfs_buf_free()
263 if (bp->b_flags & _XBF_PAGES) { xfs_buf_free()
266 if (xfs_buf_is_vmapped(bp)) xfs_buf_free()
267 vm_unmap_ram(bp->b_addr - bp->b_offset, xfs_buf_free()
268 bp->b_page_count); xfs_buf_free()
270 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_free()
271 struct page *page = bp->b_pages[i]; xfs_buf_free()
275 } else if (bp->b_flags & _XBF_KMEM) xfs_buf_free()
276 kmem_free(bp->b_addr); xfs_buf_free()
277 _xfs_buf_free_pages(bp); xfs_buf_free()
278 xfs_buf_free_maps(bp); xfs_buf_free()
279 kmem_zone_free(xfs_buf_zone, bp); xfs_buf_free()
287 xfs_buf_t *bp, xfs_buf_allocate_memory()
302 size = BBTOB(bp->b_length); xfs_buf_allocate_memory()
304 bp->b_addr = kmem_alloc(size, KM_NOFS); xfs_buf_allocate_memory()
305 if (!bp->b_addr) { xfs_buf_allocate_memory()
310 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != xfs_buf_allocate_memory()
311 ((unsigned long)bp->b_addr & PAGE_MASK)) { xfs_buf_allocate_memory()
313 kmem_free(bp->b_addr); xfs_buf_allocate_memory()
314 bp->b_addr = NULL; xfs_buf_allocate_memory()
317 bp->b_offset = offset_in_page(bp->b_addr); xfs_buf_allocate_memory()
318 bp->b_pages = bp->b_page_array; xfs_buf_allocate_memory()
319 bp->b_pages[0] = virt_to_page(bp->b_addr); xfs_buf_allocate_memory()
320 bp->b_page_count = 1; xfs_buf_allocate_memory()
321 bp->b_flags |= _XBF_KMEM; xfs_buf_allocate_memory()
326 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; xfs_buf_allocate_memory()
327 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) xfs_buf_allocate_memory()
330 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_allocate_memory()
334 offset = bp->b_offset; xfs_buf_allocate_memory()
335 bp->b_flags |= _XBF_PAGES; xfs_buf_allocate_memory()
337 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_allocate_memory()
344 bp->b_page_count = i; xfs_buf_allocate_memory()
361 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries); xfs_buf_allocate_memory()
366 XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found); xfs_buf_allocate_memory()
370 bp->b_pages[i] = page; xfs_buf_allocate_memory()
376 for (i = 0; i < bp->b_page_count; i++) xfs_buf_allocate_memory()
377 __free_page(bp->b_pages[i]); xfs_buf_allocate_memory()
386 xfs_buf_t *bp, _xfs_buf_map_pages()
389 ASSERT(bp->b_flags & _XBF_PAGES); _xfs_buf_map_pages()
390 if (bp->b_page_count == 1) { _xfs_buf_map_pages()
392 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; _xfs_buf_map_pages()
394 bp->b_addr = NULL; _xfs_buf_map_pages()
409 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, _xfs_buf_map_pages()
411 if (bp->b_addr) _xfs_buf_map_pages()
417 if (!bp->b_addr) _xfs_buf_map_pages()
419 bp->b_addr += bp->b_offset; _xfs_buf_map_pages()
445 xfs_buf_t *bp; _xfs_buf_find() local
484 bp = NULL; _xfs_buf_find()
487 bp = rb_entry(parent, struct xfs_buf, b_rbnode); _xfs_buf_find()
489 if (blkno < bp->b_bn) _xfs_buf_find()
491 else if (blkno > bp->b_bn) _xfs_buf_find()
502 if (bp->b_length != numblks) { _xfs_buf_find()
503 ASSERT(bp->b_flags & XBF_STALE); _xfs_buf_find()
507 atomic_inc(&bp->b_hold); _xfs_buf_find()
530 if (!xfs_buf_trylock(bp)) { _xfs_buf_find()
532 xfs_buf_rele(bp); _xfs_buf_find()
536 xfs_buf_lock(bp); _xfs_buf_find()
545 if (bp->b_flags & XBF_STALE) { _xfs_buf_find()
546 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); _xfs_buf_find()
547 ASSERT(bp->b_iodone == NULL); _xfs_buf_find()
548 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; _xfs_buf_find()
549 bp->b_ops = NULL; _xfs_buf_find()
552 trace_xfs_buf_find(bp, flags, _RET_IP_); _xfs_buf_find()
554 return bp; _xfs_buf_find()
569 struct xfs_buf *bp; xfs_buf_get_map() local
573 bp = _xfs_buf_find(target, map, nmaps, flags, NULL); xfs_buf_get_map()
574 if (likely(bp)) xfs_buf_get_map()
587 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); xfs_buf_get_map()
588 if (!bp) { xfs_buf_get_map()
593 if (bp != new_bp) xfs_buf_get_map()
597 if (!bp->b_addr) { xfs_buf_get_map()
598 error = _xfs_buf_map_pages(bp, flags); xfs_buf_get_map()
602 xfs_buf_relse(bp); xfs_buf_get_map()
612 xfs_buf_ioerror(bp, 0); xfs_buf_get_map()
615 trace_xfs_buf_get(bp, flags, _RET_IP_); xfs_buf_get_map()
616 return bp; xfs_buf_get_map()
621 xfs_buf_t *bp, _xfs_buf_read()
625 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); _xfs_buf_read()
627 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); _xfs_buf_read()
628 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); _xfs_buf_read()
631 xfs_buf_submit(bp); _xfs_buf_read()
634 return xfs_buf_submit_wait(bp); _xfs_buf_read()
645 struct xfs_buf *bp; xfs_buf_read_map() local
649 bp = xfs_buf_get_map(target, map, nmaps, flags); xfs_buf_read_map()
650 if (bp) { xfs_buf_read_map()
651 trace_xfs_buf_read(bp, flags, _RET_IP_); xfs_buf_read_map()
653 if (!XFS_BUF_ISDONE(bp)) { xfs_buf_read_map()
655 bp->b_ops = ops; xfs_buf_read_map()
656 _xfs_buf_read(bp, flags); xfs_buf_read_map()
662 xfs_buf_relse(bp); xfs_buf_read_map()
666 bp->b_flags &= ~XBF_READ; xfs_buf_read_map()
670 return bp; xfs_buf_read_map()
704 struct xfs_buf *bp; xfs_buf_read_uncached() local
708 bp = xfs_buf_get_uncached(target, numblks, flags); xfs_buf_read_uncached()
709 if (!bp) xfs_buf_read_uncached()
713 ASSERT(bp->b_map_count == 1); xfs_buf_read_uncached()
714 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ xfs_buf_read_uncached()
715 bp->b_maps[0].bm_bn = daddr; xfs_buf_read_uncached()
716 bp->b_flags |= XBF_READ; xfs_buf_read_uncached()
717 bp->b_ops = ops; xfs_buf_read_uncached()
719 xfs_buf_submit_wait(bp); xfs_buf_read_uncached()
720 if (bp->b_error) { xfs_buf_read_uncached()
721 int error = bp->b_error; xfs_buf_read_uncached()
722 xfs_buf_relse(bp); xfs_buf_read_uncached()
726 *bpp = bp; xfs_buf_read_uncached()
736 struct xfs_buf *bp, xfs_buf_set_empty()
739 if (bp->b_pages) xfs_buf_set_empty()
740 _xfs_buf_free_pages(bp); xfs_buf_set_empty()
742 bp->b_pages = NULL; xfs_buf_set_empty()
743 bp->b_page_count = 0; xfs_buf_set_empty()
744 bp->b_addr = NULL; xfs_buf_set_empty()
745 bp->b_length = numblks; xfs_buf_set_empty()
746 bp->b_io_length = numblks; xfs_buf_set_empty()
748 ASSERT(bp->b_map_count == 1); xfs_buf_set_empty()
749 bp->b_bn = XFS_BUF_DADDR_NULL; xfs_buf_set_empty()
750 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; xfs_buf_set_empty()
751 bp->b_maps[0].bm_len = bp->b_length; xfs_buf_set_empty()
767 xfs_buf_t *bp, xfs_buf_associate_memory()
784 if (bp->b_pages) xfs_buf_associate_memory()
785 _xfs_buf_free_pages(bp); xfs_buf_associate_memory()
787 bp->b_pages = NULL; xfs_buf_associate_memory()
788 bp->b_addr = mem; xfs_buf_associate_memory()
790 rval = _xfs_buf_get_pages(bp, page_count); xfs_buf_associate_memory()
794 bp->b_offset = offset; xfs_buf_associate_memory()
796 for (i = 0; i < bp->b_page_count; i++) { xfs_buf_associate_memory()
797 bp->b_pages[i] = mem_to_page((void *)pageaddr); xfs_buf_associate_memory()
801 bp->b_io_length = BTOBB(len); xfs_buf_associate_memory()
802 bp->b_length = BTOBB(buflen); xfs_buf_associate_memory()
815 struct xfs_buf *bp; xfs_buf_get_uncached() local
818 bp = _xfs_buf_alloc(target, &map, 1, 0); xfs_buf_get_uncached()
819 if (unlikely(bp == NULL)) xfs_buf_get_uncached()
823 error = _xfs_buf_get_pages(bp, page_count); xfs_buf_get_uncached()
828 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); xfs_buf_get_uncached()
829 if (!bp->b_pages[i]) xfs_buf_get_uncached()
832 bp->b_flags |= _XBF_PAGES; xfs_buf_get_uncached()
834 error = _xfs_buf_map_pages(bp, 0); xfs_buf_get_uncached()
841 trace_xfs_buf_get_uncached(bp, _RET_IP_); xfs_buf_get_uncached()
842 return bp; xfs_buf_get_uncached()
846 __free_page(bp->b_pages[i]); xfs_buf_get_uncached()
847 _xfs_buf_free_pages(bp); xfs_buf_get_uncached()
849 xfs_buf_free_maps(bp); xfs_buf_get_uncached()
850 kmem_zone_free(xfs_buf_zone, bp); xfs_buf_get_uncached()
862 xfs_buf_t *bp) xfs_buf_hold()
864 trace_xfs_buf_hold(bp, _RET_IP_); xfs_buf_hold()
865 atomic_inc(&bp->b_hold); xfs_buf_hold()
874 xfs_buf_t *bp) xfs_buf_rele()
876 struct xfs_perag *pag = bp->b_pag; xfs_buf_rele()
878 trace_xfs_buf_rele(bp, _RET_IP_); xfs_buf_rele()
881 ASSERT(list_empty(&bp->b_lru)); xfs_buf_rele()
882 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); xfs_buf_rele()
883 if (atomic_dec_and_test(&bp->b_hold)) xfs_buf_rele()
884 xfs_buf_free(bp); xfs_buf_rele()
888 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); xfs_buf_rele()
890 ASSERT(atomic_read(&bp->b_hold) > 0); xfs_buf_rele()
891 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { xfs_buf_rele()
892 spin_lock(&bp->b_lock); xfs_buf_rele()
893 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { xfs_buf_rele()
899 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { xfs_buf_rele()
900 bp->b_state &= ~XFS_BSTATE_DISPOSE; xfs_buf_rele()
901 atomic_inc(&bp->b_hold); xfs_buf_rele()
903 spin_unlock(&bp->b_lock); xfs_buf_rele()
912 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { xfs_buf_rele()
913 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); xfs_buf_rele()
915 ASSERT(list_empty(&bp->b_lru)); xfs_buf_rele()
917 spin_unlock(&bp->b_lock); xfs_buf_rele()
919 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); xfs_buf_rele()
920 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); xfs_buf_rele()
923 xfs_buf_free(bp); xfs_buf_rele()
942 struct xfs_buf *bp) xfs_buf_trylock()
946 locked = down_trylock(&bp->b_sema) == 0; xfs_buf_trylock()
948 XB_SET_OWNER(bp); xfs_buf_trylock()
950 trace_xfs_buf_trylock(bp, _RET_IP_); xfs_buf_trylock()
965 struct xfs_buf *bp) xfs_buf_lock()
967 trace_xfs_buf_lock(bp, _RET_IP_); xfs_buf_lock()
969 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_buf_lock()
970 xfs_log_force(bp->b_target->bt_mount, 0); xfs_buf_lock()
971 down(&bp->b_sema); xfs_buf_lock()
972 XB_SET_OWNER(bp); xfs_buf_lock()
974 trace_xfs_buf_lock_done(bp, _RET_IP_); xfs_buf_lock()
979 struct xfs_buf *bp) xfs_buf_unlock()
981 XB_CLEAR_OWNER(bp); xfs_buf_unlock()
982 up(&bp->b_sema); xfs_buf_unlock()
984 trace_xfs_buf_unlock(bp, _RET_IP_); xfs_buf_unlock()
989 xfs_buf_t *bp) xfs_buf_wait_unpin()
993 if (atomic_read(&bp->b_pin_count) == 0) xfs_buf_wait_unpin()
996 add_wait_queue(&bp->b_waiters, &wait); xfs_buf_wait_unpin()
999 if (atomic_read(&bp->b_pin_count) == 0) xfs_buf_wait_unpin()
1003 remove_wait_queue(&bp->b_waiters, &wait); xfs_buf_wait_unpin()
1013 struct xfs_buf *bp) xfs_buf_ioend()
1015 bool read = bp->b_flags & XBF_READ; xfs_buf_ioend()
1017 trace_xfs_buf_iodone(bp, _RET_IP_); xfs_buf_ioend()
1019 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); xfs_buf_ioend()
1025 if (!bp->b_error && bp->b_io_error) xfs_buf_ioend()
1026 xfs_buf_ioerror(bp, bp->b_io_error); xfs_buf_ioend()
1029 if (read && !bp->b_error && bp->b_ops) { xfs_buf_ioend()
1030 ASSERT(!bp->b_iodone); xfs_buf_ioend()
1031 bp->b_ops->verify_read(bp); xfs_buf_ioend()
1034 if (!bp->b_error) xfs_buf_ioend()
1035 bp->b_flags |= XBF_DONE; xfs_buf_ioend()
1037 if (bp->b_iodone) xfs_buf_ioend()
1038 (*(bp->b_iodone))(bp); xfs_buf_ioend()
1039 else if (bp->b_flags & XBF_ASYNC) xfs_buf_ioend()
1040 xfs_buf_relse(bp); xfs_buf_ioend()
1042 complete(&bp->b_iowait); xfs_buf_ioend()
1049 struct xfs_buf *bp = xfs_buf_ioend_work() local
1052 xfs_buf_ioend(bp); xfs_buf_ioend_work()
1057 struct xfs_buf *bp) xfs_buf_ioend_async()
1059 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); xfs_buf_ioend_async()
1060 queue_work(bp->b_ioend_wq, &bp->b_ioend_work); xfs_buf_ioend_async()
1065 xfs_buf_t *bp, xfs_buf_ioerror()
1069 bp->b_error = error; xfs_buf_ioerror()
1070 trace_xfs_buf_ioerror(bp, error, _RET_IP_); xfs_buf_ioerror()
1075 struct xfs_buf *bp, xfs_buf_ioerror_alert()
1078 xfs_alert(bp->b_target->bt_mount, xfs_buf_ioerror_alert()
1080 (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); xfs_buf_ioerror_alert()
1085 struct xfs_buf *bp) xfs_bwrite()
1089 ASSERT(xfs_buf_islocked(bp)); xfs_bwrite()
1091 bp->b_flags |= XBF_WRITE; xfs_bwrite()
1092 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | xfs_bwrite()
1095 error = xfs_buf_submit_wait(bp); xfs_bwrite()
1097 xfs_force_shutdown(bp->b_target->bt_mount, xfs_bwrite()
1107 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; xfs_buf_bio_end_io() local
1114 spin_lock(&bp->b_lock); xfs_buf_bio_end_io()
1115 if (!bp->b_io_error) xfs_buf_bio_end_io()
1116 bp->b_io_error = bio->bi_error; xfs_buf_bio_end_io()
1117 spin_unlock(&bp->b_lock); xfs_buf_bio_end_io()
1120 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) xfs_buf_bio_end_io()
1121 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); xfs_buf_bio_end_io()
1123 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_bio_end_io()
1124 xfs_buf_ioend_async(bp); xfs_buf_bio_end_io()
1130 struct xfs_buf *bp, xfs_buf_ioapply_map()
1137 int total_nr_pages = bp->b_page_count; xfs_buf_ioapply_map()
1140 sector_t sector = bp->b_maps[map].bm_bn; xfs_buf_ioapply_map()
1144 total_nr_pages = bp->b_page_count; xfs_buf_ioapply_map()
1158 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); xfs_buf_ioapply_map()
1163 atomic_inc(&bp->b_io_remaining); xfs_buf_ioapply_map()
1169 bio->bi_bdev = bp->b_target->bt_bdev; xfs_buf_ioapply_map()
1172 bio->bi_private = bp; xfs_buf_ioapply_map()
1181 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, xfs_buf_ioapply_map()
1193 if (xfs_buf_is_vmapped(bp)) { xfs_buf_ioapply_map()
1194 flush_kernel_vmap_range(bp->b_addr, xfs_buf_ioapply_map()
1195 xfs_buf_vmap_len(bp)); xfs_buf_ioapply_map()
1205 atomic_dec(&bp->b_io_remaining); xfs_buf_ioapply_map()
1206 xfs_buf_ioerror(bp, -EIO); xfs_buf_ioapply_map()
1214 struct xfs_buf *bp) _xfs_buf_ioapply()
1226 bp->b_error = 0; _xfs_buf_ioapply()
1232 if (!bp->b_ioend_wq) _xfs_buf_ioapply()
1233 bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue; _xfs_buf_ioapply()
1235 if (bp->b_flags & XBF_WRITE) { _xfs_buf_ioapply()
1236 if (bp->b_flags & XBF_SYNCIO) _xfs_buf_ioapply()
1240 if (bp->b_flags & XBF_FUA) _xfs_buf_ioapply()
1242 if (bp->b_flags & XBF_FLUSH) _xfs_buf_ioapply()
1250 if (bp->b_ops) { _xfs_buf_ioapply()
1251 bp->b_ops->verify_write(bp); _xfs_buf_ioapply()
1252 if (bp->b_error) { _xfs_buf_ioapply()
1253 xfs_force_shutdown(bp->b_target->bt_mount, _xfs_buf_ioapply()
1257 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { _xfs_buf_ioapply()
1258 struct xfs_mount *mp = bp->b_target->bt_mount; _xfs_buf_ioapply()
1267 __func__, bp->b_bn, bp->b_length); _xfs_buf_ioapply()
1268 xfs_hex_dump(bp->b_addr, 64); _xfs_buf_ioapply()
1272 } else if (bp->b_flags & XBF_READ_AHEAD) { _xfs_buf_ioapply()
1287 offset = bp->b_offset; _xfs_buf_ioapply()
1288 size = BBTOB(bp->b_io_length); _xfs_buf_ioapply()
1290 for (i = 0; i < bp->b_map_count; i++) { _xfs_buf_ioapply()
1291 xfs_buf_ioapply_map(bp, i, &offset, &size, rw); _xfs_buf_ioapply()
1292 if (bp->b_error) _xfs_buf_ioapply()
1308 struct xfs_buf *bp) xfs_buf_submit()
1310 trace_xfs_buf_submit(bp, _RET_IP_); xfs_buf_submit()
1312 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); xfs_buf_submit()
1313 ASSERT(bp->b_flags & XBF_ASYNC); xfs_buf_submit()
1316 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xfs_buf_submit()
1317 xfs_buf_ioerror(bp, -EIO); xfs_buf_submit()
1318 bp->b_flags &= ~XBF_DONE; xfs_buf_submit()
1319 xfs_buf_stale(bp); xfs_buf_submit()
1320 xfs_buf_ioend(bp); xfs_buf_submit()
1324 if (bp->b_flags & XBF_WRITE) xfs_buf_submit()
1325 xfs_buf_wait_unpin(bp); xfs_buf_submit()
1328 bp->b_io_error = 0; xfs_buf_submit()
1338 xfs_buf_hold(bp); xfs_buf_submit()
1345 atomic_set(&bp->b_io_remaining, 1); xfs_buf_submit()
1346 _xfs_buf_ioapply(bp); xfs_buf_submit()
1353 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { xfs_buf_submit()
1354 if (bp->b_error) xfs_buf_submit()
1355 xfs_buf_ioend(bp); xfs_buf_submit()
1357 xfs_buf_ioend_async(bp); xfs_buf_submit()
1360 xfs_buf_rele(bp); xfs_buf_submit()
1361 /* Note: it is not safe to reference bp now we've dropped our ref */ xfs_buf_submit()
1369 struct xfs_buf *bp) xfs_buf_submit_wait()
1373 trace_xfs_buf_submit_wait(bp, _RET_IP_); xfs_buf_submit_wait()
1375 ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC))); xfs_buf_submit_wait()
1377 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xfs_buf_submit_wait()
1378 xfs_buf_ioerror(bp, -EIO); xfs_buf_submit_wait()
1379 xfs_buf_stale(bp); xfs_buf_submit_wait()
1380 bp->b_flags &= ~XBF_DONE; xfs_buf_submit_wait()
1384 if (bp->b_flags & XBF_WRITE) xfs_buf_submit_wait()
1385 xfs_buf_wait_unpin(bp); xfs_buf_submit_wait()
1388 bp->b_io_error = 0; xfs_buf_submit_wait()
1396 xfs_buf_hold(bp); xfs_buf_submit_wait()
1403 atomic_set(&bp->b_io_remaining, 1); xfs_buf_submit_wait()
1404 _xfs_buf_ioapply(bp); xfs_buf_submit_wait()
1410 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_submit_wait()
1411 xfs_buf_ioend(bp); xfs_buf_submit_wait()
1414 trace_xfs_buf_iowait(bp, _RET_IP_); xfs_buf_submit_wait()
1415 wait_for_completion(&bp->b_iowait); xfs_buf_submit_wait()
1416 trace_xfs_buf_iowait_done(bp, _RET_IP_); xfs_buf_submit_wait()
1417 error = bp->b_error; xfs_buf_submit_wait()
1423 xfs_buf_rele(bp); xfs_buf_submit_wait()
1429 struct xfs_buf *bp, xfs_buf_offset()
1434 if (bp->b_addr) xfs_buf_offset()
1435 return bp->b_addr + offset; xfs_buf_offset()
1437 offset += bp->b_offset; xfs_buf_offset()
1438 page = bp->b_pages[offset >> PAGE_SHIFT]; xfs_buf_offset()
1447 xfs_buf_t *bp, /* buffer to process */ xfs_buf_iomove()
1460 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; xfs_buf_iomove()
1461 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; xfs_buf_iomove()
1462 page = bp->b_pages[page_index]; xfs_buf_iomove()
1464 BBTOB(bp->b_io_length) - boff); xfs_buf_iomove()
1501 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); xfs_buftarg_wait_rele() local
1504 if (atomic_read(&bp->b_hold) > 1) { xfs_buftarg_wait_rele()
1506 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); xfs_buftarg_wait_rele()
1509 if (!spin_trylock(&bp->b_lock)) xfs_buftarg_wait_rele()
1516 atomic_set(&bp->b_lru_ref, 0); xfs_buftarg_wait_rele()
1517 bp->b_state |= XFS_BSTATE_DISPOSE; xfs_buftarg_wait_rele()
1519 spin_unlock(&bp->b_lock); xfs_buftarg_wait_rele()
1546 struct xfs_buf *bp; xfs_wait_buftarg() local
1547 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); xfs_wait_buftarg()
1548 list_del_init(&bp->b_lru); xfs_wait_buftarg()
1549 if (bp->b_flags & XBF_WRITE_FAIL) { xfs_wait_buftarg()
1552 (long long)bp->b_bn); xfs_wait_buftarg()
1556 xfs_buf_rele(bp); xfs_wait_buftarg()
1570 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); xfs_buftarg_isolate() local
1574 * we are inverting the lru lock/bp->b_lock here, so use a trylock. xfs_buftarg_isolate()
1577 if (!spin_trylock(&bp->b_lock)) xfs_buftarg_isolate()
1584 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { xfs_buftarg_isolate()
1585 spin_unlock(&bp->b_lock); xfs_buftarg_isolate()
1589 bp->b_state |= XFS_BSTATE_DISPOSE; xfs_buftarg_isolate()
1591 spin_unlock(&bp->b_lock); xfs_buftarg_isolate()
1609 struct xfs_buf *bp; xfs_buftarg_shrink_scan() local
1610 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); xfs_buftarg_shrink_scan()
1611 list_del_init(&bp->b_lru); xfs_buftarg_shrink_scan()
1612 xfs_buf_rele(bp); xfs_buftarg_shrink_scan()
1727 struct xfs_buf *bp, xfs_buf_delwri_queue()
1730 ASSERT(xfs_buf_islocked(bp)); xfs_buf_delwri_queue()
1731 ASSERT(!(bp->b_flags & XBF_READ)); xfs_buf_delwri_queue()
1738 if (bp->b_flags & _XBF_DELWRI_Q) { xfs_buf_delwri_queue()
1739 trace_xfs_buf_delwri_queued(bp, _RET_IP_); xfs_buf_delwri_queue()
1743 trace_xfs_buf_delwri_queue(bp, _RET_IP_); xfs_buf_delwri_queue()
1753 bp->b_flags |= _XBF_DELWRI_Q; xfs_buf_delwri_queue()
1754 if (list_empty(&bp->b_list)) { xfs_buf_delwri_queue()
1755 atomic_inc(&bp->b_hold); xfs_buf_delwri_queue()
1756 list_add_tail(&bp->b_list, list); xfs_buf_delwri_queue()
1774 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); xfs_buf_cmp() local
1777 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; xfs_buf_cmp()
1792 struct xfs_buf *bp, *n; __xfs_buf_delwri_submit() local
1795 list_for_each_entry_safe(bp, n, buffer_list, b_list) { list_for_each_entry_safe()
1797 if (xfs_buf_ispinned(bp)) { list_for_each_entry_safe()
1801 if (!xfs_buf_trylock(bp)) list_for_each_entry_safe()
1804 xfs_buf_lock(bp); list_for_each_entry_safe()
1813 if (!(bp->b_flags & _XBF_DELWRI_Q)) { list_for_each_entry_safe()
1814 list_del_init(&bp->b_list); list_for_each_entry_safe()
1815 xfs_buf_relse(bp); list_for_each_entry_safe()
1819 list_move_tail(&bp->b_list, io_list); list_for_each_entry_safe()
1820 trace_xfs_buf_delwri_split(bp, _RET_IP_); list_for_each_entry_safe()
1826 list_for_each_entry_safe(bp, n, io_list, b_list) { list_for_each_entry_safe()
1827 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL); list_for_each_entry_safe()
1828 bp->b_flags |= XBF_WRITE | XBF_ASYNC; list_for_each_entry_safe()
1836 xfs_buf_hold(bp); list_for_each_entry_safe()
1838 list_del_init(&bp->b_list); list_for_each_entry_safe()
1840 xfs_buf_submit(bp); list_for_each_entry_safe()
1878 struct xfs_buf *bp; xfs_buf_delwri_submit() local
1884 bp = list_first_entry(&io_list, struct xfs_buf, b_list); xfs_buf_delwri_submit()
1886 list_del_init(&bp->b_list); xfs_buf_delwri_submit()
1889 xfs_buf_lock(bp); xfs_buf_delwri_submit()
1890 error2 = bp->b_error; xfs_buf_delwri_submit()
1891 xfs_buf_relse(bp); xfs_buf_delwri_submit()
62 xfs_buf_is_vmapped( struct xfs_buf *bp) xfs_buf_is_vmapped() argument
76 xfs_buf_vmap_len( struct xfs_buf *bp) xfs_buf_vmap_len() argument
91 xfs_buf_stale( struct xfs_buf *bp) xfs_buf_stale() argument
116 xfs_buf_get_maps( struct xfs_buf *bp, int map_count) xfs_buf_get_maps() argument
139 xfs_buf_free_maps( struct xfs_buf *bp) xfs_buf_free_maps() argument
215 _xfs_buf_get_pages( xfs_buf_t *bp, int page_count) _xfs_buf_get_pages() argument
239 _xfs_buf_free_pages( xfs_buf_t *bp) _xfs_buf_free_pages() argument
256 xfs_buf_free( xfs_buf_t *bp) xfs_buf_free() argument
286 xfs_buf_allocate_memory( xfs_buf_t *bp, uint flags) xfs_buf_allocate_memory() argument
385 _xfs_buf_map_pages( xfs_buf_t *bp, uint flags) _xfs_buf_map_pages() argument
620 _xfs_buf_read( xfs_buf_t *bp, xfs_buf_flags_t flags) _xfs_buf_read() argument
735 xfs_buf_set_empty( struct xfs_buf *bp, size_t numblks) xfs_buf_set_empty() argument
766 xfs_buf_associate_memory( xfs_buf_t *bp, void *mem, size_t len) xfs_buf_associate_memory() argument
861 xfs_buf_hold( xfs_buf_t *bp) xfs_buf_hold() argument
873 xfs_buf_rele( xfs_buf_t *bp) xfs_buf_rele() argument
941 xfs_buf_trylock( struct xfs_buf *bp) xfs_buf_trylock() argument
964 xfs_buf_lock( struct xfs_buf *bp) xfs_buf_lock() argument
978 xfs_buf_unlock( struct xfs_buf *bp) xfs_buf_unlock() argument
988 xfs_buf_wait_unpin( xfs_buf_t *bp) xfs_buf_wait_unpin() argument
1012 xfs_buf_ioend( struct xfs_buf *bp) xfs_buf_ioend() argument
1056 xfs_buf_ioend_async( struct xfs_buf *bp) xfs_buf_ioend_async() argument
1064 xfs_buf_ioerror( xfs_buf_t *bp, int error) xfs_buf_ioerror() argument
1074 xfs_buf_ioerror_alert( struct xfs_buf *bp, const char *func) xfs_buf_ioerror_alert() argument
1084 xfs_bwrite( struct xfs_buf *bp) xfs_bwrite() argument
1129 xfs_buf_ioapply_map( struct xfs_buf *bp, int map, int *buf_offset, int *count, int rw) xfs_buf_ioapply_map() argument
1213 _xfs_buf_ioapply( struct xfs_buf *bp) _xfs_buf_ioapply() argument
1307 xfs_buf_submit( struct xfs_buf *bp) xfs_buf_submit() argument
1368 xfs_buf_submit_wait( struct xfs_buf *bp) xfs_buf_submit_wait() argument
1428 xfs_buf_offset( struct xfs_buf *bp, size_t offset) xfs_buf_offset() argument
1446 xfs_buf_iomove( xfs_buf_t *bp, size_t boff, size_t bsize, void *data, xfs_buf_rw_t mode) xfs_buf_iomove() argument
1726 xfs_buf_delwri_queue( struct xfs_buf *bp, struct list_head *list) xfs_buf_delwri_queue() argument
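
The xfs_buf.c hits above cover the whole buffer lifecycle: xfs_buf_hold()/xfs_buf_rele() for references, xfs_buf_lock()/xfs_buf_unlock() for ownership, xfs_buf_submit()/xfs_buf_submit_wait() for I/O, and the _XBF_DELWRI_Q machinery for delayed writes. A minimal sketch of the delayed-write pattern, assuming the caller already has a held, dirty buffer bp (bp and the surrounding function are placeholders); it mirrors the queue-then-submit usage visible in the results:

	LIST_HEAD(buffer_list);		/* caller-private delwri list */
	int error;

	xfs_buf_lock(bp);			/* queueing asserts the buffer lock */
	xfs_buf_delwri_queue(bp, &buffer_list);	/* sets _XBF_DELWRI_Q, takes its own hold */
	xfs_buf_relse(bp);			/* unlock + drop our reference */

	error = xfs_buf_delwri_submit(&buffer_list);	/* write everything, wait, collect errors */
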
H A Dxfs_buf.h270 void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
271 int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
278 void xfs_buf_hold(struct xfs_buf *bp);
288 #define xfs_buf_islocked(bp) \
289 ((bp)->b_sema.count <= 0)
292 extern int xfs_bwrite(struct xfs_buf *bp);
293 extern void xfs_buf_ioend(struct xfs_buf *bp);
296 extern void xfs_buf_submit(struct xfs_buf *bp);
297 extern int xfs_buf_submit_wait(struct xfs_buf *bp);
300 #define xfs_buf_zero(bp, off, len) \
301 xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
315 #define XFS_BUF_ZEROFLAGS(bp) \
316 ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
320 void xfs_buf_stale(struct xfs_buf *bp);
321 #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
322 #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
324 #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
325 #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
326 #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
328 #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
329 #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
330 #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
332 #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
333 #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
334 #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
336 #define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
337 #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
338 #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
350 #define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn)
351 #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
353 static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) xfs_buf_set_ref() argument
355 atomic_set(&bp->b_lru_ref, lru_ref); xfs_buf_set_ref()
358 static inline int xfs_buf_ispinned(struct xfs_buf *bp) xfs_buf_ispinned() argument
360 return atomic_read(&bp->b_pin_count); xfs_buf_ispinned()
363 static inline void xfs_buf_relse(xfs_buf_t *bp) xfs_buf_relse() argument
365 xfs_buf_unlock(bp); xfs_buf_relse()
366 xfs_buf_rele(bp); xfs_buf_relse()
370 xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset) xfs_buf_verify_cksum() argument
372 return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length), xfs_buf_verify_cksum()
377 xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset) xfs_buf_update_cksum() argument
379 xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), xfs_buf_update_cksum()
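
The xfs_buf.h hits show that most buffer state is toggled through thin XBF_* flag macros and inline wrappers around bp fields. As a hedged illustration (bp is assumed to be the cached superblock buffer and error an int), the force-a-synchronous-reread idiom from the xlog_do_recover() hits under xfs_log_recover.c combines them like this:

	XFS_BUF_UNDONE(bp);		/* clear XBF_DONE so the contents are re-read */
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);		/* synchronous I/O */
	bp->b_ops = &xfs_sb_buf_ops;	/* verify as a superblock on completion */
	error = xfs_buf_submit_wait(bp);
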
H A Dxfs_fsops.c129 struct xfs_buf *bp; xfs_growfs_get_hdr_buf() local
131 bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags); xfs_growfs_get_hdr_buf()
132 if (!bp) xfs_growfs_get_hdr_buf()
135 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); xfs_growfs_get_hdr_buf()
136 bp->b_bn = blkno; xfs_growfs_get_hdr_buf()
137 bp->b_maps[0].bm_bn = blkno; xfs_growfs_get_hdr_buf()
138 bp->b_ops = ops; xfs_growfs_get_hdr_buf()
140 return bp; xfs_growfs_get_hdr_buf()
155 xfs_buf_t *bp; xfs_growfs_data_private() local
177 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL); xfs_growfs_data_private()
180 xfs_buf_relse(bp); xfs_growfs_data_private()
222 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
226 if (!bp) { xfs_growfs_data_private()
231 agf = XFS_BUF_TO_AGF(bp); xfs_growfs_data_private()
255 error = xfs_bwrite(bp); xfs_growfs_data_private()
256 xfs_buf_relse(bp); xfs_growfs_data_private()
263 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
267 if (!bp) { xfs_growfs_data_private()
272 agfl = XFS_BUF_TO_AGFL(bp); xfs_growfs_data_private()
279 agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp); xfs_growfs_data_private()
283 error = xfs_bwrite(bp); xfs_growfs_data_private()
284 xfs_buf_relse(bp); xfs_growfs_data_private()
291 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
295 if (!bp) { xfs_growfs_data_private()
300 agi = XFS_BUF_TO_AGI(bp); xfs_growfs_data_private()
320 error = xfs_bwrite(bp); xfs_growfs_data_private()
321 xfs_buf_relse(bp); xfs_growfs_data_private()
328 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
333 if (!bp) { xfs_growfs_data_private()
339 xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1, xfs_growfs_data_private()
342 xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1, xfs_growfs_data_private()
345 arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1); xfs_growfs_data_private()
350 error = xfs_bwrite(bp); xfs_growfs_data_private()
351 xfs_buf_relse(bp); xfs_growfs_data_private()
358 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
362 if (!bp) { xfs_growfs_data_private()
368 xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1, xfs_growfs_data_private()
371 xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1, xfs_growfs_data_private()
374 arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1); xfs_growfs_data_private()
380 error = xfs_bwrite(bp); xfs_growfs_data_private()
381 xfs_buf_relse(bp); xfs_growfs_data_private()
388 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
392 if (!bp) { xfs_growfs_data_private()
398 xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0, xfs_growfs_data_private()
401 xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0, xfs_growfs_data_private()
404 error = xfs_bwrite(bp); xfs_growfs_data_private()
405 xfs_buf_relse(bp); xfs_growfs_data_private()
413 bp = xfs_growfs_get_hdr_buf(mp, xfs_growfs_data_private()
417 if (!bp) { xfs_growfs_data_private()
423 xfs_btree_init_block(mp, bp, XFS_FIBT_CRC_MAGIC, xfs_growfs_data_private()
427 xfs_btree_init_block(mp, bp, XFS_FIBT_MAGIC, 0, xfs_growfs_data_private()
430 error = xfs_bwrite(bp); xfs_growfs_data_private()
431 xfs_buf_relse(bp); xfs_growfs_data_private()
445 error = xfs_ialloc_read_agi(mp, tp, agno, &bp); xfs_growfs_data_private()
449 ASSERT(bp); xfs_growfs_data_private()
450 agi = XFS_BUF_TO_AGI(bp); xfs_growfs_data_private()
454 xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); xfs_growfs_data_private()
458 error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp); xfs_growfs_data_private()
462 ASSERT(bp); xfs_growfs_data_private()
463 agf = XFS_BUF_TO_AGF(bp); xfs_growfs_data_private()
468 xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); xfs_growfs_data_private()
520 XFS_FSS_TO_BB(mp, 1), 0, &bp, xfs_growfs_data_private()
523 bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp, xfs_growfs_data_private()
526 if (bp) { xfs_growfs_data_private()
527 bp->b_ops = &xfs_sb_buf_ops; xfs_growfs_data_private()
528 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); xfs_growfs_data_private()
547 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); xfs_growfs_data_private()
549 error = xfs_bwrite(bp); xfs_growfs_data_private()
550 xfs_buf_relse(bp); xfs_growfs_data_private()
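
xfs_growfs_get_hdr_buf() above encapsulates a recurring pattern: take an uncached buffer, zero it, aim it at a disk address, attach verifier ops, then write it out synchronously. A sketch under those assumptions (mp, blkno, numblks and ops are stand-ins; bp and error are locals):

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
	if (!bp)
		return -ENOMEM;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;		/* where the buffer will land on disk */
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;		/* read/write verifiers */

	error = xfs_bwrite(bp);		/* synchronous write */
	xfs_buf_relse(bp);
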
H A Dxfs_trans_buf.c77 struct xfs_buf *bp, _xfs_trans_bjoin()
82 ASSERT(bp->b_transp == NULL); _xfs_trans_bjoin()
89 xfs_buf_item_init(bp, tp->t_mountp); _xfs_trans_bjoin()
90 bip = bp->b_fspriv; _xfs_trans_bjoin()
111 bp->b_transp = tp; _xfs_trans_bjoin()
118 struct xfs_buf *bp) xfs_trans_bjoin()
120 _xfs_trans_bjoin(tp, bp, 0); xfs_trans_bjoin()
121 trace_xfs_trans_bjoin(bp->b_fspriv); xfs_trans_bjoin()
141 xfs_buf_t *bp; xfs_trans_get_buf_map() local
153 bp = xfs_trans_buf_item_match(tp, target, map, nmaps); xfs_trans_get_buf_map()
154 if (bp != NULL) { xfs_trans_get_buf_map()
155 ASSERT(xfs_buf_islocked(bp)); xfs_trans_get_buf_map()
157 xfs_buf_stale(bp); xfs_trans_get_buf_map()
158 XFS_BUF_DONE(bp); xfs_trans_get_buf_map()
161 ASSERT(bp->b_transp == tp); xfs_trans_get_buf_map()
162 bip = bp->b_fspriv; xfs_trans_get_buf_map()
167 return bp; xfs_trans_get_buf_map()
170 bp = xfs_buf_get_map(target, map, nmaps, flags); xfs_trans_get_buf_map()
171 if (bp == NULL) { xfs_trans_get_buf_map()
175 ASSERT(!bp->b_error); xfs_trans_get_buf_map()
177 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_get_buf_map()
178 trace_xfs_trans_get_buf(bp->b_fspriv); xfs_trans_get_buf_map()
179 return bp; xfs_trans_get_buf_map()
195 xfs_buf_t *bp; xfs_trans_getsb() local
211 bp = mp->m_sb_bp; xfs_trans_getsb()
212 if (bp->b_transp == tp) { xfs_trans_getsb()
213 bip = bp->b_fspriv; xfs_trans_getsb()
218 return bp; xfs_trans_getsb()
221 bp = xfs_getsb(mp, flags); xfs_trans_getsb()
222 if (bp == NULL) xfs_trans_getsb()
225 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_getsb()
226 trace_xfs_trans_getsb(bp->b_fspriv); xfs_trans_getsb()
227 return bp; xfs_trans_getsb()
251 struct xfs_buf *bp = NULL; xfs_trans_read_buf_map() local
265 bp = xfs_trans_buf_item_match(tp, target, map, nmaps); xfs_trans_read_buf_map()
266 if (bp) { xfs_trans_read_buf_map()
267 ASSERT(xfs_buf_islocked(bp)); xfs_trans_read_buf_map()
268 ASSERT(bp->b_transp == tp); xfs_trans_read_buf_map()
269 ASSERT(bp->b_fspriv != NULL); xfs_trans_read_buf_map()
270 ASSERT(!bp->b_error); xfs_trans_read_buf_map()
271 ASSERT(bp->b_flags & XBF_DONE); xfs_trans_read_buf_map()
278 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); xfs_trans_read_buf_map()
282 bip = bp->b_fspriv; xfs_trans_read_buf_map()
287 *bpp = bp; xfs_trans_read_buf_map()
291 bp = xfs_buf_read_map(target, map, nmaps, flags, ops); xfs_trans_read_buf_map()
292 if (!bp) { xfs_trans_read_buf_map()
307 if (bp->b_error) { xfs_trans_read_buf_map()
308 error = bp->b_error; xfs_trans_read_buf_map()
310 xfs_buf_ioerror_alert(bp, __func__); xfs_trans_read_buf_map()
311 bp->b_flags &= ~XBF_DONE; xfs_trans_read_buf_map()
312 xfs_buf_stale(bp); xfs_trans_read_buf_map()
316 xfs_buf_relse(bp); xfs_trans_read_buf_map()
325 xfs_buf_relse(bp); xfs_trans_read_buf_map()
326 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); xfs_trans_read_buf_map()
331 _xfs_trans_bjoin(tp, bp, 1); xfs_trans_read_buf_map()
332 trace_xfs_trans_read_buf(bp->b_fspriv); xfs_trans_read_buf_map()
334 *bpp = bp; xfs_trans_read_buf_map()
340 * Release the buffer bp which was previously acquired with one of the
356 xfs_buf_t *bp) xfs_trans_brelse()
364 ASSERT(bp->b_transp == NULL); xfs_trans_brelse()
365 xfs_buf_relse(bp); xfs_trans_brelse()
369 ASSERT(bp->b_transp == tp); xfs_trans_brelse()
370 bip = bp->b_fspriv; xfs_trans_brelse()
433 ASSERT(bp->b_pincount == 0); xfs_trans_brelse()
438 xfs_buf_item_relse(bp); xfs_trans_brelse()
441 bp->b_transp = NULL; xfs_trans_brelse()
442 xfs_buf_relse(bp); xfs_trans_brelse()
453 xfs_buf_t *bp) xfs_trans_bhold()
455 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_bhold()
457 ASSERT(bp->b_transp == tp); xfs_trans_bhold()
473 xfs_buf_t *bp) xfs_trans_bhold_release()
475 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_bhold_release()
477 ASSERT(bp->b_transp == tp); xfs_trans_bhold_release()
499 xfs_buf_t *bp, xfs_trans_log_buf()
503 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_log_buf()
505 ASSERT(bp->b_transp == tp); xfs_trans_log_buf()
507 ASSERT(first <= last && last < BBTOB(bp->b_length)); xfs_trans_log_buf()
508 ASSERT(bp->b_iodone == NULL || xfs_trans_log_buf()
509 bp->b_iodone == xfs_buf_iodone_callbacks); xfs_trans_log_buf()
521 XFS_BUF_DONE(bp); xfs_trans_log_buf()
524 bp->b_iodone = xfs_buf_iodone_callbacks; xfs_trans_log_buf()
537 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_trans_log_buf()
538 XFS_BUF_UNSTALE(bp); xfs_trans_log_buf()
587 xfs_buf_t *bp) xfs_trans_binval()
589 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_binval()
592 ASSERT(bp->b_transp == tp); xfs_trans_binval()
603 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_trans_binval()
613 xfs_buf_stale(bp); xfs_trans_binval()
642 xfs_buf_t *bp) xfs_trans_inode_buf()
644 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_inode_buf()
646 ASSERT(bp->b_transp == tp); xfs_trans_inode_buf()
651 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_inode_buf()
666 xfs_buf_t *bp) xfs_trans_stale_inode_buf()
668 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_stale_inode_buf()
670 ASSERT(bp->b_transp == tp); xfs_trans_stale_inode_buf()
676 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_stale_inode_buf()
691 xfs_buf_t *bp) xfs_trans_inode_alloc_buf()
693 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_trans_inode_alloc_buf()
695 ASSERT(bp->b_transp == tp); xfs_trans_inode_alloc_buf()
700 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); xfs_trans_inode_alloc_buf()
714 struct xfs_buf *bp) xfs_trans_ordered_buf()
716 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_ordered_buf()
718 ASSERT(bp->b_transp == tp); xfs_trans_ordered_buf()
733 struct xfs_buf *bp, xfs_trans_buf_set_type()
736 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_buf_set_type()
741 ASSERT(bp->b_transp == tp); xfs_trans_buf_set_type()
775 xfs_buf_t *bp, xfs_trans_dquot_buf()
778 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_trans_dquot_buf()
801 xfs_trans_buf_set_type(tp, bp, type); xfs_trans_dquot_buf()
75 _xfs_trans_bjoin( struct xfs_trans *tp, struct xfs_buf *bp, int reset_recur) _xfs_trans_bjoin() argument
116 xfs_trans_bjoin( struct xfs_trans *tp, struct xfs_buf *bp) xfs_trans_bjoin() argument
355 xfs_trans_brelse(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_brelse() argument
452 xfs_trans_bhold(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_bhold() argument
472 xfs_trans_bhold_release(xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_bhold_release() argument
498 xfs_trans_log_buf(xfs_trans_t *tp, xfs_buf_t *bp, uint first, uint last) xfs_trans_log_buf() argument
585 xfs_trans_binval( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_binval() argument
640 xfs_trans_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_inode_buf() argument
664 xfs_trans_stale_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_stale_inode_buf() argument
689 xfs_trans_inode_alloc_buf( xfs_trans_t *tp, xfs_buf_t *bp) xfs_trans_inode_alloc_buf() argument
712 xfs_trans_ordered_buf( struct xfs_trans *tp, struct xfs_buf *bp) xfs_trans_ordered_buf() argument
731 xfs_trans_buf_set_type( struct xfs_trans *tp, struct xfs_buf *bp, enum xfs_blft type) xfs_trans_buf_set_type() argument
773 xfs_trans_dquot_buf( xfs_trans_t *tp, xfs_buf_t *bp, uint type) xfs_trans_dquot_buf() argument
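
The xfs_trans_buf.c hits are the transactional wrappers: a buffer joined to a transaction (_xfs_trans_bjoin) gains a buf log item, and xfs_trans_log_buf() records the dirtied byte range so commit writes it back. A minimal read-modify-log sketch, assuming tp, mp and blkno exist and the one-sector read needs no special ops (all placeholders):

	struct xfs_buf	*bp;
	int		error;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno,
				   XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error)
		return error;

	/* ... modify the contents at bp->b_addr ... */

	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	/* transaction commit writes bp; xfs_trans_brelse(tp, bp) would
	 * instead hand an unmodified buffer back early */
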
H A Dxfs_buf_item.c41 STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
68 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_size_segment() local
102 } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) != xfs_buf_item_size_segment()
103 (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) + xfs_buf_item_size_segment()
189 struct xfs_buf *bp, xfs_buf_item_copy_iovec()
196 xfs_buf_offset(bp, offset), xfs_buf_item_copy_iovec()
202 struct xfs_buf *bp, xfs_buf_item_straddle()
207 return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) != xfs_buf_item_straddle()
208 (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) + xfs_buf_item_straddle()
220 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_format_segment() local
283 xfs_buf_item_copy_iovec(lv, vecp, bp, offset, xfs_buf_item_format_segment()
288 xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) { xfs_buf_item_format_segment()
289 xfs_buf_item_copy_iovec(lv, vecp, bp, offset, xfs_buf_item_format_segment()
314 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_format() local
362 offset += bp->b_maps[i].bm_len; xfs_buf_item_format()
416 xfs_buf_t *bp = bip->bli_buf; xfs_buf_item_unpin() local
421 ASSERT(bp->b_fspriv == bip); xfs_buf_item_unpin()
428 if (atomic_dec_and_test(&bp->b_pin_count)) xfs_buf_item_unpin()
429 wake_up_all(&bp->b_waiters); xfs_buf_item_unpin()
433 ASSERT(xfs_buf_islocked(bp)); xfs_buf_item_unpin()
434 ASSERT(XFS_BUF_ISSTALE(bp)); xfs_buf_item_unpin()
455 bp->b_transp = NULL; xfs_buf_item_unpin()
465 xfs_buf_do_callbacks(bp); xfs_buf_item_unpin()
466 bp->b_fspriv = NULL; xfs_buf_item_unpin()
467 bp->b_iodone = NULL; xfs_buf_item_unpin()
471 xfs_buf_item_relse(bp); xfs_buf_item_unpin()
472 ASSERT(bp->b_fspriv == NULL); xfs_buf_item_unpin()
474 xfs_buf_relse(bp); xfs_buf_item_unpin()
485 * processing (via the bp->b_iodone callback), and then finally xfs_buf_item_unpin()
492 xfs_buf_lock(bp); xfs_buf_item_unpin()
493 xfs_buf_hold(bp); xfs_buf_item_unpin()
494 bp->b_flags |= XBF_ASYNC; xfs_buf_item_unpin()
495 xfs_buf_ioerror(bp, -EIO); xfs_buf_item_unpin()
496 XFS_BUF_UNDONE(bp); xfs_buf_item_unpin()
497 xfs_buf_stale(bp); xfs_buf_item_unpin()
498 xfs_buf_ioend(bp); xfs_buf_item_unpin()
516 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_push() local
519 if (xfs_buf_ispinned(bp)) xfs_buf_item_push()
521 if (!xfs_buf_trylock(bp)) { xfs_buf_item_push()
529 if (xfs_buf_ispinned(bp)) xfs_buf_item_push()
539 if ((bp->b_flags & XBF_WRITE_FAIL) && xfs_buf_item_push()
541 xfs_warn(bp->b_target->bt_mount, xfs_buf_item_push()
543 (long long)bp->b_bn); xfs_buf_item_push()
546 if (!xfs_buf_delwri_queue(bp, buffer_list)) xfs_buf_item_push()
548 xfs_buf_unlock(bp); xfs_buf_item_push()
576 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_unlock() local
582 bp->b_transp = NULL; xfs_buf_item_unlock()
647 xfs_buf_item_relse(bp); xfs_buf_item_unlock()
651 xfs_buf_item_relse(bp); xfs_buf_item_unlock()
656 xfs_buf_relse(bp); xfs_buf_item_unlock()
751 struct xfs_buf *bp, xfs_buf_item_init()
754 struct xfs_log_item *lip = bp->b_fspriv; xfs_buf_item_init()
767 ASSERT(bp->b_target->bt_mount == mp); xfs_buf_item_init()
773 bip->bli_buf = bp; xfs_buf_item_init()
784 error = xfs_buf_item_get_format(bip, bp->b_map_count); xfs_buf_item_init()
793 chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len), xfs_buf_item_init()
798 bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn; xfs_buf_item_init()
799 bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len; xfs_buf_item_init()
807 if (bp->b_fspriv) xfs_buf_item_init()
808 bip->bli_item.li_bio_list = bp->b_fspriv; xfs_buf_item_init()
809 bp->b_fspriv = bip; xfs_buf_item_init()
810 xfs_buf_hold(bp); xfs_buf_item_init()
909 struct xfs_buf *bp = bip->bli_buf; xfs_buf_item_log() local
918 end = start + BBTOB(bp->b_maps[i].bm_len); xfs_buf_item_log()
920 start += BBTOB(bp->b_maps[i].bm_len); xfs_buf_item_log()
931 start += bp->b_maps[i].bm_len; xfs_buf_item_log()
964 xfs_buf_t *bp) xfs_buf_item_relse()
966 xfs_buf_log_item_t *bip = bp->b_fspriv; xfs_buf_item_relse()
968 trace_xfs_buf_item_relse(bp, _RET_IP_); xfs_buf_item_relse()
971 bp->b_fspriv = bip->bli_item.li_bio_list; xfs_buf_item_relse()
972 if (bp->b_fspriv == NULL) xfs_buf_item_relse()
973 bp->b_iodone = NULL; xfs_buf_item_relse()
975 xfs_buf_rele(bp); xfs_buf_item_relse()
991 xfs_buf_t *bp, xfs_buf_attach_iodone()
997 ASSERT(xfs_buf_islocked(bp)); xfs_buf_attach_iodone()
1000 head_lip = bp->b_fspriv; xfs_buf_attach_iodone()
1005 bp->b_fspriv = lip; xfs_buf_attach_iodone()
1008 ASSERT(bp->b_iodone == NULL || xfs_buf_attach_iodone()
1009 bp->b_iodone == xfs_buf_iodone_callbacks); xfs_buf_attach_iodone()
1010 bp->b_iodone = xfs_buf_iodone_callbacks; xfs_buf_attach_iodone()
1027 struct xfs_buf *bp) xfs_buf_do_callbacks()
1031 while ((lip = bp->b_fspriv) != NULL) { xfs_buf_do_callbacks()
1032 bp->b_fspriv = lip->li_bio_list; xfs_buf_do_callbacks()
1041 lip->li_cb(bp, lip); xfs_buf_do_callbacks()
1054 struct xfs_buf *bp) xfs_buf_iodone_callbacks()
1056 struct xfs_log_item *lip = bp->b_fspriv; xfs_buf_iodone_callbacks()
1061 if (likely(!bp->b_error)) xfs_buf_iodone_callbacks()
1069 xfs_buf_stale(bp); xfs_buf_iodone_callbacks()
1070 XFS_BUF_DONE(bp); xfs_buf_iodone_callbacks()
1071 trace_xfs_buf_item_iodone(bp, _RET_IP_); xfs_buf_iodone_callbacks()
1075 if (bp->b_target != lasttarg || xfs_buf_iodone_callbacks()
1078 xfs_buf_ioerror_alert(bp, __func__); xfs_buf_iodone_callbacks()
1080 lasttarg = bp->b_target; xfs_buf_iodone_callbacks()
1093 if (XFS_BUF_ISASYNC(bp)) { xfs_buf_iodone_callbacks()
1094 ASSERT(bp->b_iodone != NULL); xfs_buf_iodone_callbacks()
1096 trace_xfs_buf_item_iodone_async(bp, _RET_IP_); xfs_buf_iodone_callbacks()
1098 xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ xfs_buf_iodone_callbacks()
1100 if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) { xfs_buf_iodone_callbacks()
1101 bp->b_flags |= XBF_WRITE | XBF_ASYNC | xfs_buf_iodone_callbacks()
1103 xfs_buf_submit(bp); xfs_buf_iodone_callbacks()
1105 xfs_buf_relse(bp); xfs_buf_iodone_callbacks()
1115 xfs_buf_stale(bp); xfs_buf_iodone_callbacks()
1116 XFS_BUF_DONE(bp); xfs_buf_iodone_callbacks()
1118 trace_xfs_buf_error_relse(bp, _RET_IP_); xfs_buf_iodone_callbacks()
1121 xfs_buf_do_callbacks(bp); xfs_buf_iodone_callbacks()
1122 bp->b_fspriv = NULL; xfs_buf_iodone_callbacks()
1123 bp->b_iodone = NULL; xfs_buf_iodone_callbacks()
1124 xfs_buf_ioend(bp); xfs_buf_iodone_callbacks()
1136 struct xfs_buf *bp, xfs_buf_iodone()
1141 ASSERT(BUF_ITEM(lip)->bli_buf == bp); xfs_buf_iodone()
1143 xfs_buf_rele(bp); xfs_buf_iodone()
186 xfs_buf_item_copy_iovec( struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, struct xfs_buf *bp, uint offset, int first_bit, uint nbits) xfs_buf_item_copy_iovec() argument
201 xfs_buf_item_straddle( struct xfs_buf *bp, uint offset, int next_bit, int last_bit) xfs_buf_item_straddle() argument
750 xfs_buf_item_init( struct xfs_buf *bp, struct xfs_mount *mp) xfs_buf_item_init() argument
963 xfs_buf_item_relse( xfs_buf_t *bp) xfs_buf_item_relse() argument
990 xfs_buf_attach_iodone( xfs_buf_t *bp, void (*cb)(xfs_buf_t *, xfs_log_item_t *), xfs_log_item_t *lip) xfs_buf_attach_iodone() argument
1026 xfs_buf_do_callbacks( struct xfs_buf *bp) xfs_buf_do_callbacks() argument
1053 xfs_buf_iodone_callbacks( struct xfs_buf *bp) xfs_buf_iodone_callbacks() argument
1135 xfs_buf_iodone( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_buf_iodone() argument
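
xfs_buf_attach_iodone() above is how higher-level log items hook buffer I/O completion: callbacks chain on bp->b_fspriv and bp->b_iodone is pointed at xfs_buf_iodone_callbacks(), which walks the chain via xfs_buf_do_callbacks(). A sketch of attaching one callback (my_done and lip are illustrative stand-ins, not names from the source):

	ASSERT(xfs_buf_islocked(bp));
	xfs_buf_attach_iodone(bp, my_done, lip);
	/* at I/O completion xfs_buf_iodone_callbacks() runs and, on the
	 * success path, ends up calling my_done(bp, lip) */
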
H A Dxfs_attr_list.c228 struct xfs_buf *bp; xfs_attr_node_list() local
242 bp = NULL; xfs_attr_node_list()
245 &bp, XFS_ATTR_FORK); xfs_attr_node_list()
248 if (bp) { xfs_attr_node_list()
251 node = bp->b_addr; xfs_attr_node_list()
256 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
257 bp = NULL; xfs_attr_node_list()
261 leaf = bp->b_addr; xfs_attr_node_list()
268 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
269 bp = NULL; xfs_attr_node_list()
273 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
274 bp = NULL; xfs_attr_node_list()
279 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
280 bp = NULL; xfs_attr_node_list()
290 if (bp == NULL) { xfs_attr_node_list()
296 cursor->blkno, -1, &bp, xfs_attr_node_list()
300 node = bp->b_addr; xfs_attr_node_list()
311 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
327 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
330 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
333 ASSERT(bp != NULL); xfs_attr_node_list()
341 leaf = bp->b_addr; xfs_attr_node_list()
342 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_node_list()
344 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
351 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
352 error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp); xfs_attr_node_list()
356 xfs_trans_brelse(NULL, bp); xfs_attr_node_list()
365 struct xfs_buf *bp, xfs_attr3_leaf_list_int()
379 leaf = bp->b_addr; xfs_attr3_leaf_list_int()
493 struct xfs_buf *bp; xfs_attr_leaf_list() local
498 error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); xfs_attr_leaf_list()
502 error = xfs_attr3_leaf_list_int(bp, context); xfs_attr_leaf_list()
503 xfs_trans_brelse(NULL, bp); xfs_attr_leaf_list()
364 xfs_attr3_leaf_list_int( struct xfs_buf *bp, struct xfs_attr_list_context *context) xfs_attr3_leaf_list_int() argument
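
xfs_attr_node_list() above repeatedly reads da-tree blocks and drops them with xfs_trans_brelse(NULL, bp); with a NULL transaction that reduces to a plain xfs_buf_relse(), as the xfs_trans_brelse() hits show. The leaf case collapses to the short cycle in xfs_attr_leaf_list(); a sketch assuming dp, cursor and context are already set up:

	struct xfs_buf	*bp;
	int		error;

	error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
	if (error)
		return error;
	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(NULL, bp);	/* no transaction: plain release */
	return error;
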
H A Dxfs_attr_inactive.c54 struct xfs_buf *bp; xfs_attr3_leaf_freextent() local
91 bp = xfs_trans_get_buf(*trans, xfs_attr3_leaf_freextent()
94 if (!bp) xfs_attr3_leaf_freextent()
96 xfs_trans_binval(*trans, bp); xfs_attr3_leaf_freextent()
122 struct xfs_buf *bp) xfs_attr3_leaf_inactive()
135 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_inactive()
137 leaf = bp->b_addr; xfs_attr3_leaf_inactive()
158 xfs_trans_brelse(*trans, bp); xfs_attr3_leaf_inactive()
185 xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */ xfs_attr3_leaf_inactive()
211 struct xfs_buf *bp, xfs_attr3_node_inactive()
227 xfs_trans_brelse(*trans, bp); /* no locks for later trans */ xfs_attr3_node_inactive()
231 node = bp->b_addr; xfs_attr3_node_inactive()
233 parent_blkno = bp->b_bn; xfs_attr3_node_inactive()
235 xfs_trans_brelse(*trans, bp); xfs_attr3_node_inactive()
240 xfs_trans_brelse(*trans, bp); /* no locks for later trans */ xfs_attr3_node_inactive()
302 &bp, XFS_ATTR_FORK); xfs_attr3_node_inactive()
306 xfs_trans_brelse(*trans, bp); xfs_attr3_node_inactive()
331 struct xfs_buf *bp; xfs_attr3_root_inactive() local
341 error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); xfs_attr3_root_inactive()
344 blkno = bp->b_bn; xfs_attr3_root_inactive()
350 info = bp->b_addr; xfs_attr3_root_inactive()
354 error = xfs_attr3_node_inactive(trans, dp, bp, 1); xfs_attr3_root_inactive()
358 error = xfs_attr3_leaf_inactive(trans, dp, bp); xfs_attr3_root_inactive()
362 xfs_trans_brelse(*trans, bp); xfs_attr3_root_inactive()
371 error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); xfs_attr3_root_inactive()
374 xfs_trans_binval(*trans, bp); /* remove from cache */ xfs_attr3_root_inactive()
119 xfs_attr3_leaf_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp) xfs_attr3_leaf_inactive() argument
208 xfs_attr3_node_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp, int level) xfs_attr3_node_inactive() argument
H A Dxfs_symlink.c52 struct xfs_buf *bp; xfs_readlink_bmap() local
73 bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0, xfs_readlink_bmap()
75 if (!bp) xfs_readlink_bmap()
77 error = bp->b_error; xfs_readlink_bmap()
79 xfs_buf_ioerror_alert(bp, __func__); xfs_readlink_bmap()
80 xfs_buf_relse(bp); xfs_readlink_bmap()
91 cur_chunk = bp->b_addr; xfs_readlink_bmap()
94 byte_cnt, bp)) { xfs_readlink_bmap()
99 xfs_buf_relse(bp); xfs_readlink_bmap()
112 xfs_buf_relse(bp); xfs_readlink_bmap()
190 xfs_buf_t *bp; xfs_symlink() local
343 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, xfs_symlink()
345 if (!bp) { xfs_symlink()
349 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink()
354 buf = bp->b_addr; xfs_symlink()
356 byte_cnt, bp); xfs_symlink()
364 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF); xfs_symlink()
365 xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) - xfs_symlink()
366 (char *)bp->b_addr); xfs_symlink()
436 xfs_buf_t *bp; xfs_inactive_symlink_rmt() local
493 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, xfs_inactive_symlink_rmt()
496 if (!bp) { xfs_inactive_symlink_rmt()
500 xfs_trans_binval(tp, bp); xfs_inactive_symlink_rmt()
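
xfs_readlink_bmap() above shows the uncached read path for remote symlink blocks: xfs_buf_read() hands back a buffer whose b_error must be checked before the contents are trusted. A sketch, with d (disk address) and byte_cnt as placeholders:

	struct xfs_buf	*bp;
	int		error;

	bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0,
			  &xfs_symlink_buf_ops);
	if (!bp)
		return -ENOMEM;
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_buf_relse(bp);
		return error;
	}
	/* ... copy the link target out of bp->b_addr ... */
	xfs_buf_relse(bp);
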
H A Dxfs_log_recover.c104 struct xfs_buf *bp; xlog_get_bp() local
133 bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0); xlog_get_bp()
134 if (bp) xlog_get_bp()
135 xfs_buf_unlock(bp); xlog_get_bp()
136 return bp; xlog_get_bp()
141 xfs_buf_t *bp) xlog_put_bp()
143 xfs_buf_free(bp); xlog_put_bp()
155 struct xfs_buf *bp) xlog_align()
159 ASSERT(offset + nbblks <= bp->b_length); xlog_align()
160 return bp->b_addr + BBTOB(offset); xlog_align()
172 struct xfs_buf *bp) xlog_bread_noalign()
187 ASSERT(nbblks <= bp->b_length); xlog_bread_noalign()
189 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); xlog_bread_noalign()
190 XFS_BUF_READ(bp); xlog_bread_noalign()
191 bp->b_io_length = nbblks; xlog_bread_noalign()
192 bp->b_error = 0; xlog_bread_noalign()
194 error = xfs_buf_submit_wait(bp); xlog_bread_noalign()
196 xfs_buf_ioerror_alert(bp, __func__); xlog_bread_noalign()
205 struct xfs_buf *bp, xlog_bread()
210 error = xlog_bread_noalign(log, blk_no, nbblks, bp); xlog_bread()
214 *offset = xlog_align(log, blk_no, nbblks, bp); xlog_bread()
227 struct xfs_buf *bp, xlog_bread_offset()
230 char *orig_offset = bp->b_addr; xlog_bread_offset()
231 int orig_len = BBTOB(bp->b_length); xlog_bread_offset()
234 error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks)); xlog_bread_offset()
238 error = xlog_bread_noalign(log, blk_no, nbblks, bp); xlog_bread_offset()
241 error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len); xlog_bread_offset()
257 struct xfs_buf *bp) xlog_bwrite()
272 ASSERT(nbblks <= bp->b_length); xlog_bwrite()
274 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); xlog_bwrite()
275 XFS_BUF_ZEROFLAGS(bp); xlog_bwrite()
276 xfs_buf_hold(bp); xlog_bwrite()
277 xfs_buf_lock(bp); xlog_bwrite()
278 bp->b_io_length = nbblks; xlog_bwrite()
279 bp->b_error = 0; xlog_bwrite()
281 error = xfs_bwrite(bp); xlog_bwrite()
283 xfs_buf_ioerror_alert(bp, __func__); xlog_bwrite()
284 xfs_buf_relse(bp); xlog_bwrite()
368 struct xfs_buf *bp) xlog_recover_iodone()
370 if (bp->b_error) { xlog_recover_iodone()
375 if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { xlog_recover_iodone()
376 xfs_buf_ioerror_alert(bp, __func__); xlog_recover_iodone()
377 xfs_force_shutdown(bp->b_target->bt_mount, xlog_recover_iodone()
381 bp->b_iodone = NULL; xlog_recover_iodone()
382 xfs_buf_ioend(bp); xlog_recover_iodone()
394 struct xfs_buf *bp, xlog_find_cycle_start()
408 error = xlog_bread(log, mid_blk, 1, bp, &offset); xlog_find_cycle_start()
444 xfs_buf_t *bp; xlog_find_verify_cycle() local
458 while (!(bp = xlog_get_bp(log, bufblks))) { xlog_find_verify_cycle()
469 error = xlog_bread(log, i, bcount, bp, &buf); xlog_find_verify_cycle()
487 xlog_put_bp(bp); xlog_find_verify_cycle()
511 xfs_buf_t *bp; xlog_find_verify_log_record() local
521 if (!(bp = xlog_get_bp(log, num_blks))) { xlog_find_verify_log_record()
522 if (!(bp = xlog_get_bp(log, 1))) xlog_find_verify_log_record()
526 error = xlog_bread(log, start_blk, num_blks, bp, &offset); xlog_find_verify_log_record()
543 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_verify_log_record()
596 xlog_put_bp(bp); xlog_find_verify_log_record()
618 xfs_buf_t *bp; xlog_find_head() local
648 bp = xlog_get_bp(log, 1); xlog_find_head()
649 if (!bp) xlog_find_head()
652 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_head()
659 error = xlog_bread(log, last_blk, 1, bp, &offset); xlog_find_head()
729 if ((error = xlog_find_cycle_start(log, bp, first_blk, xlog_find_head()
849 xlog_put_bp(bp); xlog_find_head()
863 xlog_put_bp(bp); xlog_find_head()
895 xfs_buf_t *bp; xlog_find_tail() local
910 bp = xlog_get_bp(log, 1); xlog_find_tail()
911 if (!bp) xlog_find_tail()
914 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_tail()
930 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_tail()
947 error = xlog_bread(log, i, 1, bp, &offset); xlog_find_tail()
960 xlog_put_bp(bp); xlog_find_tail()
1023 error = xlog_bread(log, umount_data_blk, 1, bp, &offset); xlog_find_tail()
1073 xlog_put_bp(bp); xlog_find_tail()
1101 xfs_buf_t *bp; xlog_find_zeroed() local
1111 bp = xlog_get_bp(log, 1); xlog_find_zeroed()
1112 if (!bp) xlog_find_zeroed()
1114 error = xlog_bread(log, 0, 1, bp, &offset); xlog_find_zeroed()
1121 xlog_put_bp(bp); xlog_find_zeroed()
1126 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset); xlog_find_zeroed()
1132 xlog_put_bp(bp); xlog_find_zeroed()
1148 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0))) xlog_find_zeroed()
1188 xlog_put_bp(bp); xlog_find_zeroed()
1231 xfs_buf_t *bp; xlog_write_log_records() local
1248 while (!(bp = xlog_get_bp(log, bufblks))) { xlog_write_log_records()
1260 error = xlog_bread_noalign(log, start_block, 1, bp); xlog_write_log_records()
1279 offset = bp->b_addr + BBTOB(ealign - start_block); xlog_write_log_records()
1281 bp, offset); xlog_write_log_records()
1287 offset = xlog_align(log, start_block, endcount, bp); xlog_write_log_records()
1293 error = xlog_bwrite(log, start_block, endcount, bp); xlog_write_log_records()
1301 xlog_put_bp(bp); xlog_write_log_records()
1707 struct xfs_buf *bp, xlog_recover_do_inode_buffer()
1728 bp->b_ops = &xfs_inode_buf_ops; xlog_recover_do_inode_buffer()
1730 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog; xlog_recover_do_inode_buffer()
1773 BBTOB(bp->b_io_length)); xlog_recover_do_inode_buffer()
1784 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). " xlog_recover_do_inode_buffer()
1786 item, bp); xlog_recover_do_inode_buffer()
1792 buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset); xlog_recover_do_inode_buffer()
1801 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize)); xlog_recover_do_inode_buffer()
1831 struct xfs_buf *bp) xlog_recover_get_buf_lsn()
1836 void *blk = bp->b_addr; xlog_recover_get_buf_lsn()
1977 struct xfs_buf *bp, xlog_recover_validate_buf_type()
1980 struct xfs_da_blkinfo *info = bp->b_addr; xlog_recover_validate_buf_type()
1996 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); xlog_recover_validate_buf_type()
1997 magic16 = be16_to_cpu(*(__be16*)bp->b_addr); xlog_recover_validate_buf_type()
2006 bp->b_ops = &xfs_allocbt_buf_ops; xlog_recover_validate_buf_type()
2012 bp->b_ops = &xfs_inobt_buf_ops; xlog_recover_validate_buf_type()
2016 bp->b_ops = &xfs_bmbt_buf_ops; xlog_recover_validate_buf_type()
2030 bp->b_ops = &xfs_agf_buf_ops; xlog_recover_validate_buf_type()
2038 bp->b_ops = &xfs_agfl_buf_ops; xlog_recover_validate_buf_type()
2046 bp->b_ops = &xfs_agi_buf_ops; xlog_recover_validate_buf_type()
2057 bp->b_ops = &xfs_dquot_buf_ops; xlog_recover_validate_buf_type()
2070 bp->b_ops = &xfs_inode_buf_ops; xlog_recover_validate_buf_type()
2078 bp->b_ops = &xfs_symlink_buf_ops; xlog_recover_validate_buf_type()
2087 bp->b_ops = &xfs_dir3_block_buf_ops; xlog_recover_validate_buf_type()
2096 bp->b_ops = &xfs_dir3_data_buf_ops; xlog_recover_validate_buf_type()
2105 bp->b_ops = &xfs_dir3_free_buf_ops; xlog_recover_validate_buf_type()
2114 bp->b_ops = &xfs_dir3_leaf1_buf_ops; xlog_recover_validate_buf_type()
2123 bp->b_ops = &xfs_dir3_leafn_buf_ops; xlog_recover_validate_buf_type()
2132 bp->b_ops = &xfs_da3_node_buf_ops; xlog_recover_validate_buf_type()
2141 bp->b_ops = &xfs_attr3_leaf_buf_ops; xlog_recover_validate_buf_type()
2149 bp->b_ops = &xfs_attr3_rmt_buf_ops; xlog_recover_validate_buf_type()
2157 bp->b_ops = &xfs_sb_buf_ops; xlog_recover_validate_buf_type()
2176 struct xfs_buf *bp, xlog_recover_do_reg_buffer()
2198 ASSERT(BBTOB(bp->b_io_length) >= xlog_recover_do_reg_buffer()
2238 memcpy(xfs_buf_offset(bp, xlog_recover_do_reg_buffer()
2250 xlog_recover_validate_buf_type(mp, bp, buf_f); xlog_recover_do_reg_buffer()
2267 struct xfs_buf *bp, xlog_recover_do_dquot_buffer()
2293 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); xlog_recover_do_dquot_buffer()
2329 xfs_buf_t *bp; xlog_recover_buffer_pass2() local
2350 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, xlog_recover_buffer_pass2()
2352 if (!bp) xlog_recover_buffer_pass2()
2354 error = bp->b_error; xlog_recover_buffer_pass2()
2356 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); xlog_recover_buffer_pass2()
2379 lsn = xlog_recover_get_buf_lsn(mp, bp); xlog_recover_buffer_pass2()
2381 xlog_recover_validate_buf_type(mp, bp, buf_f); xlog_recover_buffer_pass2()
2386 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); xlog_recover_buffer_pass2()
2393 dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); xlog_recover_buffer_pass2()
2397 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); xlog_recover_buffer_pass2()
2416 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && xlog_recover_buffer_pass2()
2417 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize, xlog_recover_buffer_pass2()
2419 xfs_buf_stale(bp); xlog_recover_buffer_pass2()
2420 error = xfs_bwrite(bp); xlog_recover_buffer_pass2()
2422 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_buffer_pass2()
2423 bp->b_iodone = xlog_recover_iodone; xlog_recover_buffer_pass2()
2424 xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_buffer_pass2()
2428 xfs_buf_relse(bp); xlog_recover_buffer_pass2()
2517 xfs_buf_t *bp; xlog_recover_inode_pass2() local
2551 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, xlog_recover_inode_pass2()
2553 if (!bp) { xlog_recover_inode_pass2()
2557 error = bp->b_error; xlog_recover_inode_pass2()
2559 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)"); xlog_recover_inode_pass2()
2563 dip = xfs_buf_offset(bp, in_f->ilf_boffset); xlog_recover_inode_pass2()
2571 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", xlog_recover_inode_pass2()
2572 __func__, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2()
2640 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", xlog_recover_inode_pass2()
2641 __func__, item, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2()
2653 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", xlog_recover_inode_pass2()
2654 __func__, item, dip, bp, in_f->ilf_ino); xlog_recover_inode_pass2()
2664 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", xlog_recover_inode_pass2()
2665 __func__, item, dip, bp, in_f->ilf_ino, xlog_recover_inode_pass2()
2676 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, xlog_recover_inode_pass2()
2677 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); xlog_recover_inode_pass2()
2788 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_inode_pass2()
2789 bp->b_iodone = xlog_recover_iodone; xlog_recover_inode_pass2()
2790 xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_inode_pass2()
2793 xfs_buf_relse(bp); xlog_recover_inode_pass2()
2838 xfs_buf_t *bp; xlog_recover_dquot_pass2() local
2896 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, xlog_recover_dquot_pass2()
2901 ASSERT(bp); xlog_recover_dquot_pass2()
2902 ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); xlog_recover_dquot_pass2()
2924 ASSERT(bp->b_target->bt_mount == mp); xlog_recover_dquot_pass2()
2925 bp->b_iodone = xlog_recover_iodone; xlog_recover_dquot_pass2()
2926 xfs_buf_delwri_queue(bp, buffer_list); xlog_recover_dquot_pass2()
2929 xfs_buf_relse(bp); xlog_recover_dquot_pass2()
4538 xfs_buf_t *bp; xlog_do_recover() local
4570 bp = xfs_getsb(log->l_mp, 0); xlog_do_recover()
4571 XFS_BUF_UNDONE(bp); xlog_do_recover()
4572 ASSERT(!(XFS_BUF_ISWRITE(bp))); xlog_do_recover()
4573 XFS_BUF_READ(bp); xlog_do_recover()
4574 XFS_BUF_UNASYNC(bp); xlog_do_recover()
4575 bp->b_ops = &xfs_sb_buf_ops; xlog_do_recover()
4577 error = xfs_buf_submit_wait(bp); xlog_do_recover()
4580 xfs_buf_ioerror_alert(bp, __func__); xlog_do_recover()
4583 xfs_buf_relse(bp); xlog_do_recover()
4589 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); xlog_do_recover()
4594 xfs_buf_relse(bp); xlog_do_recover()
140 xlog_put_bp( xfs_buf_t *bp) xlog_put_bp() argument
151 xlog_align( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_align() argument
168 xlog_bread_noalign( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_bread_noalign() argument
201 xlog_bread( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp, char **offset) xlog_bread() argument
223 xlog_bread_offset( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp, char *offset) xlog_bread_offset() argument
253 xlog_bwrite( struct xlog *log, xfs_daddr_t blk_no, int nbblks, struct xfs_buf *bp) xlog_bwrite() argument
367 xlog_recover_iodone( struct xfs_buf *bp) xlog_recover_iodone() argument
392 xlog_find_cycle_start( struct xlog *log, struct xfs_buf *bp, xfs_daddr_t first_blk, xfs_daddr_t *last_blk, uint cycle) xlog_find_cycle_start() argument
1704 xlog_recover_do_inode_buffer( struct xfs_mount *mp, xlog_recover_item_t *item, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_do_inode_buffer() argument
1829 xlog_recover_get_buf_lsn( struct xfs_mount *mp, struct xfs_buf *bp) xlog_recover_get_buf_lsn() argument
1975 xlog_recover_validate_buf_type( struct xfs_mount *mp, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_validate_buf_type() argument
2173 xlog_recover_do_reg_buffer( struct xfs_mount *mp, xlog_recover_item_t *item, struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) xlog_recover_do_reg_buffer() argument
2263 xlog_recover_do_dquot_buffer( struct xfs_mount *mp, struct xlog *log, struct xlog_recover_item *item, struct xfs_buf *bp, struct xfs_buf_log_format *buf_f) xlog_recover_do_dquot_buffer() argument
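
Log recovery above does all its reading through one scratch uncached buffer: xlog_get_bp() allocates it, xlog_bread() fills it and returns a pointer into the data, xlog_put_bp() frees it. The recurring cycle, sketched (log is the struct xlog being recovered; reading block 0 is just an example):

	char		*offset;
	struct xfs_buf	*bp;
	int		error;

	bp = xlog_get_bp(log, 1);		/* one basic block */
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (!error) {
		/* ... examine the block through *offset ... */
	}
	xlog_put_bp(bp);			/* xfs_buf_free() underneath */
	return error;
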
H A Dxfs_error.c163 struct xfs_buf *bp) xfs_verifier_error()
165 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_verifier_error()
168 bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", xfs_verifier_error()
169 __return_address, bp->b_ops->name, bp->b_bn); xfs_verifier_error()
175 xfs_hex_dump(xfs_buf_offset(bp, 0), 64); xfs_verifier_error()
162 xfs_verifier_error( struct xfs_buf *bp) xfs_verifier_error() argument
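
xfs_verifier_error() above is the common reporting tail of buffer verifiers: the verifier sets bp->b_error, and this prints the CRC-vs-corruption message plus a hexdump of the buffer start. A hedged sketch of a read verifier built from these helpers (the xfs_foo name, magic handling and XFS_FOO_CRC_OFF offset are placeholders, not real ops):

	static void
	xfs_foo_read_verify(
		struct xfs_buf	*bp)
	{
		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
			xfs_buf_ioerror(bp, -EFSBADCRC);

		if (bp->b_error)
			xfs_verifier_error(bp);
	}
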
H A Dxfs_dquot.c231 xfs_buf_t *bp) xfs_qm_init_dquot_blk()
238 ASSERT(xfs_buf_islocked(bp)); xfs_qm_init_dquot_blk()
240 d = bp->b_addr; xfs_qm_init_dquot_blk()
260 xfs_trans_dquot_buf(tp, bp, xfs_qm_init_dquot_blk()
264 xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); xfs_qm_init_dquot_blk()
310 xfs_buf_t *bp; xfs_qm_dqalloc() local
350 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, xfs_qm_dqalloc()
354 if (!bp) { xfs_qm_dqalloc()
358 bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_dqalloc()
365 dqp->dq_flags & XFS_DQ_ALLTYPES, bp); xfs_qm_dqalloc()
380 xfs_trans_bhold(tp, bp); xfs_qm_dqalloc()
388 xfs_trans_bjoin(tp, bp); xfs_qm_dqalloc()
390 xfs_trans_bhold_release(tp, bp); xfs_qm_dqalloc()
393 *O_bpp = bp; xfs_qm_dqalloc()
465 struct xfs_buf *bp; xfs_qm_dqtobp() local
513 dqp->q_fileoffset, &bp); xfs_qm_dqtobp()
529 0, &bp, &xfs_dquot_buf_ops); xfs_qm_dqtobp()
534 ASSERT(bp == NULL); xfs_qm_dqtobp()
535 error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp); xfs_qm_dqtobp()
539 ASSERT(bp == NULL); xfs_qm_dqtobp()
544 ASSERT(xfs_buf_islocked(bp)); xfs_qm_dqtobp()
545 *O_bpp = bp; xfs_qm_dqtobp()
546 *O_ddpp = bp->b_addr + dqp->q_bufoffset; xfs_qm_dqtobp()
568 struct xfs_buf *bp; xfs_qm_dqread() local
624 error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags); xfs_qm_dqread()
651 xfs_buf_set_ref(bp, XFS_DQUOT_REF); xfs_qm_dqread()
665 ASSERT(xfs_buf_islocked(bp)); xfs_qm_dqread()
666 xfs_trans_brelse(tp, bp); xfs_qm_dqread()
886 struct xfs_buf *bp, xfs_qm_dqflush_done()
932 struct xfs_buf *bp; xfs_qm_dqflush() local
967 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqflush()
975 ddqp = bp->b_addr + dqp->q_bufoffset; xfs_qm_dqflush()
983 xfs_buf_relse(bp); xfs_qm_dqflush()
1021 xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, xfs_qm_dqflush()
1028 if (xfs_buf_ispinned(bp)) { xfs_qm_dqflush()
1034 *bpp = bp; xfs_qm_dqflush()
226 xfs_qm_init_dquot_blk( xfs_trans_t *tp, xfs_mount_t *mp, xfs_dqid_t id, uint type, xfs_buf_t *bp) xfs_qm_init_dquot_blk() argument
885 xfs_qm_dqflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_qm_dqflush_done() argument
H A Dxfs_dir2_readdir.c164 struct xfs_buf *bp; /* buffer for block */ xfs_dir2_block_getdents() local
183 error = xfs_dir3_block_read(NULL, dp, &bp); xfs_dir2_block_getdents()
193 hdr = bp->b_addr; xfs_dir2_block_getdents()
194 xfs_dir3_data_check(dp, bp); xfs_dir2_block_getdents()
241 xfs_trans_brelse(NULL, bp); xfs_dir2_block_getdents()
252 xfs_trans_brelse(NULL, bp); xfs_dir2_block_getdents()
279 struct xfs_buf *bp = *bpp; xfs_dir2_leaf_readbuf() local
293 if (bp) { xfs_dir2_leaf_readbuf()
294 xfs_trans_brelse(NULL, bp); xfs_dir2_leaf_readbuf()
295 bp = NULL; xfs_dir2_leaf_readbuf()
393 -1, &bp); xfs_dir2_leaf_readbuf()
464 *bpp = bp; xfs_dir2_leaf_readbuf()
479 struct xfs_buf *bp = NULL; /* data block buffer */ xfs_dir2_leaf_getdents() local
534 if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) { xfs_dir2_leaf_getdents()
539 &curoff, &bp); xfs_dir2_leaf_getdents()
560 hdr = bp->b_addr; xfs_dir2_leaf_getdents()
561 xfs_dir3_data_check(dp, bp); xfs_dir2_leaf_getdents()
645 if (bp) xfs_dir2_leaf_getdents()
646 xfs_trans_brelse(NULL, bp); xfs_dir2_leaf_getdents()
H A Dxfs_mount.c274 struct xfs_buf *bp; xfs_readsb() local
300 BTOBB(sector_size), 0, &bp, buf_ops); xfs_readsb()
313 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); xfs_readsb()
342 xfs_buf_relse(bp); xfs_readsb()
351 bp->b_ops = &xfs_sb_buf_ops; xfs_readsb()
353 mp->m_sb_bp = bp; xfs_readsb()
354 xfs_buf_unlock(bp); xfs_readsb()
358 xfs_buf_relse(bp); xfs_readsb()
539 struct xfs_buf *bp; xfs_check_sizes() local
550 XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL); xfs_check_sizes()
555 xfs_buf_relse(bp); xfs_check_sizes()
567 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_check_sizes()
572 xfs_buf_relse(bp); xfs_check_sizes()
1278 struct xfs_buf *bp = mp->m_sb_bp; xfs_getsb() local
1280 if (!xfs_buf_trylock(bp)) { xfs_getsb()
1283 xfs_buf_lock(bp); xfs_getsb()
1286 xfs_buf_hold(bp); xfs_getsb()
1287 ASSERT(XFS_BUF_ISDONE(bp)); xfs_getsb()
1288 return bp; xfs_getsb()
1298 struct xfs_buf *bp = mp->m_sb_bp; xfs_freesb() local
1300 xfs_buf_lock(bp); xfs_freesb()
1302 xfs_buf_relse(bp); xfs_freesb()
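
xfs_getsb() above hands out the cached, already-read superblock buffer: it takes the buffer lock (or trylock, depending on flags) plus a hold, and the caller pairs it with xfs_buf_relse(). Sketch:

	struct xfs_buf	*bp;

	bp = xfs_getsb(mp, 0);		/* locked, held, XBF_DONE asserted */
	/* ... read or update via XFS_BUF_TO_SBP(bp) ... */
	xfs_buf_relse(bp);		/* unlock and drop the hold */
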
H A Dxfs_qm.c152 struct xfs_buf *bp = NULL; xfs_qm_dqpurge() local
159 error = xfs_qm_dqflush(dqp, &bp); xfs_qm_dqpurge()
164 error = xfs_bwrite(bp); xfs_qm_dqpurge()
165 xfs_buf_relse(bp); xfs_qm_dqpurge()
470 struct xfs_buf *bp = NULL; __releases() local
478 error = xfs_qm_dqflush(dqp, &bp); __releases()
485 xfs_buf_delwri_queue(bp, &isol->buffers); __releases()
486 xfs_buf_relse(bp); __releases()
812 xfs_buf_t *bp, xfs_qm_reset_dqcounts()
819 trace_xfs_reset_dqcounts(bp, _RET_IP_); xfs_qm_reset_dqcounts()
830 dqb = bp->b_addr; xfs_qm_reset_dqcounts()
875 struct xfs_buf *bp; xfs_qm_dqiter_bufs() local
896 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqiter_bufs()
909 mp->m_quotainfo->qi_dqchunklen, 0, &bp, xfs_qm_dqiter_bufs()
921 bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_dqiter_bufs()
922 xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_qm_dqiter_bufs()
923 xfs_buf_delwri_queue(bp, buffer_list); xfs_qm_dqiter_bufs()
924 xfs_buf_relse(bp); xfs_qm_dqiter_bufs()
1222 struct xfs_buf *bp = NULL; xfs_qm_flush_one() local
1232 error = xfs_qm_dqflush(dqp, &bp); xfs_qm_flush_one()
1236 xfs_buf_delwri_queue(bp, buffer_list); xfs_qm_flush_one()
1237 xfs_buf_relse(bp); xfs_qm_flush_one()
1359 struct xfs_buf *bp = xfs_qm_quotacheck() local
1361 list_del_init(&bp->b_list); xfs_qm_quotacheck()
1362 xfs_buf_relse(bp); xfs_qm_quotacheck()
810 xfs_qm_reset_dqcounts( xfs_mount_t *mp, xfs_buf_t *bp, xfs_dqid_t id, uint type) xfs_qm_reset_dqcounts() argument
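
The quota code above funnels dirty dquots through xfs_qm_dqflush(), which on success returns the locked buffer holding the on-disk dquot; the caller then queues it for delayed write. A sketch following the xfs_qm_flush_one() shape (dqp and buffer_list assumed to exist):

	struct xfs_buf	*bp = NULL;
	int		error;

	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		return error;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
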
H A Dxfs_trace.h309 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
310 TP_ARGS(bp, caller_ip),
322 __entry->dev = bp->b_target->bt_dev;
323 __entry->bno = bp->b_bn;
324 __entry->nblks = bp->b_length;
325 __entry->hold = atomic_read(&bp->b_hold);
326 __entry->pincount = atomic_read(&bp->b_pin_count);
327 __entry->lockval = bp->b_sema.count;
328 __entry->flags = bp->b_flags;
345 TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
346 TP_ARGS(bp, caller_ip))
382 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
383 TP_ARGS(bp, flags, caller_ip),
395 __entry->dev = bp->b_target->bt_dev;
396 __entry->bno = bp->b_bn;
397 __entry->buffer_length = BBTOB(bp->b_length);
399 __entry->hold = atomic_read(&bp->b_hold);
400 __entry->pincount = atomic_read(&bp->b_pin_count);
401 __entry->lockval = bp->b_sema.count;
418 TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
419 TP_ARGS(bp, flags, caller_ip))
425 TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
426 TP_ARGS(bp, error, caller_ip),
439 __entry->dev = bp->b_target->bt_dev;
440 __entry->bno = bp->b_bn;
441 __entry->buffer_length = BBTOB(bp->b_length);
442 __entry->hold = atomic_read(&bp->b_hold);
443 __entry->pincount = atomic_read(&bp->b_pin_count);
444 __entry->lockval = bp->b_sema.count;
446 __entry->flags = bp->b_flags;
H A Dxfs_log.c1184 xlog_iodone(xfs_buf_t *bp) xlog_iodone() argument
1186 struct xlog_in_core *iclog = bp->b_fspriv; xlog_iodone()
1193 if (XFS_TEST_ERROR(bp->b_error, l->l_mp, xlog_iodone()
1195 xfs_buf_ioerror_alert(bp, __func__); xlog_iodone()
1196 xfs_buf_stale(bp); xlog_iodone()
1209 ASSERT(XFS_BUF_ISASYNC(bp)); xlog_iodone()
1216 * (bp) after the unlock as we could race with it being freed. xlog_iodone()
1218 xfs_buf_unlock(bp); xlog_iodone()
1350 xfs_buf_t *bp; xlog_alloc_log() local
1414 bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL, xlog_alloc_log()
1416 if (!bp) xlog_alloc_log()
1424 ASSERT(xfs_buf_islocked(bp)); xlog_alloc_log()
1425 xfs_buf_unlock(bp); xlog_alloc_log()
1428 bp->b_ioend_wq = mp->m_log_workqueue; xlog_alloc_log()
1429 bp->b_iodone = xlog_iodone; xlog_alloc_log()
1430 log->l_xbuf = bp; xlog_alloc_log()
1453 bp = xfs_buf_get_uncached(mp->m_logdev_targp, xlog_alloc_log()
1455 if (!bp) xlog_alloc_log()
1458 ASSERT(xfs_buf_islocked(bp)); xlog_alloc_log()
1459 xfs_buf_unlock(bp); xlog_alloc_log()
1462 bp->b_ioend_wq = mp->m_log_workqueue; xlog_alloc_log()
1463 bp->b_iodone = xlog_iodone; xlog_alloc_log()
1464 iclog->ic_bp = bp; xlog_alloc_log()
1465 iclog->ic_data = bp->b_addr; xlog_alloc_log()
1479 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; xlog_alloc_log()
1709 struct xfs_buf *bp) xlog_bdstrat()
1711 struct xlog_in_core *iclog = bp->b_fspriv; xlog_bdstrat()
1713 xfs_buf_lock(bp); xlog_bdstrat()
1715 xfs_buf_ioerror(bp, -EIO); xlog_bdstrat()
1716 xfs_buf_stale(bp); xlog_bdstrat()
1717 xfs_buf_ioend(bp); xlog_bdstrat()
1727 xfs_buf_submit(bp); xlog_bdstrat()
1761 xfs_buf_t *bp; xlog_sync() local
1805 bp = iclog->ic_bp; xlog_sync()
1806 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); xlog_sync()
1811 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { xlog_sync()
1814 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); xlog_sync()
1815 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); xlog_sync()
1842 bp->b_io_length = BTOBB(count); xlog_sync()
1843 bp->b_fspriv = iclog; xlog_sync()
1844 XFS_BUF_ZEROFLAGS(bp); xlog_sync()
1845 XFS_BUF_ASYNC(bp); xlog_sync()
1846 bp->b_flags |= XBF_SYNCIO; xlog_sync()
1849 bp->b_flags |= XBF_FUA; xlog_sync()
1863 bp->b_flags |= XBF_FLUSH; xlog_sync()
1866 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); xlog_sync()
1867 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); xlog_sync()
1872 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); xlog_sync()
1877 XFS_BUF_WRITE(bp); xlog_sync()
1879 error = xlog_bdstrat(bp); xlog_sync()
1881 xfs_buf_ioerror_alert(bp, "xlog_sync"); xlog_sync()
1885 bp = iclog->ic_log->l_xbuf; xlog_sync()
1886 XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ xlog_sync()
1887 xfs_buf_associate_memory(bp, xlog_sync()
1889 bp->b_fspriv = iclog; xlog_sync()
1890 XFS_BUF_ZEROFLAGS(bp); xlog_sync()
1891 XFS_BUF_ASYNC(bp); xlog_sync()
1892 bp->b_flags |= XBF_SYNCIO; xlog_sync()
1894 bp->b_flags |= XBF_FUA; xlog_sync()
1896 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); xlog_sync()
1897 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); xlog_sync()
1900 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); xlog_sync()
1901 XFS_BUF_WRITE(bp); xlog_sync()
1902 error = xlog_bdstrat(bp); xlog_sync()
1904 xfs_buf_ioerror_alert(bp, "xlog_sync (split)"); xlog_sync()
1708 xlog_bdstrat( struct xfs_buf *bp) xlog_bdstrat() argument
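
xlog_sync() above handles physical log wraparound in place: if an iclog write would run past l_logBBsize, the byte count is clipped and a second write of the remaining "split" bytes restarts at block zero. The wrap arithmetic, restated from the lines above (bp, count, split and log are xlog_sync() locals):

	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		split = count - BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		/* the remaining "split" bytes are written from block 0 */
	}
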
H A Dxfs_rtalloc.c116 xfs_buf_t *bp; /* summary buffer */ xfs_rtcopy_summary() local
122 bp = NULL; xfs_rtcopy_summary()
127 error = xfs_rtget_summary(omp, tp, log, bbno, &bp, xfs_rtcopy_summary()
134 &bp, &sumbno); xfs_rtcopy_summary()
138 &bp, &sumbno); xfs_rtcopy_summary()
768 struct xfs_buf *bp; /* temporary buffer for zeroing */ xfs_growfs_rt_alloc() local
844 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, xfs_growfs_rt_alloc()
846 if (bp == NULL) { xfs_growfs_rt_alloc()
850 memset(bp->b_addr, 0, mp->m_sb.sb_blocksize); xfs_growfs_rt_alloc()
851 xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); xfs_growfs_rt_alloc()
887 xfs_buf_t *bp; /* temporary buffer */ xfs_growfs_rt() local
920 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_growfs_rt()
923 xfs_buf_relse(bp); xfs_growfs_rt()
1055 bp = NULL; xfs_growfs_rt()
1057 nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); xfs_growfs_rt()
1176 struct xfs_buf *bp; /* buffer for last block of subvolume */ xfs_rtmount_init() local
1207 XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL); xfs_rtmount_init()
1212 xfs_buf_relse(bp); xfs_rtmount_init()
H A Dxfs_inode_item.c413 struct xfs_buf *bp = NULL; xfs_inode_item_push() local
455 error = xfs_iflush(ip, &bp); xfs_inode_item_push()
457 if (!xfs_buf_delwri_queue(bp, buffer_list)) xfs_inode_item_push()
459 xfs_buf_relse(bp); xfs_inode_item_push()
598 struct xfs_buf *bp, xfs_iflush_done()
612 blip = bp->b_fspriv; xfs_iflush_done()
624 bp->b_fspriv = next; xfs_iflush_done()
732 struct xfs_buf *bp, xfs_istale_done()
597 xfs_iflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_iflush_done() argument
731 xfs_istale_done( struct xfs_buf *bp, struct xfs_log_item *lip) xfs_istale_done() argument
H A Dxfs_dquot_item.c147 struct xfs_buf *bp = NULL; variable in typeref:struct:xfs_buf
178 error = xfs_qm_dqflush(dqp, &bp);
183 if (!xfs_buf_delwri_queue(bp, buffer_list))
185 xfs_buf_relse(bp); variable
/linux-4.4.14/drivers/sbus/char/
H A Dbbc_i2c.c53 static void set_device_claimage(struct bbc_i2c_bus *bp, struct platform_device *op, int val) set_device_claimage() argument
58 if (bp->devs[i].device == op) { set_device_claimage()
59 bp->devs[i].client_claimed = val; set_device_claimage()
68 struct platform_device *bbc_i2c_getdev(struct bbc_i2c_bus *bp, int index) bbc_i2c_getdev() argument
74 if (!(op = bp->devs[i].device)) bbc_i2c_getdev()
88 struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *op) bbc_i2c_attach() argument
96 client->bp = bp; bbc_i2c_attach()
108 claim_device(bp, op); bbc_i2c_attach()
115 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_detach() local
118 release_device(bp, op); bbc_i2c_detach()
122 static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status) wait_for_pin() argument
128 bp->waiting = 1; wait_for_pin()
129 add_wait_queue(&bp->wq, &wait); wait_for_pin()
134 bp->wq, wait_for_pin()
135 (((*status = readb(bp->i2c_control_regs + 0)) wait_for_pin()
143 remove_wait_queue(&bp->wq, &wait); wait_for_pin()
144 bp->waiting = 0; wait_for_pin()
151 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_writeb() local
156 if (bp->i2c_bussel_reg != NULL) bbc_i2c_writeb()
157 writeb(client->bus, bp->i2c_bussel_reg); bbc_i2c_writeb()
159 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_writeb()
160 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_writeb()
161 if (wait_for_pin(bp, &status)) bbc_i2c_writeb()
164 writeb(off, bp->i2c_control_regs + 0x1); bbc_i2c_writeb()
165 if (wait_for_pin(bp, &status) || bbc_i2c_writeb()
169 writeb(val, bp->i2c_control_regs + 0x1); bbc_i2c_writeb()
170 if (wait_for_pin(bp, &status)) bbc_i2c_writeb()
176 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_writeb()
182 struct bbc_i2c_bus *bp = client->bp; bbc_i2c_readb() local
186 if (bp->i2c_bussel_reg != NULL) bbc_i2c_readb()
187 writeb(client->bus, bp->i2c_bussel_reg); bbc_i2c_readb()
189 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_readb()
190 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_readb()
191 if (wait_for_pin(bp, &status)) bbc_i2c_readb()
194 writeb(off, bp->i2c_control_regs + 0x1); bbc_i2c_readb()
195 if (wait_for_pin(bp, &status) || bbc_i2c_readb()
199 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_readb()
203 writeb(address, bp->i2c_control_regs + 0x1); bbc_i2c_readb()
204 writeb(I2C_PCF_START, bp->i2c_control_regs + 0x0); bbc_i2c_readb()
205 if (wait_for_pin(bp, &status)) bbc_i2c_readb()
211 (void) readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb()
212 if (wait_for_pin(bp, &status)) bbc_i2c_readb()
215 writeb(I2C_PCF_ESO | I2C_PCF_ENI, bp->i2c_control_regs + 0x0); bbc_i2c_readb()
216 *byte = readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb()
217 if (wait_for_pin(bp, &status)) bbc_i2c_readb()
223 writeb(I2C_PCF_STOP, bp->i2c_control_regs + 0x0); bbc_i2c_readb()
224 (void) readb(bp->i2c_control_regs + 0x1); bbc_i2c_readb()
272 struct bbc_i2c_bus *bp = dev_id; bbc_i2c_interrupt() local
277 if (bp->waiting && bbc_i2c_interrupt()
278 !(readb(bp->i2c_control_regs + 0x0) & I2C_PCF_PIN)) bbc_i2c_interrupt()
279 wake_up_interruptible(&bp->wq); bbc_i2c_interrupt()
284 static void reset_one_i2c(struct bbc_i2c_bus *bp) reset_one_i2c() argument
286 writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); reset_one_i2c()
287 writeb(bp->own, bp->i2c_control_regs + 0x1); reset_one_i2c()
288 writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); reset_one_i2c()
289 writeb(bp->clock, bp->i2c_control_regs + 0x1); reset_one_i2c()
290 writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); reset_one_i2c()
295 struct bbc_i2c_bus *bp; attach_one_i2c() local
299 bp = kzalloc(sizeof(*bp), GFP_KERNEL); attach_one_i2c()
300 if (!bp) attach_one_i2c()
303 INIT_LIST_HEAD(&bp->temps); attach_one_i2c()
304 INIT_LIST_HEAD(&bp->fans); attach_one_i2c()
306 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); attach_one_i2c()
307 if (!bp->i2c_control_regs) attach_one_i2c()
311 bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); attach_one_i2c()
312 if (!bp->i2c_bussel_reg) attach_one_i2c()
316 bp->waiting = 0; attach_one_i2c()
317 init_waitqueue_head(&bp->wq); attach_one_i2c()
319 IRQF_SHARED, "bbc_i2c", bp)) attach_one_i2c()
322 bp->index = index; attach_one_i2c()
323 bp->op = op; attach_one_i2c()
325 spin_lock_init(&bp->lock); attach_one_i2c()
334 bp->devs[entry].device = child_op; attach_one_i2c()
335 bp->devs[entry].client_claimed = 0; attach_one_i2c()
338 writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); attach_one_i2c()
339 bp->own = readb(bp->i2c_control_regs + 0x01); attach_one_i2c()
340 writeb(I2C_PCF_PIN | I2C_PCF_ES1, bp->i2c_control_regs + 0x0); attach_one_i2c()
341 bp->clock = readb(bp->i2c_control_regs + 0x01); attach_one_i2c()
344 bp->index, bp->i2c_control_regs, entry, bp->own, bp->clock); attach_one_i2c()
346 reset_one_i2c(bp); attach_one_i2c()
348 return bp; attach_one_i2c()
351 if (bp->i2c_bussel_reg) attach_one_i2c()
352 of_iounmap(&op->resource[1], bp->i2c_bussel_reg, 1); attach_one_i2c()
353 if (bp->i2c_control_regs) attach_one_i2c()
354 of_iounmap(&op->resource[0], bp->i2c_control_regs, 2); attach_one_i2c()
355 kfree(bp); attach_one_i2c()
359 extern int bbc_envctrl_init(struct bbc_i2c_bus *bp);
360 extern void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp);
364 struct bbc_i2c_bus *bp; bbc_i2c_probe() local
367 bp = attach_one_i2c(op, index); bbc_i2c_probe()
368 if (!bp) bbc_i2c_probe()
371 err = bbc_envctrl_init(bp); bbc_i2c_probe()
373 free_irq(op->archdata.irqs[0], bp); bbc_i2c_probe()
374 if (bp->i2c_bussel_reg) bbc_i2c_probe()
375 of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); bbc_i2c_probe()
376 if (bp->i2c_control_regs) bbc_i2c_probe()
377 of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); bbc_i2c_probe()
378 kfree(bp); bbc_i2c_probe()
380 dev_set_drvdata(&op->dev, bp); bbc_i2c_probe()
388 struct bbc_i2c_bus *bp = dev_get_drvdata(&op->dev); bbc_i2c_remove() local
390 bbc_envctrl_cleanup(bp); bbc_i2c_remove()
392 free_irq(op->archdata.irqs[0], bp); bbc_i2c_remove()
394 if (bp->i2c_bussel_reg) bbc_i2c_remove()
395 of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); bbc_i2c_remove()
396 if (bp->i2c_control_regs) bbc_i2c_remove()
397 of_iounmap(&op->resource[1], bp->i2c_control_regs, 2); bbc_i2c_remove()
399 kfree(bp); bbc_i2c_remove()
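
The wait_for_pin() hits (excerpt lines 122-144) show the bus driver setting bp->waiting and sleeping on bp->wq until the controller's PIN bit clears, while bbc_i2c_interrupt() (lines 272-279) wakes the queue when PIN drops. A hedged pthread model of the same wait/wake handshake, with the mutex/condvar pair standing in for the kernel wait queue and the driver's timeout handling omitted.

/* Hedged userspace model of the wait_for_pin()/bbc_i2c_interrupt() handshake. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;  /* stands in for bp->wq */
static int pin_busy = 1;                                 /* stands in for I2C_PCF_PIN */

static void *irq_handler(void *arg)
{
    (void)arg;
    usleep(1000);                 /* pretend the transfer takes a while */
    pthread_mutex_lock(&lock);
    pin_busy = 0;                 /* controller dropped PIN */
    pthread_cond_signal(&wq);     /* wake_up_interruptible(&bp->wq) */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t irq;
    pthread_create(&irq, NULL, irq_handler, NULL);

    pthread_mutex_lock(&lock);
    while (pin_busy)              /* wait_for_pin() condition */
        pthread_cond_wait(&wq, &lock);
    pthread_mutex_unlock(&lock);

    printf("pin cleared, transfer done\n");
    pthread_join(irq, NULL);
    return 0;
}
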
H A Dbbc_envctrl.c445 static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op, attach_one_temp() argument
457 tp->client = bbc_i2c_attach(bp, op); attach_one_temp()
467 list_add(&tp->bp_list, &bp->temps); attach_one_temp()
493 static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op, attach_one_fan() argument
505 fp->client = bbc_i2c_attach(bp, op); attach_one_fan()
514 list_add(&fp->bp_list, &bp->fans); attach_one_fan()
537 static void destroy_all_temps(struct bbc_i2c_bus *bp) destroy_all_temps() argument
541 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { destroy_all_temps()
554 static void destroy_all_fans(struct bbc_i2c_bus *bp) destroy_all_fans() argument
558 list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) { destroy_all_fans()
565 int bbc_envctrl_init(struct bbc_i2c_bus *bp) bbc_envctrl_init() argument
572 while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) { bbc_envctrl_init()
574 attach_one_temp(bp, op, temp_index++); bbc_envctrl_init()
576 attach_one_fan(bp, op, fan_index++); bbc_envctrl_init()
584 destroy_all_temps(bp); bbc_envctrl_init()
585 destroy_all_fans(bp); bbc_envctrl_init()
593 void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp) bbc_envctrl_cleanup() argument
598 destroy_all_temps(bp); bbc_envctrl_cleanup()
599 destroy_all_fans(bp); bbc_envctrl_cleanup()
H A Dbbc_i2c.h9 struct bbc_i2c_bus *bp; member in struct:bbc_i2c_client
76 extern struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct platform_device *);
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
H A Dbnxt_sriov.c22 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) bnxt_vf_ndo_prep() argument
24 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { bnxt_vf_ndo_prep()
25 netdev_err(bp->dev, "vf ndo called even though PF is down\n"); bnxt_vf_ndo_prep()
28 if (!bp->pf.active_vfs) { bnxt_vf_ndo_prep()
29 netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n"); bnxt_vf_ndo_prep()
32 if (vf_id >= bp->pf.max_vfs) { bnxt_vf_ndo_prep()
33 netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); bnxt_vf_ndo_prep()
42 struct bnxt *bp = netdev_priv(dev); bnxt_set_vf_spoofchk() local
48 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_set_vf_spoofchk()
52 vf = &bp->pf.vf[vf_id]; bnxt_set_vf_spoofchk()
66 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); bnxt_set_vf_spoofchk()
69 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_set_vf_spoofchk()
83 struct bnxt *bp = netdev_priv(dev); bnxt_get_vf_config() local
87 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_get_vf_config()
92 vf = &bp->pf.vf[vf_id]; bnxt_get_vf_config()
113 struct bnxt *bp = netdev_priv(dev); bnxt_set_vf_mac() local
117 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_set_vf_mac()
127 vf = &bp->pf.vf[vf_id]; bnxt_set_vf_mac()
130 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); bnxt_set_vf_mac()
135 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_set_vf_mac()
141 struct bnxt *bp = netdev_priv(dev); bnxt_set_vf_vlan() local
146 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_set_vf_vlan()
156 vf = &bp->pf.vf[vf_id]; bnxt_set_vf_vlan()
161 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); bnxt_set_vf_vlan()
166 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_set_vf_vlan()
176 struct bnxt *bp = netdev_priv(dev); bnxt_set_vf_bw() local
181 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_set_vf_bw()
185 vf = &bp->pf.vf[vf_id]; bnxt_set_vf_bw()
186 pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); bnxt_set_vf_bw()
188 netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", bnxt_set_vf_bw()
194 netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", bnxt_set_vf_bw()
200 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); bnxt_set_vf_bw()
207 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_set_vf_bw()
217 struct bnxt *bp = netdev_priv(dev); bnxt_set_vf_link_state() local
221 rc = bnxt_vf_ndo_prep(bp, vf_id); bnxt_set_vf_link_state()
225 vf = &bp->pf.vf[vf_id]; bnxt_set_vf_link_state()
239 netdev_err(bp->dev, "Invalid link option\n"); bnxt_set_vf_link_state()
248 static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) bnxt_set_vf_attr() argument
254 vf = &bp->pf.vf[i]; bnxt_set_vf_attr()
261 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) bnxt_hwrm_func_vf_resource_free() argument
264 struct bnxt_pf_info *pf = &bp->pf; bnxt_hwrm_func_vf_resource_free()
267 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); bnxt_hwrm_func_vf_resource_free()
269 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_vf_resource_free()
272 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_func_vf_resource_free()
277 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_vf_resource_free()
281 static void bnxt_free_vf_resources(struct bnxt *bp) bnxt_free_vf_resources() argument
283 struct pci_dev *pdev = bp->pdev; bnxt_free_vf_resources()
286 kfree(bp->pf.vf_event_bmap); bnxt_free_vf_resources()
287 bp->pf.vf_event_bmap = NULL; bnxt_free_vf_resources()
290 if (bp->pf.hwrm_cmd_req_addr[i]) { bnxt_free_vf_resources()
292 bp->pf.hwrm_cmd_req_addr[i], bnxt_free_vf_resources()
293 bp->pf.hwrm_cmd_req_dma_addr[i]); bnxt_free_vf_resources()
294 bp->pf.hwrm_cmd_req_addr[i] = NULL; bnxt_free_vf_resources()
298 kfree(bp->pf.vf); bnxt_free_vf_resources()
299 bp->pf.vf = NULL; bnxt_free_vf_resources()
302 static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) bnxt_alloc_vf_resources() argument
304 struct pci_dev *pdev = bp->pdev; bnxt_alloc_vf_resources()
307 bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); bnxt_alloc_vf_resources()
308 if (!bp->pf.vf) bnxt_alloc_vf_resources()
311 bnxt_set_vf_attr(bp, num_vfs); bnxt_alloc_vf_resources()
319 bp->pf.hwrm_cmd_req_addr[i] = bnxt_alloc_vf_resources()
321 &bp->pf.hwrm_cmd_req_dma_addr[i], bnxt_alloc_vf_resources()
324 if (!bp->pf.hwrm_cmd_req_addr[i]) bnxt_alloc_vf_resources()
328 struct bnxt_vf_info *vf = &bp->pf.vf[k]; bnxt_alloc_vf_resources()
330 vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + bnxt_alloc_vf_resources()
333 bp->pf.hwrm_cmd_req_dma_addr[i] + j * bnxt_alloc_vf_resources()
340 bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL); bnxt_alloc_vf_resources()
341 if (!bp->pf.vf_event_bmap) bnxt_alloc_vf_resources()
344 bp->pf.hwrm_cmd_req_pages = nr_pages; bnxt_alloc_vf_resources()
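The bnxt_alloc_vf_resources() hits above (excerpt lines 311-344) carve DMA pages into fixed-size per-VF HWRM request slots: each VF's address is a page base plus a slot offset, and the page count is recorded in hwrm_cmd_req_pages. A hedged sketch of that offset arithmetic; REQ_SIZE and PAGE_SZ are illustrative values inferred from the excerpt's shape, not confirmed driver constants.

/* Hedged sketch of the per-VF request-slot layout in bnxt_alloc_vf_resources(). */
#include <stdio.h>

#define PAGE_SZ  4096
#define REQ_SIZE 128                    /* assumed per-VF slot size */
#define PER_PAGE (PAGE_SZ / REQ_SIZE)

int main(void)
{
    int num_vfs = 70;
    int nr_pages = (num_vfs + PER_PAGE - 1) / PER_PAGE;

    printf("%d VFs -> %d request pages\n", num_vfs, nr_pages);
    for (int k = 0; k < num_vfs; k++) {
        int page = k / PER_PAGE;        /* which DMA page */
        int slot = k % PER_PAGE;        /* slot index within it */
        if (k < 3 || k == num_vfs - 1)
            printf("vf %2d: page %d, offset %d\n", k, page, slot * REQ_SIZE);
    }
    return 0;
}
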
348 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) bnxt_hwrm_func_buf_rgtr() argument
352 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); bnxt_hwrm_func_buf_rgtr()
354 req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); bnxt_hwrm_func_buf_rgtr()
357 req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); bnxt_hwrm_func_buf_rgtr()
358 req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); bnxt_hwrm_func_buf_rgtr()
359 req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); bnxt_hwrm_func_buf_rgtr()
360 req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); bnxt_hwrm_func_buf_rgtr()
362 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_func_buf_rgtr()
366 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs) bnxt_hwrm_func_cfg() argument
371 struct bnxt_pf_info *pf = &bp->pf; bnxt_hwrm_func_cfg()
373 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); bnxt_hwrm_func_cfg()
381 vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs); bnxt_hwrm_func_cfg()
382 vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs; bnxt_hwrm_func_cfg()
384 /* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */ bnxt_hwrm_func_cfg()
385 vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs; bnxt_hwrm_func_cfg()
386 if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_hwrm_func_cfg()
387 vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) / bnxt_hwrm_func_cfg()
390 vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) / bnxt_hwrm_func_cfg()
392 vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs; bnxt_hwrm_func_cfg()
404 mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; bnxt_hwrm_func_cfg()
419 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_cfg()
422 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_func_cfg()
426 bp->pf.active_vfs = i + 1; bnxt_hwrm_func_cfg()
427 bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id); bnxt_hwrm_func_cfg()
429 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_cfg()
431 bp->pf.max_pf_tx_rings = bp->tx_nr_rings; bnxt_hwrm_func_cfg()
432 if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_hwrm_func_cfg()
433 bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2; bnxt_hwrm_func_cfg()
435 bp->pf.max_pf_rx_rings = bp->rx_nr_rings; bnxt_hwrm_func_cfg()
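bnxt_hwrm_func_cfg() (excerpt lines 381-392) splits the PF's leftover hardware resources evenly among the VFs: each VF gets (max - PF-in-use) / num_vfs of every ring type, with the PF's RX usage counted twice when aggregation rings are enabled. A hedged arithmetic sketch with illustrative numbers; the field names are simplified stand-ins.

/* Hedged sketch of the even resource split in bnxt_hwrm_func_cfg(). */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    int max_rx = 128, pf_rx = 8, max_tx = 128, pf_tx = 8, num_vfs = 10;
    bool agg_rings = true;

    int vf_rx = agg_rings ? (max_rx - pf_rx * 2) / num_vfs
                          : (max_rx - pf_rx) / num_vfs;
    int vf_tx = (max_tx - pf_tx) / num_vfs;

    printf("per-VF rx=%d tx=%d\n", vf_rx, vf_tx);   /* rx=11 tx=12 */
    return 0;
}
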
440 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) bnxt_sriov_enable() argument
457 if (bp->flags & BNXT_FLAG_AGG_RINGS) { bnxt_sriov_enable()
458 if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >= bnxt_sriov_enable()
462 if (bp->pf.max_rx_rings - bp->rx_nr_rings >= bnxt_sriov_enable()
467 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) bnxt_sriov_enable()
470 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) bnxt_sriov_enable()
480 netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n"); bnxt_sriov_enable()
485 netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", bnxt_sriov_enable()
490 rc = bnxt_alloc_vf_resources(bp, *num_vfs); bnxt_sriov_enable()
495 rc = bnxt_hwrm_func_cfg(bp, num_vfs); bnxt_sriov_enable()
500 rc = bnxt_hwrm_func_buf_rgtr(bp); bnxt_sriov_enable()
504 rc = pci_enable_sriov(bp->pdev, *num_vfs); bnxt_sriov_enable()
512 bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); bnxt_sriov_enable()
515 bnxt_free_vf_resources(bp); bnxt_sriov_enable()
520 void bnxt_sriov_disable(struct bnxt *bp) bnxt_sriov_disable() argument
522 u16 num_vfs = pci_num_vf(bp->pdev); bnxt_sriov_disable()
527 if (pci_vfs_assigned(bp->pdev)) { bnxt_sriov_disable()
528 netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", bnxt_sriov_disable()
531 pci_disable_sriov(bp->pdev); bnxt_sriov_disable()
533 bnxt_hwrm_func_vf_resource_free(bp, num_vfs); bnxt_sriov_disable()
536 bnxt_free_vf_resources(bp); bnxt_sriov_disable()
538 bp->pf.active_vfs = 0; bnxt_sriov_disable()
539 bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings; bnxt_sriov_disable()
540 bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings; bnxt_sriov_disable()
546 struct bnxt *bp = netdev_priv(dev); bnxt_sriov_configure() local
548 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { bnxt_sriov_configure()
559 bp->sriov_cfg = true; bnxt_sriov_configure()
562 if (pci_vfs_assigned(bp->pdev)) { bnxt_sriov_configure()
569 if (num_vfs && num_vfs == bp->pf.active_vfs) bnxt_sriov_configure()
573 bnxt_sriov_disable(bp); bnxt_sriov_configure()
577 bnxt_sriov_enable(bp, &num_vfs); bnxt_sriov_configure()
580 bp->sriov_cfg = false; bnxt_sriov_configure()
581 wake_up(&bp->sriov_cfg_wait); bnxt_sriov_configure()
586 static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_fwd_resp() argument
592 struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_fwd_resp()
594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); bnxt_hwrm_fwd_resp()
603 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_fwd_resp()
604 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_fwd_resp()
607 netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); bnxt_hwrm_fwd_resp()
612 netdev_err(bp->dev, "hwrm_fwd_resp error %d\n", bnxt_hwrm_fwd_resp()
618 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_fwd_resp()
622 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_fwd_err_resp() argument
627 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_fwd_err_resp()
629 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); bnxt_hwrm_fwd_err_resp()
634 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_fwd_err_resp()
635 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_fwd_err_resp()
638 netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); bnxt_hwrm_fwd_err_resp()
643 netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n", bnxt_hwrm_fwd_err_resp()
649 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_fwd_err_resp()
653 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, bnxt_hwrm_exec_fwd_resp() argument
658 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_exec_fwd_resp()
660 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); bnxt_hwrm_exec_fwd_resp()
665 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_exec_fwd_resp()
666 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_exec_fwd_resp()
669 netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc); bnxt_hwrm_exec_fwd_resp()
674 netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n", bnxt_hwrm_exec_fwd_resp()
680 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_exec_fwd_resp()
684 static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) bnxt_vf_validate_set_mac() argument
692 return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); bnxt_vf_validate_set_mac()
694 return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); bnxt_vf_validate_set_mac()
697 static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) bnxt_vf_set_link() argument
704 bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); bnxt_vf_set_link()
711 mutex_lock(&bp->hwrm_cmd_lock); bnxt_vf_set_link()
712 memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, bnxt_vf_set_link()
714 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_vf_set_link()
742 rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, bnxt_vf_set_link()
750 static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) bnxt_vf_req_validate_snd() argument
758 rc = bnxt_vf_validate_set_mac(bp, vf); bnxt_vf_req_validate_snd()
765 bp, vf, sizeof(struct hwrm_func_cfg_input)); bnxt_vf_req_validate_snd()
768 rc = bnxt_vf_set_link(bp, vf); bnxt_vf_req_validate_snd()
776 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) bnxt_hwrm_exec_fwd_req() argument
778 u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; bnxt_hwrm_exec_fwd_req()
782 vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); bnxt_hwrm_exec_fwd_req()
786 clear_bit(vf_id, bp->pf.vf_event_bmap); bnxt_hwrm_exec_fwd_req()
787 bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); bnxt_hwrm_exec_fwd_req()
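bnxt_hwrm_exec_fwd_req() (excerpt lines 778-787) scans vf_event_bmap with find_next_bit, clears each pending bit, and forwards that VF's request. A hedged userspace model of the scan; the one-word bitmap and find_next_bit() here are simplified stand-ins for the kernel bitmap helpers.

/* Hedged model of the event-bitmap scan in bnxt_hwrm_exec_fwd_req(). */
#include <stdio.h>

static int find_next_bit(unsigned long map, int size, int start)
{
    for (int i = start; i < size; i++)
        if (map & (1UL << i))
            return i;
    return size;                   /* kernel convention: size means none */
}

int main(void)
{
    unsigned long vf_event_bmap = 0;
    int active_vfs = 16;

    vf_event_bmap |= 1UL << 2;     /* pretend VFs 2 and 9 signaled */
    vf_event_bmap |= 1UL << 9;

    int i = 0;
    while (i < active_vfs) {
        int vf_id = find_next_bit(vf_event_bmap, active_vfs, i);
        if (vf_id >= active_vfs)
            break;
        vf_event_bmap &= ~(1UL << vf_id);    /* clear_bit() analogue */
        printf("forward request for VF %d\n", vf_id);
        i = vf_id + 1;
    }
    return 0;
}
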
792 void bnxt_update_vf_mac(struct bnxt *bp) bnxt_update_vf_mac() argument
795 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; bnxt_update_vf_mac()
797 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); bnxt_update_vf_mac()
800 mutex_lock(&bp->hwrm_cmd_lock); bnxt_update_vf_mac()
801 if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) bnxt_update_vf_mac()
807 if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) bnxt_update_vf_mac()
808 memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); bnxt_update_vf_mac()
810 memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); bnxt_update_vf_mac()
812 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_update_vf_mac()
817 void bnxt_sriov_disable(struct bnxt *bp) bnxt_sriov_disable() argument
821 void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) bnxt_hwrm_exec_fwd_req() argument
823 netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n"); bnxt_hwrm_exec_fwd_req()
826 void bnxt_update_vf_mac(struct bnxt *bp) bnxt_update_vf_mac() argument
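
Every VF ndo in the bnxt_sriov.c hits funnels through bnxt_vf_ndo_prep() (excerpt lines 22-33), which rejects calls while the PF is down, when SR-IOV is off, or for an out-of-range VF id. A hedged sketch of that guard as a plain function; the state struct is a simplified stand-in, not the driver's struct bnxt.

/* Hedged sketch of the common VF-ndo guard from bnxt_vf_ndo_prep(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pf_state {
    bool open;          /* BNXT_STATE_OPEN analogue */
    int  active_vfs;    /* SR-IOV enabled iff > 0 */
    int  max_vfs;
};

static int vf_ndo_prep(const struct pf_state *pf, int vf_id)
{
    if (!pf->open)
        return -EINVAL;            /* PF is down */
    if (!pf->active_vfs)
        return -EINVAL;            /* SR-IOV disabled */
    if (vf_id >= pf->max_vfs)
        return -EINVAL;            /* no such VF */
    return 0;
}

int main(void)
{
    struct pf_state pf = { .open = true, .active_vfs = 4, .max_vfs = 8 };
    printf("vf 3 -> %d\n", vf_ndo_prep(&pf, 3));   /* 0 */
    printf("vf 9 -> %d\n", vf_ndo_prep(&pf, 9));   /* -EINVAL */
    return 0;
}
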
H A Dbnxt.c133 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) bnxt_tx_avail() argument
138 return bp->tx_ring_size - bnxt_tx_avail()
139 ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); bnxt_tx_avail()
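
bnxt_tx_avail() (excerpt lines 133-139) computes free descriptors as ring_size - ((prod - cons) & mask), which stays correct when the 16-bit producer and consumer indices wrap because the ring size is a power of two. A small self-contained demo of that arithmetic:

/* Hedged demo of the bnxt_tx_avail() wraparound arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u
#define RING_MASK (RING_SIZE - 1)

static unsigned tx_avail(uint16_t prod, uint16_t cons)
{
    /* (prod - cons) & mask gives in-flight descriptors even after wrap. */
    return RING_SIZE - ((uint16_t)(prod - cons) & RING_MASK);
}

int main(void)
{
    printf("%u\n", tx_avail(10, 0));       /* 246: 10 in flight */
    printf("%u\n", tx_avail(3, 65530));    /* 247: wrapped, 9 in flight */
    return 0;
}
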
166 struct bnxt *bp = netdev_priv(dev); bnxt_start_xmit() local
175 struct pci_dev *pdev = bp->pdev; bnxt_start_xmit()
181 if (unlikely(i >= bp->tx_nr_rings)) { bnxt_start_xmit()
186 bnapi = bp->bnapi[i]; bnxt_start_xmit()
191 free_size = bnxt_tx_avail(bp, txr); bnxt_start_xmit()
221 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { bnxt_start_xmit()
380 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { bnxt_start_xmit()
389 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) bnxt_start_xmit()
418 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) bnxt_tx_int() argument
422 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); bnxt_tx_int()
424 struct pci_dev *pdev = bp->pdev; bnxt_tx_int()
475 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { bnxt_tx_int()
478 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && bnxt_tx_int()
485 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, __bnxt_alloc_rx_data() argument
489 struct pci_dev *pdev = bp->pdev; __bnxt_alloc_rx_data()
491 data = kmalloc(bp->rx_buf_size, gfp); __bnxt_alloc_rx_data()
496 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); __bnxt_alloc_rx_data()
505 static inline int bnxt_alloc_rx_data(struct bnxt *bp, bnxt_alloc_rx_data() argument
514 data = __bnxt_alloc_rx_data(bp, &mapping, gfp); bnxt_alloc_rx_data()
557 static inline int bnxt_alloc_rx_page(struct bnxt *bp, bnxt_alloc_rx_page() argument
564 struct pci_dev *pdev = bp->pdev; bnxt_alloc_rx_page()
597 struct bnxt *bp = bnapi->bp; bnxt_reuse_rx_agg_bufs() local
645 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, bnxt_rx_skb() argument
653 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); bnxt_rx_skb()
660 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bnxt_rx_skb()
672 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, bnxt_rx_pages() argument
676 struct pci_dev *pdev = bp->pdev; bnxt_rx_pages()
707 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { bnxt_rx_pages()
741 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, bnxt_agg_bufs_valid() argument
758 struct bnxt *bp = bnapi->bp; bnxt_copy_skb() local
759 struct pci_dev *pdev = bp->pdev; bnxt_copy_skb()
767 bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); bnxt_copy_skb()
772 bp->rx_copy_thresh, bnxt_copy_skb()
779 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, bnxt_tpa_start() argument
825 if (netif_msg_rx_err(bp)) bnxt_tpa_start()
826 netdev_warn(bp->dev, "TPA packet without valid hash\n"); bnxt_tpa_start()
840 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, bnxt_abort_tpa() argument
924 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, bnxt_tpa_end() argument
951 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) bnxt_tpa_end()
959 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); bnxt_tpa_end()
960 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", bnxt_tpa_end()
965 if (len <= bp->rx_copy_thresh) { bnxt_tpa_end()
968 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); bnxt_tpa_end()
975 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); bnxt_tpa_end()
977 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); bnxt_tpa_end()
985 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bnxt_tpa_end()
990 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); bnxt_tpa_end()
998 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); bnxt_tpa_end()
1004 skb->protocol = eth_type_trans(skb, bp->dev); bnxt_tpa_end()
1044 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, bnxt_rx_pkt() argument
1049 struct net_device *dev = bp->dev; bnxt_rx_pkt()
1077 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, bnxt_rx_pkt()
1083 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, bnxt_rx_pkt()
1113 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) bnxt_rx_pkt()
1133 if (len <= bp->rx_copy_thresh) { bnxt_rx_pkt()
1141 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); bnxt_rx_pkt()
1149 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); bnxt_rx_pkt()
1211 static int bnxt_async_event_process(struct bnxt *bp, bnxt_async_event_process() argument
1219 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); bnxt_async_event_process()
1220 schedule_work(&bp->sp_task); bnxt_async_event_process()
1223 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", bnxt_async_event_process()
1230 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) bnxt_hwrm_handler() argument
1240 if (seq_id == bp->hwrm_intr_seq_id) bnxt_hwrm_handler()
1241 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; bnxt_hwrm_handler()
1243 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); bnxt_hwrm_handler()
1249 if ((vf_id < bp->pf.first_vf_id) || bnxt_hwrm_handler()
1250 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { bnxt_hwrm_handler()
1251 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", bnxt_hwrm_handler()
1256 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); bnxt_hwrm_handler()
1257 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); bnxt_hwrm_handler()
1258 schedule_work(&bp->sp_task); bnxt_hwrm_handler()
1262 bnxt_async_event_process(bp, bnxt_hwrm_handler()
1275 struct bnxt *bp = bnapi->bp; bnxt_msix() local
1284 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) bnxt_has_work() argument
1298 struct bnxt *bp = bnapi->bp; bnxt_inta() local
1305 if (!bnxt_has_work(bp, cpr)) { bnxt_inta()
1306 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); bnxt_inta()
1316 if (unlikely(atomic_read(&bp->intr_sem) != 0)) bnxt_inta()
1323 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnxt_poll_work() argument
1346 if (unlikely(tx_pkts > bp->tx_wake_thresh)) bnxt_poll_work()
1349 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); bnxt_poll_work()
1361 bnxt_hwrm_handler(bp, txcmp); bnxt_poll_work()
1377 bnxt_tx_int(bp, bnapi, tx_pkts); bnxt_poll_work()
1397 struct bnxt *bp = bnapi->bp; bnxt_poll() local
1405 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); bnxt_poll()
1410 if (!bnxt_has_work(bp, cpr)) { bnxt_poll()
1425 struct bnxt *bp = bnapi->bp; bnxt_busy_poll() local
1429 if (atomic_read(&bp->intr_sem) != 0) bnxt_busy_poll()
1435 rx_work = bnxt_poll_work(bp, bnapi, budget); bnxt_busy_poll()
1444 static void bnxt_free_tx_skbs(struct bnxt *bp) bnxt_free_tx_skbs() argument
1447 struct pci_dev *pdev = bp->pdev; bnxt_free_tx_skbs()
1449 if (!bp->bnapi) bnxt_free_tx_skbs()
1452 max_idx = bp->tx_nr_pages * TX_DESC_CNT; bnxt_free_tx_skbs()
1453 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_free_tx_skbs()
1454 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_tx_skbs()
1498 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); bnxt_free_tx_skbs()
1502 static void bnxt_free_rx_skbs(struct bnxt *bp) bnxt_free_rx_skbs() argument
1505 struct pci_dev *pdev = bp->pdev; bnxt_free_rx_skbs()
1507 if (!bp->bnapi) bnxt_free_rx_skbs()
1510 max_idx = bp->rx_nr_pages * RX_DESC_CNT; bnxt_free_rx_skbs()
1511 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; bnxt_free_rx_skbs()
1512 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_free_rx_skbs()
1513 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_rx_skbs()
1534 bp->rx_buf_use_size, bnxt_free_rx_skbs()
1552 bp->rx_buf_use_size, bnxt_free_rx_skbs()
1580 static void bnxt_free_skbs(struct bnxt *bp) bnxt_free_skbs() argument
1582 bnxt_free_tx_skbs(bp); bnxt_free_skbs()
1583 bnxt_free_rx_skbs(bp); bnxt_free_skbs()
1586 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) bnxt_free_ring() argument
1588 struct pci_dev *pdev = bp->pdev; bnxt_free_ring()
1611 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) bnxt_alloc_ring() argument
1614 struct pci_dev *pdev = bp->pdev; bnxt_alloc_ring()
1645 static void bnxt_free_rx_rings(struct bnxt *bp) bnxt_free_rx_rings() argument
1649 if (!bp->bnapi) bnxt_free_rx_rings()
1652 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_free_rx_rings()
1653 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_rx_rings()
1669 bnxt_free_ring(bp, ring); bnxt_free_rx_rings()
1672 bnxt_free_ring(bp, ring); bnxt_free_rx_rings()
1676 static int bnxt_alloc_rx_rings(struct bnxt *bp) bnxt_alloc_rx_rings() argument
1680 if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_alloc_rx_rings()
1683 if (bp->flags & BNXT_FLAG_TPA) bnxt_alloc_rx_rings()
1686 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_alloc_rx_rings()
1687 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_alloc_rx_rings()
1697 rc = bnxt_alloc_ring(bp, ring); bnxt_alloc_rx_rings()
1705 rc = bnxt_alloc_ring(bp, ring); bnxt_alloc_rx_rings()
1709 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; bnxt_alloc_rx_rings()
1727 static void bnxt_free_tx_rings(struct bnxt *bp) bnxt_free_tx_rings() argument
1730 struct pci_dev *pdev = bp->pdev; bnxt_free_tx_rings()
1732 if (!bp->bnapi) bnxt_free_tx_rings()
1735 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_free_tx_rings()
1736 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_tx_rings()
1746 dma_free_coherent(&pdev->dev, bp->tx_push_size, bnxt_free_tx_rings()
1753 bnxt_free_ring(bp, ring); bnxt_free_tx_rings()
1757 static int bnxt_alloc_tx_rings(struct bnxt *bp) bnxt_alloc_tx_rings() argument
1760 struct pci_dev *pdev = bp->pdev; bnxt_alloc_tx_rings()
1762 bp->tx_push_size = 0; bnxt_alloc_tx_rings()
1763 if (bp->tx_push_thresh) { bnxt_alloc_tx_rings()
1767 bp->tx_push_thresh); bnxt_alloc_tx_rings()
1771 bp->tx_push_thresh = 0; bnxt_alloc_tx_rings()
1774 bp->tx_push_size = push_size; bnxt_alloc_tx_rings()
1777 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { bnxt_alloc_tx_rings()
1778 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_alloc_tx_rings()
1788 rc = bnxt_alloc_ring(bp, ring); bnxt_alloc_tx_rings()
1792 if (bp->tx_push_size) { bnxt_alloc_tx_rings()
1800 bp->tx_push_size, bnxt_alloc_tx_rings()
1815 ring->queue_id = bp->q_info[j].queue_id; bnxt_alloc_tx_rings()
1816 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) bnxt_alloc_tx_rings()
1822 static void bnxt_free_cp_rings(struct bnxt *bp) bnxt_free_cp_rings() argument
1826 if (!bp->bnapi) bnxt_free_cp_rings()
1829 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_free_cp_rings()
1830 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_cp_rings()
1840 bnxt_free_ring(bp, ring); bnxt_free_cp_rings()
1844 static int bnxt_alloc_cp_rings(struct bnxt *bp) bnxt_alloc_cp_rings() argument
1848 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_alloc_cp_rings()
1849 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_alloc_cp_rings()
1859 rc = bnxt_alloc_ring(bp, ring); bnxt_alloc_cp_rings()
1866 static void bnxt_init_ring_struct(struct bnxt *bp) bnxt_init_ring_struct() argument
1870 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_init_ring_struct()
1871 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_init_ring_struct()
1882 ring->nr_pages = bp->cp_nr_pages; bnxt_init_ring_struct()
1890 ring->nr_pages = bp->rx_nr_pages; bnxt_init_ring_struct()
1894 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; bnxt_init_ring_struct()
1898 ring->nr_pages = bp->rx_agg_nr_pages; bnxt_init_ring_struct()
1902 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; bnxt_init_ring_struct()
1907 ring->nr_pages = bp->tx_nr_pages; bnxt_init_ring_struct()
1911 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; bnxt_init_ring_struct()
1938 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) bnxt_init_one_rx_ring() argument
1940 struct net_device *dev = bp->dev; bnxt_init_one_rx_ring()
1941 struct bnxt_napi *bnapi = bp->bnapi[ring_nr]; bnxt_init_one_rx_ring()
1950 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | bnxt_init_one_rx_ring()
1961 for (i = 0; i < bp->rx_ring_size; i++) { bnxt_init_one_rx_ring()
1962 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { bnxt_init_one_rx_ring()
1964 ring_nr, i, bp->rx_ring_size); bnxt_init_one_rx_ring()
1972 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) bnxt_init_one_rx_ring()
1983 for (i = 0; i < bp->rx_agg_ring_size; i++) { bnxt_init_one_rx_ring()
1984 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { bnxt_init_one_rx_ring()
1986 ring_nr, i, bp->rx_ring_size); bnxt_init_one_rx_ring()
1994 if (bp->flags & BNXT_FLAG_TPA) { bnxt_init_one_rx_ring()
2000 data = __bnxt_alloc_rx_data(bp, &mapping, bnxt_init_one_rx_ring()
2009 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); bnxt_init_one_rx_ring()
2017 static int bnxt_init_rx_rings(struct bnxt *bp) bnxt_init_rx_rings() argument
2021 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_init_rx_rings()
2022 rc = bnxt_init_one_rx_ring(bp, i); bnxt_init_rx_rings()
2030 static int bnxt_init_tx_rings(struct bnxt *bp) bnxt_init_tx_rings() argument
2034 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, bnxt_init_tx_rings()
2037 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_init_tx_rings()
2038 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_init_tx_rings()
2048 static void bnxt_free_ring_grps(struct bnxt *bp) bnxt_free_ring_grps() argument
2050 kfree(bp->grp_info); bnxt_free_ring_grps()
2051 bp->grp_info = NULL; bnxt_free_ring_grps()
2054 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) bnxt_init_ring_grps() argument
2059 bp->grp_info = kcalloc(bp->cp_nr_rings, bnxt_init_ring_grps()
2062 if (!bp->grp_info) bnxt_init_ring_grps()
2065 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_init_ring_grps()
2067 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; bnxt_init_ring_grps()
2068 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; bnxt_init_ring_grps()
2069 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; bnxt_init_ring_grps()
2070 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; bnxt_init_ring_grps()
2071 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; bnxt_init_ring_grps()
2076 static void bnxt_free_vnics(struct bnxt *bp) bnxt_free_vnics() argument
2078 kfree(bp->vnic_info); bnxt_free_vnics()
2079 bp->vnic_info = NULL; bnxt_free_vnics()
2080 bp->nr_vnics = 0; bnxt_free_vnics()
2083 static int bnxt_alloc_vnics(struct bnxt *bp) bnxt_alloc_vnics() argument
2088 if (bp->flags & BNXT_FLAG_RFS) bnxt_alloc_vnics()
2089 num_vnics += bp->rx_nr_rings; bnxt_alloc_vnics()
2092 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), bnxt_alloc_vnics()
2094 if (!bp->vnic_info) bnxt_alloc_vnics()
2097 bp->nr_vnics = num_vnics; bnxt_alloc_vnics()
2101 static void bnxt_init_vnics(struct bnxt *bp) bnxt_init_vnics() argument
2105 for (i = 0; i < bp->nr_vnics; i++) { bnxt_init_vnics()
2106 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; bnxt_init_vnics()
2112 if (bp->vnic_info[i].rss_hash_key) { bnxt_init_vnics()
2118 bp->vnic_info[0].rss_hash_key, bnxt_init_vnics()
2141 static void bnxt_set_tpa_flags(struct bnxt *bp) bnxt_set_tpa_flags() argument
2143 bp->flags &= ~BNXT_FLAG_TPA; bnxt_set_tpa_flags()
2144 if (bp->dev->features & NETIF_F_LRO) bnxt_set_tpa_flags()
2145 bp->flags |= BNXT_FLAG_LRO; bnxt_set_tpa_flags()
2146 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) bnxt_set_tpa_flags()
2147 bp->flags |= BNXT_FLAG_GRO; bnxt_set_tpa_flags()
2150 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2153 void bnxt_set_ring_params(struct bnxt *bp) bnxt_set_ring_params() argument
2159 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); bnxt_set_ring_params()
2164 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; bnxt_set_ring_params()
2165 ring_size = bp->rx_ring_size; bnxt_set_ring_params()
2166 bp->rx_agg_ring_size = 0; bnxt_set_ring_params()
2167 bp->rx_agg_nr_pages = 0; bnxt_set_ring_params()
2169 if (bp->flags & BNXT_FLAG_TPA) bnxt_set_ring_params()
2172 bp->flags &= ~BNXT_FLAG_JUMBO; bnxt_set_ring_params()
2176 bp->flags |= BNXT_FLAG_JUMBO; bnxt_set_ring_params()
2177 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; bnxt_set_ring_params()
2184 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, bnxt_set_ring_params()
2186 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { bnxt_set_ring_params()
2189 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; bnxt_set_ring_params()
2191 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", bnxt_set_ring_params()
2194 bp->rx_agg_ring_size = agg_ring_size; bnxt_set_ring_params()
2195 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; bnxt_set_ring_params()
2201 bp->rx_buf_use_size = rx_size; bnxt_set_ring_params()
2202 bp->rx_buf_size = rx_space; bnxt_set_ring_params()
2204 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); bnxt_set_ring_params()
2205 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; bnxt_set_ring_params()
2207 ring_size = bp->tx_ring_size; bnxt_set_ring_params()
2208 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bnxt_set_ring_params()
2209 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; bnxt_set_ring_params()
2211 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; bnxt_set_ring_params()
2212 bp->cp_ring_size = ring_size; bnxt_set_ring_params()
2214 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); bnxt_set_ring_params()
2215 if (bp->cp_nr_pages > MAX_CP_PAGES) { bnxt_set_ring_params()
2216 bp->cp_nr_pages = MAX_CP_PAGES; bnxt_set_ring_params()
2217 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; bnxt_set_ring_params()
2218 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", bnxt_set_ring_params()
2219 ring_size, bp->cp_ring_size); bnxt_set_ring_params()
2221 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; bnxt_set_ring_params()
2222 bp->cp_ring_mask = bp->cp_bit - 1; bnxt_set_ring_params()
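
bnxt_set_ring_params() (excerpt lines 2204-2222) rounds every ring up to whole pages of descriptors and derives a power-of-two index mask from the page count; bnxt_calc_nr_ring_pages is effectively a ceiling division. A hedged sketch of the math; the DESC_CNT value here is illustrative, not the driver's constant.

/* Hedged sketch of the ring sizing math in bnxt_set_ring_params(). */
#include <stdio.h>

#define DESC_CNT 256   /* descriptors per page; assumed for illustration */

static int calc_nr_ring_pages(int ring_size)
{
    return (ring_size + DESC_CNT - 1) / DESC_CNT;   /* ceiling division */
}

int main(void)
{
    int ring_size = 1000;
    int nr_pages  = calc_nr_ring_pages(ring_size);  /* 4 */
    int ring_mask = nr_pages * DESC_CNT - 1;        /* 1023 */

    printf("pages=%d mask=0x%x\n", nr_pages, ring_mask);
    return 0;
}
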
2225 static void bnxt_free_vnic_attributes(struct bnxt *bp) bnxt_free_vnic_attributes() argument
2229 struct pci_dev *pdev = bp->pdev; bnxt_free_vnic_attributes()
2231 if (!bp->vnic_info) bnxt_free_vnic_attributes()
2234 for (i = 0; i < bp->nr_vnics; i++) { bnxt_free_vnic_attributes()
2235 vnic = &bp->vnic_info[i]; bnxt_free_vnic_attributes()
2261 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) bnxt_alloc_vnic_attributes() argument
2265 struct pci_dev *pdev = bp->pdev; bnxt_alloc_vnic_attributes()
2268 for (i = 0; i < bp->nr_vnics; i++) { bnxt_alloc_vnic_attributes()
2269 vnic = &bp->vnic_info[i]; bnxt_alloc_vnic_attributes()
2297 max_rings = bp->rx_nr_rings; bnxt_alloc_vnic_attributes()
2327 static void bnxt_free_hwrm_resources(struct bnxt *bp) bnxt_free_hwrm_resources() argument
2329 struct pci_dev *pdev = bp->pdev; bnxt_free_hwrm_resources()
2331 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, bnxt_free_hwrm_resources()
2332 bp->hwrm_cmd_resp_dma_addr); bnxt_free_hwrm_resources()
2334 bp->hwrm_cmd_resp_addr = NULL; bnxt_free_hwrm_resources()
2335 if (bp->hwrm_dbg_resp_addr) { bnxt_free_hwrm_resources()
2337 bp->hwrm_dbg_resp_addr, bnxt_free_hwrm_resources()
2338 bp->hwrm_dbg_resp_dma_addr); bnxt_free_hwrm_resources()
2340 bp->hwrm_dbg_resp_addr = NULL; bnxt_free_hwrm_resources()
2344 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) bnxt_alloc_hwrm_resources() argument
2346 struct pci_dev *pdev = bp->pdev; bnxt_alloc_hwrm_resources()
2348 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, bnxt_alloc_hwrm_resources()
2349 &bp->hwrm_cmd_resp_dma_addr, bnxt_alloc_hwrm_resources()
2351 if (!bp->hwrm_cmd_resp_addr) bnxt_alloc_hwrm_resources()
2353 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, bnxt_alloc_hwrm_resources()
2355 &bp->hwrm_dbg_resp_dma_addr, bnxt_alloc_hwrm_resources()
2357 if (!bp->hwrm_dbg_resp_addr) bnxt_alloc_hwrm_resources()
2358 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); bnxt_alloc_hwrm_resources()
2363 static void bnxt_free_stats(struct bnxt *bp) bnxt_free_stats() argument
2366 struct pci_dev *pdev = bp->pdev; bnxt_free_stats()
2368 if (!bp->bnapi) bnxt_free_stats()
2373 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_free_stats()
2374 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_free_stats()
2385 static int bnxt_alloc_stats(struct bnxt *bp) bnxt_alloc_stats() argument
2388 struct pci_dev *pdev = bp->pdev; bnxt_alloc_stats()
2392 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_alloc_stats()
2393 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_alloc_stats()
2407 static void bnxt_clear_ring_indices(struct bnxt *bp) bnxt_clear_ring_indices() argument
2411 if (!bp->bnapi) bnxt_clear_ring_indices()
2414 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_clear_ring_indices()
2415 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_clear_ring_indices()
2437 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) bnxt_free_ntp_fltrs() argument
2450 head = &bp->ntp_fltr_hash_tbl[i]; hlist_for_each_entry_safe()
2457 kfree(bp->ntp_fltr_bmap);
2458 bp->ntp_fltr_bmap = NULL;
2460 bp->ntp_fltr_count = 0;
2464 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) bnxt_alloc_ntp_fltrs() argument
2469 if (!(bp->flags & BNXT_FLAG_RFS)) bnxt_alloc_ntp_fltrs()
2473 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); bnxt_alloc_ntp_fltrs()
2475 bp->ntp_fltr_count = 0; bnxt_alloc_ntp_fltrs()
2476 bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), bnxt_alloc_ntp_fltrs()
2479 if (!bp->ntp_fltr_bmap) bnxt_alloc_ntp_fltrs()
2488 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_mem() argument
2490 bnxt_free_vnic_attributes(bp); bnxt_free_mem()
2491 bnxt_free_tx_rings(bp); bnxt_free_mem()
2492 bnxt_free_rx_rings(bp); bnxt_free_mem()
2493 bnxt_free_cp_rings(bp); bnxt_free_mem()
2494 bnxt_free_ntp_fltrs(bp, irq_re_init); bnxt_free_mem()
2496 bnxt_free_stats(bp); bnxt_free_mem()
2497 bnxt_free_ring_grps(bp); bnxt_free_mem()
2498 bnxt_free_vnics(bp); bnxt_free_mem()
2499 kfree(bp->bnapi); bnxt_free_mem()
2500 bp->bnapi = NULL; bnxt_free_mem()
2502 bnxt_clear_ring_indices(bp); bnxt_free_mem()
2506 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) bnxt_alloc_mem() argument
2516 bp->cp_nr_rings); bnxt_alloc_mem()
2518 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); bnxt_alloc_mem()
2522 bp->bnapi = bnapi; bnxt_alloc_mem()
2524 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { bnxt_alloc_mem()
2525 bp->bnapi[i] = bnapi; bnxt_alloc_mem()
2526 bp->bnapi[i]->index = i; bnxt_alloc_mem()
2527 bp->bnapi[i]->bp = bp; bnxt_alloc_mem()
2530 rc = bnxt_alloc_stats(bp); bnxt_alloc_mem()
2534 rc = bnxt_alloc_ntp_fltrs(bp); bnxt_alloc_mem()
2538 rc = bnxt_alloc_vnics(bp); bnxt_alloc_mem()
2543 bnxt_init_ring_struct(bp); bnxt_alloc_mem()
2545 rc = bnxt_alloc_rx_rings(bp); bnxt_alloc_mem()
2549 rc = bnxt_alloc_tx_rings(bp); bnxt_alloc_mem()
2553 rc = bnxt_alloc_cp_rings(bp); bnxt_alloc_mem()
2557 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | bnxt_alloc_mem()
2559 rc = bnxt_alloc_vnic_attributes(bp); bnxt_alloc_mem()
2565 bnxt_free_mem(bp, true); bnxt_alloc_mem()
2569 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, bnxt_hwrm_cmd_hdr_init() argument
2577 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); bnxt_hwrm_cmd_hdr_init()
2580 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) _hwrm_send_message() argument
2587 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; _hwrm_send_message()
2589 req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++); _hwrm_send_message()
2597 __iowrite32_copy(bp->bar0, data, msg_len / 4); _hwrm_send_message()
2601 bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & _hwrm_send_message()
2605 writel(1, bp->bar0 + 0x100); _hwrm_send_message()
2610 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && _hwrm_send_message()
2615 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { _hwrm_send_message()
2616 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", _hwrm_send_message()
2622 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; _hwrm_send_message()
2632 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", _hwrm_send_message()
2639 valid = bp->hwrm_cmd_resp_addr + len - 4; _hwrm_send_message()
2647 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", _hwrm_send_message()
2656 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", _hwrm_send_message()
2664 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) hwrm_send_message() argument
2668 mutex_lock(&bp->hwrm_cmd_lock); hwrm_send_message()
2669 rc = _hwrm_send_message(bp, msg, msg_len, timeout); hwrm_send_message()
2670 mutex_unlock(&bp->hwrm_cmd_lock); hwrm_send_message()
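
The hwrm_send_message() hits (excerpt lines 2664-2670) show the locked/unlocked pairing around the HWRM mailbox: the public wrapper just takes hwrm_cmd_lock around _hwrm_send_message(), so paths that batch several commands, like the VF configuration loop earlier in this listing, take the lock once and call the underscored variant directly. A hedged pthread sketch of that two-level API; the names are stand-ins for the bnxt functions.

/* Hedged sketch of the locked/unlocked send pairing around the HWRM mailbox. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

static int __send(int msg)          /* _hwrm_send_message() analogue */
{
    printf("sending cmd %d\n", msg); /* caller must hold cmd_lock */
    return 0;
}

static int send_one(int msg)        /* hwrm_send_message() analogue */
{
    pthread_mutex_lock(&cmd_lock);
    int rc = __send(msg);
    pthread_mutex_unlock(&cmd_lock);
    return rc;
}

int main(void)
{
    send_one(1);                     /* single command */

    pthread_mutex_lock(&cmd_lock);   /* batch: lock once... */
    for (int i = 2; i <= 4; i++)
        __send(i);                   /* ...send several commands... */
    pthread_mutex_unlock(&cmd_lock); /* ...unlock once */
    return 0;
}
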
2674 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) bnxt_hwrm_func_drv_rgtr() argument
2679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); bnxt_hwrm_func_drv_rgtr()
2695 if (BNXT_PF(bp)) { bnxt_hwrm_func_drv_rgtr()
2710 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_func_drv_rgtr()
2713 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) bnxt_hwrm_tunnel_dst_port_free() argument
2718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); bnxt_hwrm_tunnel_dst_port_free()
2723 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; bnxt_hwrm_tunnel_dst_port_free()
2726 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; bnxt_hwrm_tunnel_dst_port_free()
2732 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_tunnel_dst_port_free()
2734 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", bnxt_hwrm_tunnel_dst_port_free()
2739 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, bnxt_hwrm_tunnel_dst_port_alloc() argument
2744 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_tunnel_dst_port_alloc()
2746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); bnxt_hwrm_tunnel_dst_port_alloc()
2751 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_tunnel_dst_port_alloc()
2752 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_tunnel_dst_port_alloc()
2754 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", bnxt_hwrm_tunnel_dst_port_alloc()
2760 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; bnxt_hwrm_tunnel_dst_port_alloc()
2763 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; bnxt_hwrm_tunnel_dst_port_alloc()
2765 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_tunnel_dst_port_alloc()
2769 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_cfa_l2_set_rx_mask() argument
2772 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_cfa_l2_set_rx_mask()
2774 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); bnxt_hwrm_cfa_l2_set_rx_mask()
2780 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_cfa_l2_set_rx_mask()
2784 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, bnxt_hwrm_cfa_ntuple_filter_free() argument
2789 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); bnxt_hwrm_cfa_ntuple_filter_free()
2791 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_cfa_ntuple_filter_free()
2810 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, bnxt_hwrm_cfa_ntuple_filter_alloc() argument
2816 bp->hwrm_cmd_resp_addr; bnxt_hwrm_cfa_ntuple_filter_alloc()
2818 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; bnxt_hwrm_cfa_ntuple_filter_alloc()
2820 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); bnxt_hwrm_cfa_ntuple_filter_alloc()
2821 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; bnxt_hwrm_cfa_ntuple_filter_alloc()
2841 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_cfa_ntuple_filter_alloc()
2842 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_cfa_ntuple_filter_alloc()
2845 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_cfa_ntuple_filter_alloc()
2850 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, bnxt_hwrm_set_vnic_filter() argument
2855 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_set_vnic_filter()
2857 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); bnxt_hwrm_set_vnic_filter()
2860 req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); bnxt_hwrm_set_vnic_filter()
2873 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_vnic_filter()
2874 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_set_vnic_filter()
2876 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = bnxt_hwrm_set_vnic_filter()
2878 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_vnic_filter()
2882 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) bnxt_hwrm_clear_vnic_filter() argument
2888 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_clear_vnic_filter()
2890 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; bnxt_hwrm_clear_vnic_filter()
2895 bnxt_hwrm_cmd_hdr_init(bp, &req, bnxt_hwrm_clear_vnic_filter()
2900 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_clear_vnic_filter()
2905 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_clear_vnic_filter()
2910 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) bnxt_hwrm_vnic_set_tpa() argument
2912 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_vnic_set_tpa()
2915 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); bnxt_hwrm_vnic_set_tpa()
2918 u16 mss = bp->dev->mtu - 40; bnxt_hwrm_vnic_set_tpa()
2954 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_set_tpa()
2957 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) bnxt_hwrm_vnic_set_rss() argument
2960 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_vnic_set_rss()
2966 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); bnxt_hwrm_vnic_set_rss()
2976 max_rings = bp->rx_nr_rings; bnxt_hwrm_vnic_set_rss()
2992 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_set_rss()
2995 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_vnic_set_hds() argument
2997 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_vnic_set_hds()
3000 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); bnxt_hwrm_vnic_set_hds()
3008 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); bnxt_hwrm_vnic_set_hds()
3009 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); bnxt_hwrm_vnic_set_hds()
3011 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_set_hds()
3014 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_vnic_ctx_free_one() argument
3018 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); bnxt_hwrm_vnic_ctx_free_one()
3020 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); bnxt_hwrm_vnic_ctx_free_one()
3022 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_ctx_free_one()
3023 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; bnxt_hwrm_vnic_ctx_free_one()
3026 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) bnxt_hwrm_vnic_ctx_free() argument
3030 for (i = 0; i < bp->nr_vnics; i++) { bnxt_hwrm_vnic_ctx_free()
3031 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; bnxt_hwrm_vnic_ctx_free()
3034 bnxt_hwrm_vnic_ctx_free_one(bp, i); bnxt_hwrm_vnic_ctx_free()
3036 bp->rsscos_nr_ctxs = 0; bnxt_hwrm_vnic_ctx_free()
3039 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_vnic_ctx_alloc() argument
3044 bp->hwrm_cmd_resp_addr; bnxt_hwrm_vnic_ctx_alloc()
3046 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, bnxt_hwrm_vnic_ctx_alloc()
3049 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_vnic_ctx_alloc()
3050 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_ctx_alloc()
3052 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = bnxt_hwrm_vnic_ctx_alloc()
3054 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_vnic_ctx_alloc()
3059 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_vnic_cfg() argument
3062 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; bnxt_hwrm_vnic_cfg()
3065 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); bnxt_hwrm_vnic_cfg()
3077 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); bnxt_hwrm_vnic_cfg()
3080 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + bnxt_hwrm_vnic_cfg()
3083 if (bp->flags & BNXT_FLAG_STRIP_VLAN) bnxt_hwrm_vnic_cfg()
3086 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_cfg()
3089 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) bnxt_hwrm_vnic_free_one() argument
3093 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { bnxt_hwrm_vnic_free_one()
3096 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); bnxt_hwrm_vnic_free_one()
3098 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); bnxt_hwrm_vnic_free_one()
3100 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_free_one()
3103 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; bnxt_hwrm_vnic_free_one()
3108 static void bnxt_hwrm_vnic_free(struct bnxt *bp) bnxt_hwrm_vnic_free() argument
3112 for (i = 0; i < bp->nr_vnics; i++) bnxt_hwrm_vnic_free()
3113 bnxt_hwrm_vnic_free_one(bp, i); bnxt_hwrm_vnic_free()
3116 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, bnxt_hwrm_vnic_alloc() argument
3121 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_vnic_alloc()
3125 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { bnxt_hwrm_vnic_alloc()
3126 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", bnxt_hwrm_vnic_alloc()
3130 bp->vnic_info[vnic_id].fw_grp_ids[j] = bnxt_hwrm_vnic_alloc()
3131 bp->grp_info[i].fw_grp_id; bnxt_hwrm_vnic_alloc()
3134 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; bnxt_hwrm_vnic_alloc()
3138 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); bnxt_hwrm_vnic_alloc()
3140 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_vnic_alloc()
3141 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_vnic_alloc()
3143 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); bnxt_hwrm_vnic_alloc()
3144 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_vnic_alloc()
3148 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) bnxt_hwrm_ring_grp_alloc() argument
3153 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_ring_grp_alloc()
3154 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_ring_grp_alloc()
3157 bp->hwrm_cmd_resp_addr; bnxt_hwrm_ring_grp_alloc()
3159 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); bnxt_hwrm_ring_grp_alloc()
3161 req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); bnxt_hwrm_ring_grp_alloc()
3162 req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); bnxt_hwrm_ring_grp_alloc()
3163 req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); bnxt_hwrm_ring_grp_alloc()
3164 req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); bnxt_hwrm_ring_grp_alloc()
3166 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_ring_grp_alloc()
3171 bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); bnxt_hwrm_ring_grp_alloc()
3173 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_ring_grp_alloc()
3177 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) bnxt_hwrm_ring_grp_free() argument
3183 if (!bp->grp_info) bnxt_hwrm_ring_grp_free()
3186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); bnxt_hwrm_ring_grp_free()
3188 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_ring_grp_free()
3189 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_ring_grp_free()
3190 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) bnxt_hwrm_ring_grp_free()
3193 cpu_to_le32(bp->grp_info[i].fw_grp_id); bnxt_hwrm_ring_grp_free()
3195 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_ring_grp_free()
3199 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; bnxt_hwrm_ring_grp_free()
3201 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_ring_grp_free()
3205 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, hwrm_ring_alloc_send_msg() argument
3212 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; hwrm_ring_alloc_send_msg()
3215 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); hwrm_ring_alloc_send_msg()
3235 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); hwrm_ring_alloc_send_msg()
3236 req.length = cpu_to_le32(bp->tx_ring_mask + 1); hwrm_ring_alloc_send_msg()
3242 req.length = cpu_to_le32(bp->rx_ring_mask + 1); hwrm_ring_alloc_send_msg()
3246 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); hwrm_ring_alloc_send_msg()
3250 req.length = cpu_to_le32(bp->cp_ring_mask + 1); hwrm_ring_alloc_send_msg()
3251 if (bp->flags & BNXT_FLAG_USING_MSIX) hwrm_ring_alloc_send_msg()
3255 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", hwrm_ring_alloc_send_msg()
3260 mutex_lock(&bp->hwrm_cmd_lock); hwrm_ring_alloc_send_msg()
3261 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); hwrm_ring_alloc_send_msg()
3264 mutex_unlock(&bp->hwrm_cmd_lock); hwrm_ring_alloc_send_msg()
3269 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", hwrm_ring_alloc_send_msg()
3274 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", hwrm_ring_alloc_send_msg()
3279 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", hwrm_ring_alloc_send_msg()
3284 netdev_err(bp->dev, "Invalid ring\n"); hwrm_ring_alloc_send_msg()
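Note on the pattern repeated throughout these HWRM helpers: every _hwrm_send_message() call sits between mutex_lock(&bp->hwrm_cmd_lock) and mutex_unlock(), and any fields needed from the reply are copied out of bp->hwrm_cmd_resp_addr before the unlock, because all commands share that single response buffer. A minimal userspace sketch of the same convention (hypothetical names; a pthread mutex stands in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver's shared response buffer. */
    struct fw_resp { int status; unsigned int ring_id; };

    static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fw_resp resp_buf;   /* one buffer, shared by all requests */

    /* Pretend "firmware": fills the shared response buffer. */
    static void fw_exec(int req_type, struct fw_resp *out)
    {
        out->status = 0;
        out->ring_id = 0x100u + (unsigned int)req_type;
    }

    /* Send a request and copy the reply out while still holding the lock,
     * mirroring how the driver reads resp-> fields before mutex_unlock(). */
    static int send_message(int req_type, unsigned int *ring_id)
    {
        pthread_mutex_lock(&cmd_lock);
        fw_exec(req_type, &resp_buf);
        int rc = resp_buf.status;
        if (!rc)
            *ring_id = resp_buf.ring_id;   /* read under the lock */
        pthread_mutex_unlock(&cmd_lock);
        return rc;
    }

    int main(void)
    {
        unsigned int id;
        if (!send_message(1, &id))
            printf("allocated ring 0x%x\n", id);
        return 0;
    }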
3292 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_hwrm_ring_alloc() argument
3296 if (bp->cp_nr_rings) { bnxt_hwrm_ring_alloc()
3297 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_ring_alloc()
3298 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_alloc()
3302 rc = hwrm_ring_alloc_send_msg(bp, ring, bnxt_hwrm_ring_alloc()
3307 cpr->cp_doorbell = bp->bar1 + i * 0x80; bnxt_hwrm_ring_alloc()
3309 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; bnxt_hwrm_ring_alloc()
3313 if (bp->tx_nr_rings) { bnxt_hwrm_ring_alloc()
3314 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_hwrm_ring_alloc()
3315 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_alloc()
3318 u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; bnxt_hwrm_ring_alloc()
3320 rc = hwrm_ring_alloc_send_msg(bp, ring, bnxt_hwrm_ring_alloc()
3325 txr->tx_doorbell = bp->bar1 + i * 0x80; bnxt_hwrm_ring_alloc()
3329 if (bp->rx_nr_rings) { bnxt_hwrm_ring_alloc()
3330 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_ring_alloc()
3331 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_alloc()
3335 rc = hwrm_ring_alloc_send_msg(bp, ring, bnxt_hwrm_ring_alloc()
3340 rxr->rx_doorbell = bp->bar1 + i * 0x80; bnxt_hwrm_ring_alloc()
3342 bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; bnxt_hwrm_ring_alloc()
3346 if (bp->flags & BNXT_FLAG_AGG_RINGS) { bnxt_hwrm_ring_alloc()
3347 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_ring_alloc()
3348 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_alloc()
3353 rc = hwrm_ring_alloc_send_msg(bp, ring, bnxt_hwrm_ring_alloc()
3355 bp->rx_nr_rings + i, bnxt_hwrm_ring_alloc()
3361 bp->bar1 + (bp->rx_nr_rings + i) * 0x80; bnxt_hwrm_ring_alloc()
3364 bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; bnxt_hwrm_ring_alloc()
3371 static int hwrm_ring_free_send_msg(struct bnxt *bp, hwrm_ring_free_send_msg() argument
3377 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; hwrm_ring_free_send_msg()
3380 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); hwrm_ring_free_send_msg()
3384 mutex_lock(&bp->hwrm_cmd_lock); hwrm_ring_free_send_msg()
3385 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); hwrm_ring_free_send_msg()
3387 mutex_unlock(&bp->hwrm_cmd_lock); hwrm_ring_free_send_msg()
3392 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", hwrm_ring_free_send_msg()
3396 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", hwrm_ring_free_send_msg()
3400 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", hwrm_ring_free_send_msg()
3404 netdev_err(bp->dev, "Invalid ring\n"); hwrm_ring_free_send_msg()
3411 static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) bnxt_hwrm_ring_free() argument
3415 if (!bp->bnapi) bnxt_hwrm_ring_free()
3418 if (bp->tx_nr_rings) { bnxt_hwrm_ring_free()
3419 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_hwrm_ring_free()
3420 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_free()
3423 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; bnxt_hwrm_ring_free()
3427 bp, ring, bnxt_hwrm_ring_free()
3436 if (bp->rx_nr_rings) { bnxt_hwrm_ring_free()
3437 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_ring_free()
3438 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_free()
3441 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; bnxt_hwrm_ring_free()
3445 bp, ring, bnxt_hwrm_ring_free()
3450 bp->grp_info[i].rx_fw_ring_id = bnxt_hwrm_ring_free()
3456 if (bp->rx_agg_nr_pages) { bnxt_hwrm_ring_free()
3457 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_hwrm_ring_free()
3458 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_free()
3462 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; bnxt_hwrm_ring_free()
3466 bp, ring, bnxt_hwrm_ring_free()
3471 bp->grp_info[i].agg_fw_ring_id = bnxt_hwrm_ring_free()
3477 if (bp->cp_nr_rings) { bnxt_hwrm_ring_free()
3478 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_ring_free()
3479 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_ring_free()
3485 bp, ring, bnxt_hwrm_ring_free()
3489 bp->grp_info[i].cp_fw_ring_id = bnxt_hwrm_ring_free()
3498 int bnxt_hwrm_set_coal(struct bnxt *bp) bnxt_hwrm_set_coal() argument
3506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, bnxt_hwrm_set_coal()
3510 max_buf = min_t(u16, bp->coal_bufs / 4, 2); bnxt_hwrm_set_coal()
3513 max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); bnxt_hwrm_set_coal()
3514 buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); bnxt_hwrm_set_coal()
3515 buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); bnxt_hwrm_set_coal()
3522 if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25) bnxt_hwrm_set_coal()
3531 req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks); bnxt_hwrm_set_coal()
3532 req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs); bnxt_hwrm_set_coal()
3534 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_coal()
3535 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_set_coal()
3536 req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); bnxt_hwrm_set_coal()
3538 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_set_coal()
3543 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_coal()
3547 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) bnxt_hwrm_stat_ctx_free() argument
3552 if (!bp->bnapi) bnxt_hwrm_stat_ctx_free()
3555 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); bnxt_hwrm_stat_ctx_free()
3557 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_stat_ctx_free()
3558 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_stat_ctx_free()
3559 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_stat_ctx_free()
3565 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_stat_ctx_free()
3573 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_stat_ctx_free()
3577 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bnxt_hwrm_stat_ctx_alloc() argument
3581 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_stat_ctx_alloc()
3583 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); bnxt_hwrm_stat_ctx_alloc()
3587 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_stat_ctx_alloc()
3588 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_hwrm_stat_ctx_alloc()
3589 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_hwrm_stat_ctx_alloc()
3594 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_hwrm_stat_ctx_alloc()
3601 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; bnxt_hwrm_stat_ctx_alloc()
3603 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_stat_ctx_alloc()
3607 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) bnxt_hwrm_func_qcaps() argument
3611 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_func_qcaps()
3613 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); bnxt_hwrm_func_qcaps()
3616 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_qcaps()
3617 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_func_qcaps()
3621 if (BNXT_PF(bp)) { bnxt_hwrm_func_qcaps()
3622 struct bnxt_pf_info *pf = &bp->pf; bnxt_hwrm_func_qcaps()
3627 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); bnxt_hwrm_func_qcaps()
3647 struct bnxt_vf_info *vf = &bp->vf; bnxt_hwrm_func_qcaps()
3653 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); bnxt_hwrm_func_qcaps()
3655 random_ether_addr(bp->dev->dev_addr); bnxt_hwrm_func_qcaps()
3667 bp->tx_push_thresh = 0; bnxt_hwrm_func_qcaps()
3670 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; bnxt_hwrm_func_qcaps()
3673 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_func_qcaps()
3677 static int bnxt_hwrm_func_reset(struct bnxt *bp) bnxt_hwrm_func_reset() argument
3681 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); bnxt_hwrm_func_reset()
3684 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); bnxt_hwrm_func_reset()
3687 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) bnxt_hwrm_queue_qportcfg() argument
3691 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_queue_qportcfg()
3694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); bnxt_hwrm_queue_qportcfg()
3696 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_queue_qportcfg()
3697 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_queue_qportcfg()
3705 bp->max_tc = resp->max_configurable_queues; bnxt_hwrm_queue_qportcfg()
3706 if (bp->max_tc > BNXT_MAX_QUEUE) bnxt_hwrm_queue_qportcfg()
3707 bp->max_tc = BNXT_MAX_QUEUE; bnxt_hwrm_queue_qportcfg()
3710 for (i = 0; i < bp->max_tc; i++) { bnxt_hwrm_queue_qportcfg()
3711 bp->q_info[i].queue_id = *qptr++; bnxt_hwrm_queue_qportcfg()
3712 bp->q_info[i].queue_profile = *qptr++; bnxt_hwrm_queue_qportcfg()
3716 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_queue_qportcfg()
3720 static int bnxt_hwrm_ver_get(struct bnxt *bp) bnxt_hwrm_ver_get() argument
3724 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_ver_get()
3726 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); bnxt_hwrm_ver_get()
3730 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_ver_get()
3731 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_ver_get()
3735 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); bnxt_hwrm_ver_get()
3740 netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", bnxt_hwrm_ver_get()
3744 netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); bnxt_hwrm_ver_get()
3746 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", bnxt_hwrm_ver_get()
3751 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_ver_get()
3755 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) bnxt_hwrm_free_tunnel_ports() argument
3757 if (bp->vxlan_port_cnt) { bnxt_hwrm_free_tunnel_ports()
3759 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); bnxt_hwrm_free_tunnel_ports()
3761 bp->vxlan_port_cnt = 0; bnxt_hwrm_free_tunnel_ports()
3762 if (bp->nge_port_cnt) { bnxt_hwrm_free_tunnel_ports()
3764 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); bnxt_hwrm_free_tunnel_ports()
3766 bp->nge_port_cnt = 0; bnxt_hwrm_free_tunnel_ports()
3769 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) bnxt_set_tpa() argument
3775 tpa_flags = bp->flags & BNXT_FLAG_TPA; bnxt_set_tpa()
3776 for (i = 0; i < bp->nr_vnics; i++) { bnxt_set_tpa()
3777 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); bnxt_set_tpa()
3779 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", bnxt_set_tpa()
3787 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) bnxt_hwrm_clear_vnic_rss() argument
3791 for (i = 0; i < bp->nr_vnics; i++) bnxt_hwrm_clear_vnic_rss()
3792 bnxt_hwrm_vnic_set_rss(bp, i, false); bnxt_hwrm_clear_vnic_rss()
3795 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, bnxt_hwrm_resource_free() argument
3798 if (bp->vnic_info) { bnxt_hwrm_resource_free()
3799 bnxt_hwrm_clear_vnic_filter(bp); bnxt_hwrm_resource_free()
3801 bnxt_hwrm_clear_vnic_rss(bp); bnxt_hwrm_resource_free()
3802 bnxt_hwrm_vnic_ctx_free(bp); bnxt_hwrm_resource_free()
3804 if (bp->flags & BNXT_FLAG_TPA) bnxt_hwrm_resource_free()
3805 bnxt_set_tpa(bp, false); bnxt_hwrm_resource_free()
3806 bnxt_hwrm_vnic_free(bp); bnxt_hwrm_resource_free()
3808 bnxt_hwrm_ring_free(bp, close_path); bnxt_hwrm_resource_free()
3809 bnxt_hwrm_ring_grp_free(bp); bnxt_hwrm_resource_free()
3811 bnxt_hwrm_stat_ctx_free(bp); bnxt_hwrm_resource_free()
3812 bnxt_hwrm_free_tunnel_ports(bp); bnxt_hwrm_resource_free()
3816 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) bnxt_setup_vnic() argument
3821 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id); bnxt_setup_vnic()
3823 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", bnxt_setup_vnic()
3827 bp->rsscos_nr_ctxs++; bnxt_setup_vnic()
3830 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); bnxt_setup_vnic()
3832 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", bnxt_setup_vnic()
3838 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); bnxt_setup_vnic()
3840 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", bnxt_setup_vnic()
3845 if (bp->flags & BNXT_FLAG_AGG_RINGS) { bnxt_setup_vnic()
3846 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); bnxt_setup_vnic()
3848 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", bnxt_setup_vnic()
3857 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) bnxt_alloc_rfs_vnics() argument
3862 for (i = 0; i < bp->rx_nr_rings; i++) { bnxt_alloc_rfs_vnics()
3866 if (vnic_id >= bp->nr_vnics) bnxt_alloc_rfs_vnics()
3869 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; bnxt_alloc_rfs_vnics()
3870 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1); bnxt_alloc_rfs_vnics()
3872 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", bnxt_alloc_rfs_vnics()
3876 rc = bnxt_setup_vnic(bp, vnic_id); bnxt_alloc_rfs_vnics()
3888 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) bnxt_init_chip() argument
3893 rc = bnxt_hwrm_stat_ctx_alloc(bp); bnxt_init_chip()
3895 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", bnxt_init_chip()
3901 rc = bnxt_hwrm_ring_alloc(bp); bnxt_init_chip()
3903 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); bnxt_init_chip()
3907 rc = bnxt_hwrm_ring_grp_alloc(bp); bnxt_init_chip()
3909 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); bnxt_init_chip()
3914 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings); bnxt_init_chip()
3916 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); bnxt_init_chip()
3920 rc = bnxt_setup_vnic(bp, 0); bnxt_init_chip()
3924 if (bp->flags & BNXT_FLAG_RFS) { bnxt_init_chip()
3925 rc = bnxt_alloc_rfs_vnics(bp); bnxt_init_chip()
3930 if (bp->flags & BNXT_FLAG_TPA) { bnxt_init_chip()
3931 rc = bnxt_set_tpa(bp, true); bnxt_init_chip()
3936 if (BNXT_VF(bp)) bnxt_init_chip()
3937 bnxt_update_vf_mac(bp); bnxt_init_chip()
3940 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); bnxt_init_chip()
3942 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); bnxt_init_chip()
3945 bp->vnic_info[0].uc_filter_count = 1; bnxt_init_chip()
3947 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | bnxt_init_chip()
3950 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) bnxt_init_chip()
3951 bp->vnic_info[0].rx_mask |= bnxt_init_chip()
3954 rc = bnxt_cfg_rx_mode(bp); bnxt_init_chip()
3958 rc = bnxt_hwrm_set_coal(bp); bnxt_init_chip()
3960 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", bnxt_init_chip()
3966 bnxt_hwrm_resource_free(bp, 0, true); bnxt_init_chip()
3971 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) bnxt_shutdown_nic() argument
3973 bnxt_hwrm_resource_free(bp, 1, irq_re_init); bnxt_shutdown_nic()
3977 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) bnxt_init_nic() argument
3979 bnxt_init_rx_rings(bp); bnxt_init_nic()
3980 bnxt_init_tx_rings(bp); bnxt_init_nic()
3981 bnxt_init_ring_grps(bp, irq_re_init); bnxt_init_nic()
3982 bnxt_init_vnics(bp); bnxt_init_nic()
3984 return bnxt_init_chip(bp, irq_re_init); bnxt_init_nic()
3987 static void bnxt_disable_int(struct bnxt *bp) bnxt_disable_int() argument
3991 if (!bp->bnapi) bnxt_disable_int()
3994 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_disable_int()
3995 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_disable_int()
4002 static void bnxt_enable_int(struct bnxt *bp) bnxt_enable_int() argument
4006 atomic_set(&bp->intr_sem, 0); bnxt_enable_int()
4007 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_enable_int()
4008 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_enable_int()
4015 static int bnxt_set_real_num_queues(struct bnxt *bp) bnxt_set_real_num_queues() argument
4018 struct net_device *dev = bp->dev; bnxt_set_real_num_queues()
4020 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); bnxt_set_real_num_queues()
4024 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); bnxt_set_real_num_queues()
4029 if (bp->rx_nr_rings) bnxt_set_real_num_queues()
4030 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); bnxt_set_real_num_queues()
4038 static int bnxt_setup_msix(struct bnxt *bp) bnxt_setup_msix() argument
4041 struct net_device *dev = bp->dev; bnxt_setup_msix()
4043 const int len = sizeof(bp->irq_tbl[0].name); bnxt_setup_msix()
4045 bp->flags &= ~BNXT_FLAG_USING_MSIX; bnxt_setup_msix()
4046 total_vecs = bp->cp_nr_rings; bnxt_setup_msix()
4057 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); bnxt_setup_msix()
4063 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); bnxt_setup_msix()
4064 if (bp->irq_tbl) { bnxt_setup_msix()
4068 bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); bnxt_setup_msix()
4069 bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); bnxt_setup_msix()
4070 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bnxt_setup_msix()
4073 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; bnxt_setup_msix()
4074 if (bp->tx_nr_rings_per_tc == 0) { bnxt_setup_msix()
4076 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bnxt_setup_msix()
4080 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bnxt_setup_msix()
4082 count = bp->tx_nr_rings_per_tc; bnxt_setup_msix()
4088 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); bnxt_setup_msix()
4090 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_setup_msix()
4091 bp->irq_tbl[i].vector = msix_ent[i].vector; bnxt_setup_msix()
4092 snprintf(bp->irq_tbl[i].name, len, bnxt_setup_msix()
4094 bp->irq_tbl[i].handler = bnxt_msix; bnxt_setup_msix()
4096 rc = bnxt_set_real_num_queues(bp); bnxt_setup_msix()
4103 bp->flags |= BNXT_FLAG_USING_MSIX; bnxt_setup_msix()
4108 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc); bnxt_setup_msix()
4109 pci_disable_msix(bp->pdev); bnxt_setup_msix()
4114 static int bnxt_setup_inta(struct bnxt *bp) bnxt_setup_inta() argument
4117 const int len = sizeof(bp->irq_tbl[0].name); bnxt_setup_inta()
4119 if (netdev_get_num_tc(bp->dev)) bnxt_setup_inta()
4120 netdev_reset_tc(bp->dev); bnxt_setup_inta()
4122 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); bnxt_setup_inta()
4123 if (!bp->irq_tbl) { bnxt_setup_inta()
4127 bp->rx_nr_rings = 1; bnxt_setup_inta()
4128 bp->tx_nr_rings = 1; bnxt_setup_inta()
4129 bp->cp_nr_rings = 1; bnxt_setup_inta()
4130 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bnxt_setup_inta()
4131 bp->irq_tbl[0].vector = bp->pdev->irq; bnxt_setup_inta()
4132 snprintf(bp->irq_tbl[0].name, len, bnxt_setup_inta()
4133 "%s-%s-%d", bp->dev->name, "TxRx", 0); bnxt_setup_inta()
4134 bp->irq_tbl[0].handler = bnxt_inta; bnxt_setup_inta()
4135 rc = bnxt_set_real_num_queues(bp); bnxt_setup_inta()
4139 static int bnxt_setup_int_mode(struct bnxt *bp) bnxt_setup_int_mode() argument
4143 if (bp->flags & BNXT_FLAG_MSIX_CAP) bnxt_setup_int_mode()
4144 rc = bnxt_setup_msix(bp); bnxt_setup_int_mode()
4146 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { bnxt_setup_int_mode()
4148 rc = bnxt_setup_inta(bp); bnxt_setup_int_mode()
4153 static void bnxt_free_irq(struct bnxt *bp) bnxt_free_irq() argument
4159 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); bnxt_free_irq()
4160 bp->dev->rx_cpu_rmap = NULL; bnxt_free_irq()
4162 if (!bp->irq_tbl) bnxt_free_irq()
4165 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_free_irq()
4166 irq = &bp->irq_tbl[i]; bnxt_free_irq()
4168 free_irq(irq->vector, bp->bnapi[i]); bnxt_free_irq()
4171 if (bp->flags & BNXT_FLAG_USING_MSIX) bnxt_free_irq()
4172 pci_disable_msix(bp->pdev); bnxt_free_irq()
4173 kfree(bp->irq_tbl); bnxt_free_irq()
4174 bp->irq_tbl = NULL; bnxt_free_irq()
4177 static int bnxt_request_irq(struct bnxt *bp) bnxt_request_irq() argument
4182 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; bnxt_request_irq()
4185 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) bnxt_request_irq()
4188 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_request_irq()
4189 struct bnxt_irq *irq = &bp->irq_tbl[i]; bnxt_request_irq()
4191 if (rmap && (i < bp->rx_nr_rings)) { bnxt_request_irq()
4194 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", bnxt_request_irq()
4199 bp->bnapi[i]); bnxt_request_irq()
4208 static void bnxt_del_napi(struct bnxt *bp) bnxt_del_napi() argument
4212 if (!bp->bnapi) bnxt_del_napi()
4215 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_del_napi()
4216 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_del_napi()
4223 static void bnxt_init_napi(struct bnxt *bp) bnxt_init_napi() argument
4228 if (bp->flags & BNXT_FLAG_USING_MSIX) { bnxt_init_napi()
4229 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_init_napi()
4230 bnapi = bp->bnapi[i]; bnxt_init_napi()
4231 netif_napi_add(bp->dev, &bnapi->napi, bnxt_init_napi()
4236 bnapi = bp->bnapi[0]; bnxt_init_napi()
4237 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); bnxt_init_napi()
4242 static void bnxt_disable_napi(struct bnxt *bp) bnxt_disable_napi() argument
4246 if (!bp->bnapi) bnxt_disable_napi()
4249 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_disable_napi()
4250 napi_disable(&bp->bnapi[i]->napi); bnxt_disable_napi()
4251 bnxt_disable_poll(bp->bnapi[i]); bnxt_disable_napi()
4255 static void bnxt_enable_napi(struct bnxt *bp) bnxt_enable_napi() argument
4259 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_enable_napi()
4260 bnxt_enable_poll(bp->bnapi[i]); bnxt_enable_napi()
4261 napi_enable(&bp->bnapi[i]->napi); bnxt_enable_napi()
4265 static void bnxt_tx_disable(struct bnxt *bp) bnxt_tx_disable() argument
4272 if (bp->bnapi) { bnxt_tx_disable()
4273 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_tx_disable()
4274 bnapi = bp->bnapi[i]; bnxt_tx_disable()
4276 txq = netdev_get_tx_queue(bp->dev, i); bnxt_tx_disable()
4283 netif_tx_disable(bp->dev); bnxt_tx_disable()
4284 netif_carrier_off(bp->dev); bnxt_tx_disable()
4287 static void bnxt_tx_enable(struct bnxt *bp) bnxt_tx_enable() argument
4294 for (i = 0; i < bp->tx_nr_rings; i++) { bnxt_tx_enable()
4295 bnapi = bp->bnapi[i]; bnxt_tx_enable()
4297 txq = netdev_get_tx_queue(bp->dev, i); bnxt_tx_enable()
4300 netif_tx_wake_all_queues(bp->dev); bnxt_tx_enable()
4301 if (bp->link_info.link_up) bnxt_tx_enable()
4302 netif_carrier_on(bp->dev); bnxt_tx_enable()
4305 static void bnxt_report_link(struct bnxt *bp) bnxt_report_link() argument
4307 if (bp->link_info.link_up) { bnxt_report_link()
4312 netif_carrier_on(bp->dev); bnxt_report_link()
4313 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) bnxt_report_link()
4317 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) bnxt_report_link()
4319 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) bnxt_report_link()
4321 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) bnxt_report_link()
4325 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); bnxt_report_link()
4326 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", bnxt_report_link()
4329 netif_carrier_off(bp->dev); bnxt_report_link()
4330 netdev_err(bp->dev, "NIC Link is Down\n"); bnxt_report_link()
4334 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) bnxt_update_link() argument
4337 struct bnxt_link_info *link_info = &bp->link_info; bnxt_update_link()
4339 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; bnxt_update_link()
4342 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); bnxt_update_link()
4344 mutex_lock(&bp->hwrm_cmd_lock); bnxt_update_link()
4345 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_update_link()
4347 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_update_link()
4382 bnxt_report_link(bp); bnxt_update_link()
4387 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_update_link()
4392 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) bnxt_hwrm_set_pause_common() argument
4394 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { bnxt_hwrm_set_pause_common()
4395 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) bnxt_hwrm_set_pause_common()
4397 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) bnxt_hwrm_set_pause_common()
4402 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) bnxt_hwrm_set_pause_common()
4404 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) bnxt_hwrm_set_pause_common()
4411 static void bnxt_hwrm_set_link_common(struct bnxt *bp, bnxt_hwrm_set_link_common() argument
4414 u8 autoneg = bp->link_info.autoneg; bnxt_hwrm_set_link_common()
4415 u16 fw_link_speed = bp->link_info.req_link_speed; bnxt_hwrm_set_link_common()
4416 u32 advertising = bp->link_info.advertising; bnxt_hwrm_set_link_common()
4441 int bnxt_hwrm_set_pause(struct bnxt *bp) bnxt_hwrm_set_pause() argument
4446 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); bnxt_hwrm_set_pause()
4447 bnxt_hwrm_set_pause_common(bp, &req); bnxt_hwrm_set_pause()
4449 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || bnxt_hwrm_set_pause()
4450 bp->link_info.force_link_chng) bnxt_hwrm_set_pause()
4451 bnxt_hwrm_set_link_common(bp, &req); bnxt_hwrm_set_pause()
4453 mutex_lock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_pause()
4454 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_set_pause()
4455 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { bnxt_hwrm_set_pause()
4460 bp->link_info.pause = bnxt_hwrm_set_pause()
4461 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; bnxt_hwrm_set_pause()
4462 bp->link_info.auto_pause_setting = 0; bnxt_hwrm_set_pause()
4463 if (!bp->link_info.force_link_chng) bnxt_hwrm_set_pause()
4464 bnxt_report_link(bp); bnxt_hwrm_set_pause()
4466 bp->link_info.force_link_chng = false; bnxt_hwrm_set_pause()
4467 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_hwrm_set_pause()
4471 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) bnxt_hwrm_set_link_setting() argument
4475 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); bnxt_hwrm_set_link_setting()
4477 bnxt_hwrm_set_pause_common(bp, &req); bnxt_hwrm_set_link_setting()
4479 bnxt_hwrm_set_link_common(bp, &req); bnxt_hwrm_set_link_setting()
4480 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_hwrm_set_link_setting()
4483 static int bnxt_update_phy_setting(struct bnxt *bp) bnxt_update_phy_setting() argument
4488 struct bnxt_link_info *link_info = &bp->link_info; bnxt_update_phy_setting()
4490 rc = bnxt_update_link(bp, true); bnxt_update_phy_setting()
4492 netdev_err(bp->dev, "failed to update link (rc: %x)\n", bnxt_update_phy_setting()
4519 rc = bnxt_hwrm_set_link_setting(bp, update_pause); bnxt_update_phy_setting()
4521 rc = bnxt_hwrm_set_pause(bp); bnxt_update_phy_setting()
4523 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", bnxt_update_phy_setting()
4536 static void bnxt_preset_reg_win(struct bnxt *bp) bnxt_preset_reg_win() argument
4538 if (BNXT_PF(bp)) { bnxt_preset_reg_win()
4541 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); bnxt_preset_reg_win()
4545 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) __bnxt_open_nic() argument
4549 bnxt_preset_reg_win(bp); __bnxt_open_nic()
4550 netif_carrier_off(bp->dev); __bnxt_open_nic()
4552 rc = bnxt_setup_int_mode(bp); __bnxt_open_nic()
4554 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", __bnxt_open_nic()
4559 if ((bp->flags & BNXT_FLAG_RFS) && __bnxt_open_nic()
4560 !(bp->flags & BNXT_FLAG_USING_MSIX)) { __bnxt_open_nic()
4562 bp->dev->hw_features &= ~NETIF_F_NTUPLE; __bnxt_open_nic()
4563 bp->flags &= ~BNXT_FLAG_RFS; __bnxt_open_nic()
4566 rc = bnxt_alloc_mem(bp, irq_re_init); __bnxt_open_nic()
4568 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); __bnxt_open_nic()
4573 bnxt_init_napi(bp); __bnxt_open_nic()
4574 rc = bnxt_request_irq(bp); __bnxt_open_nic()
4576 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); __bnxt_open_nic()
4581 bnxt_enable_napi(bp); __bnxt_open_nic()
4583 rc = bnxt_init_nic(bp, irq_re_init); __bnxt_open_nic()
4585 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); __bnxt_open_nic()
4590 rc = bnxt_update_phy_setting(bp); __bnxt_open_nic()
4597 vxlan_get_rx_port(bp->dev); __bnxt_open_nic()
4600 bp, htons(0x17c1), __bnxt_open_nic()
4602 bp->nge_port_cnt = 1; __bnxt_open_nic()
4605 set_bit(BNXT_STATE_OPEN, &bp->state); __bnxt_open_nic()
4606 bnxt_enable_int(bp); __bnxt_open_nic()
4608 bnxt_tx_enable(bp); __bnxt_open_nic()
4609 mod_timer(&bp->timer, jiffies + bp->current_interval); __bnxt_open_nic()
4614 bnxt_disable_napi(bp); __bnxt_open_nic()
4615 bnxt_del_napi(bp); __bnxt_open_nic()
4618 bnxt_free_skbs(bp); __bnxt_open_nic()
4619 bnxt_free_irq(bp); __bnxt_open_nic()
4620 bnxt_free_mem(bp, true); __bnxt_open_nic()
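__bnxt_open_nic() above uses the classic kernel goto-unwind idiom: each failure jumps to a label that frees only what was already set up, in reverse order (disable and delete NAPI, then free skbs, IRQs, and memory). A self-contained sketch of that error-handling shape, with hypothetical resources in place of rings and IRQ tables:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical resources standing in for irq table, rings, etc. */
    static int alloc_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
    static int alloc_b(void **p) { *p = malloc(16); return *p ? 0 : -1; }
    static int alloc_c(void **p) { *p = malloc(16); return *p ? 0 : -1; }

    int open_nic(void)
    {
        void *a = NULL, *b = NULL, *c = NULL;
        int rc;

        rc = alloc_a(&a);
        if (rc)
            goto err;              /* nothing to undo yet */
        rc = alloc_b(&b);
        if (rc)
            goto err_free_a;
        rc = alloc_c(&c);
        if (rc)
            goto err_free_b;
        printf("open ok\n");
        free(c); free(b); free(a);
        return 0;

    err_free_b:                    /* unwind strictly in reverse order */
        free(b);
    err_free_a:
        free(a);
    err:
        return rc;
    }

    int main(void) { return open_nic(); }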
4625 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_open_nic() argument
4629 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); bnxt_open_nic()
4631 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); bnxt_open_nic()
4632 dev_close(bp->dev); bnxt_open_nic()
4639 struct bnxt *bp = netdev_priv(dev); bnxt_open() local
4642 rc = bnxt_hwrm_func_reset(bp); bnxt_open()
4644 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", bnxt_open()
4649 return __bnxt_open_nic(bp, true, true); bnxt_open()
4652 static void bnxt_disable_int_sync(struct bnxt *bp) bnxt_disable_int_sync() argument
4656 atomic_inc(&bp->intr_sem); bnxt_disable_int_sync()
4657 if (!netif_running(bp->dev)) bnxt_disable_int_sync()
4660 bnxt_disable_int(bp); bnxt_disable_int_sync()
4661 for (i = 0; i < bp->cp_nr_rings; i++) bnxt_disable_int_sync()
4662 synchronize_irq(bp->irq_tbl[i].vector); bnxt_disable_int_sync()
4665 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_close_nic() argument
4670 if (bp->sriov_cfg) { bnxt_close_nic()
4671 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, bnxt_close_nic()
4672 !bp->sriov_cfg, bnxt_close_nic()
4675 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); bnxt_close_nic()
4679 bnxt_tx_disable(bp); bnxt_close_nic()
4681 clear_bit(BNXT_STATE_OPEN, &bp->state); bnxt_close_nic()
4683 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) bnxt_close_nic()
4687 bnxt_shutdown_nic(bp, irq_re_init); bnxt_close_nic()
4691 bnxt_disable_napi(bp); bnxt_close_nic()
4692 bnxt_disable_int_sync(bp); bnxt_close_nic()
4693 del_timer_sync(&bp->timer); bnxt_close_nic()
4694 bnxt_free_skbs(bp); bnxt_close_nic()
4697 bnxt_free_irq(bp); bnxt_close_nic()
4698 bnxt_del_napi(bp); bnxt_close_nic()
4700 bnxt_free_mem(bp, irq_re_init); bnxt_close_nic()
4706 struct bnxt *bp = netdev_priv(dev); bnxt_close() local
4708 bnxt_close_nic(bp, true, true); bnxt_close()
4742 struct bnxt *bp = netdev_priv(dev); bnxt_get_stats64() local
4746 if (!bp->bnapi) bnxt_get_stats64()
4750 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_get_stats64()
4751 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_get_stats64()
4784 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) bnxt_mc_list_updated() argument
4786 struct net_device *dev = bp->dev; bnxt_mc_list_updated()
4787 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; bnxt_mc_list_updated()
4818 static bool bnxt_uc_list_updated(struct bnxt *bp) bnxt_uc_list_updated() argument
4820 struct net_device *dev = bp->dev; bnxt_uc_list_updated()
4821 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; bnxt_uc_list_updated()
4839 struct bnxt *bp = netdev_priv(dev); bnxt_set_rx_mode() local
4840 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; bnxt_set_rx_mode()
4853 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) bnxt_set_rx_mode()
4856 uc_update = bnxt_uc_list_updated(bp); bnxt_set_rx_mode()
4862 mc_update = bnxt_mc_list_updated(bp, &mask); bnxt_set_rx_mode()
4868 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); bnxt_set_rx_mode()
4869 schedule_work(&bp->sp_task); bnxt_set_rx_mode()
4873 static int bnxt_cfg_rx_mode(struct bnxt *bp) bnxt_cfg_rx_mode() argument
4875 struct net_device *dev = bp->dev; bnxt_cfg_rx_mode()
4876 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; bnxt_cfg_rx_mode()
4882 uc_update = bnxt_uc_list_updated(bp); bnxt_cfg_rx_mode()
4888 mutex_lock(&bp->hwrm_cmd_lock); bnxt_cfg_rx_mode()
4892 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, bnxt_cfg_rx_mode()
4897 rc = _hwrm_send_message(bp, &req, sizeof(req), bnxt_cfg_rx_mode()
4900 mutex_unlock(&bp->hwrm_cmd_lock); bnxt_cfg_rx_mode()
4917 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
4919 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4927 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
4929 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
4943 struct bnxt *bp = netdev_priv(dev); bnxt_set_features() local
4944 u32 flags = bp->flags; bnxt_set_features()
4951 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) bnxt_set_features()
4962 changes = flags ^ bp->flags; bnxt_set_features()
4965 if ((bp->flags & BNXT_FLAG_TPA) == 0 || bnxt_set_features()
4973 if (flags != bp->flags) { bnxt_set_features()
4974 u32 old_flags = bp->flags; bnxt_set_features()
4976 bp->flags = flags; bnxt_set_features()
4980 bnxt_set_ring_params(bp); bnxt_set_features()
4985 bnxt_close_nic(bp, false, false); bnxt_set_features()
4987 bnxt_set_ring_params(bp); bnxt_set_features()
4989 return bnxt_open_nic(bp, false, false); bnxt_set_features()
4992 rc = bnxt_set_tpa(bp, bnxt_set_features()
4996 bp->flags = old_flags; bnxt_set_features()
5002 static void bnxt_dbg_dump_states(struct bnxt *bp) bnxt_dbg_dump_states() argument
5010 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_dbg_dump_states()
5011 bnapi = bp->bnapi[i]; bnxt_dbg_dump_states()
5015 if (netif_msg_drv(bp)) { bnxt_dbg_dump_states()
5016 netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", bnxt_dbg_dump_states()
5019 netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", bnxt_dbg_dump_states()
5024 netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", bnxt_dbg_dump_states()
5031 static void bnxt_reset_task(struct bnxt *bp) bnxt_reset_task() argument
5033 bnxt_dbg_dump_states(bp); bnxt_reset_task()
5034 if (netif_running(bp->dev)) { bnxt_reset_task()
5035 bnxt_close_nic(bp, false, false); bnxt_reset_task()
5036 bnxt_open_nic(bp, false, false); bnxt_reset_task()
5042 struct bnxt *bp = netdev_priv(dev); bnxt_tx_timeout() local
5044 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); bnxt_tx_timeout()
5045 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); bnxt_tx_timeout()
5046 schedule_work(&bp->sp_task); bnxt_tx_timeout()
5052 struct bnxt *bp = netdev_priv(dev); bnxt_poll_controller() local
5055 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_poll_controller()
5056 struct bnxt_irq *irq = &bp->irq_tbl[i]; bnxt_poll_controller()
5059 irq->handler(irq->vector, bp->bnapi[i]); bnxt_poll_controller()
5067 struct bnxt *bp = (struct bnxt *)data; bnxt_timer() local
5068 struct net_device *dev = bp->dev; bnxt_timer()
5073 if (atomic_read(&bp->intr_sem) != 0) bnxt_timer()
5077 mod_timer(&bp->timer, jiffies + bp->current_interval); bnxt_timer()
5084 struct bnxt *bp = container_of(work, struct bnxt, sp_task); bnxt_sp_task() local
5087 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); bnxt_sp_task()
5089 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { bnxt_sp_task()
5090 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); bnxt_sp_task()
5094 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_sp_task()
5095 bnxt_cfg_rx_mode(bp); bnxt_sp_task()
5097 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_sp_task()
5098 bnxt_cfg_ntp_filters(bp); bnxt_sp_task()
5099 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { bnxt_sp_task()
5100 rc = bnxt_update_link(bp, true); bnxt_sp_task()
5102 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", bnxt_sp_task()
5105 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_sp_task()
5106 bnxt_hwrm_exec_fwd_req(bp); bnxt_sp_task()
5107 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { bnxt_sp_task()
5109 bp, bp->vxlan_port, bnxt_sp_task()
5112 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { bnxt_sp_task()
5114 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); bnxt_sp_task()
5116 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { bnxt_sp_task()
5120 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); bnxt_sp_task()
5122 bnxt_reset_task(bp); bnxt_sp_task()
5123 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); bnxt_sp_task()
5128 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); bnxt_sp_task()
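bnxt_sp_task() drains deferred work by calling test_and_clear_bit() on bp->sp_event for each event type, so a producer may set a bit from any context and the worker consumes it exactly once. A userspace analogue of that consume-once semantics using C11 atomics (hypothetical event names):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical event bits, mirroring the BNXT_*_SP_EVENT flags. */
    enum { EV_RX_MASK = 0, EV_LINK_CHNG = 1, EV_RESET = 2 };

    static atomic_ulong sp_event;

    static void set_event(int bit)
    {
        atomic_fetch_or(&sp_event, 1UL << bit);
    }

    /* Equivalent of test_and_clear_bit(): returns nonzero if the bit was
     * set, clearing it in the same atomic step so each event is consumed
     * exactly once. */
    static int test_and_clear(int bit)
    {
        unsigned long mask = 1UL << bit;
        return (atomic_fetch_and(&sp_event, ~mask) & mask) != 0;
    }

    int main(void)
    {
        set_event(EV_LINK_CHNG);
        if (test_and_clear(EV_RX_MASK))
            printf("rx mask work\n");
        if (test_and_clear(EV_LINK_CHNG))
            printf("link change work\n");   /* runs once */
        if (test_and_clear(EV_LINK_CHNG))
            printf("never reached\n");
        return 0;
    }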
5134 struct bnxt *bp = netdev_priv(dev); bnxt_init_board() local
5166 bp->dev = dev; bnxt_init_board()
5167 bp->pdev = pdev; bnxt_init_board()
5169 bp->bar0 = pci_ioremap_bar(pdev, 0); bnxt_init_board()
5170 if (!bp->bar0) { bnxt_init_board()
5176 bp->bar1 = pci_ioremap_bar(pdev, 2); bnxt_init_board()
5177 if (!bp->bar1) { bnxt_init_board()
5183 bp->bar2 = pci_ioremap_bar(pdev, 4); bnxt_init_board()
5184 if (!bp->bar2) { bnxt_init_board()
5190 INIT_WORK(&bp->sp_task, bnxt_sp_task); bnxt_init_board()
5192 spin_lock_init(&bp->ntp_fltr_lock); bnxt_init_board()
5194 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bnxt_init_board()
5195 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; bnxt_init_board()
5197 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4); bnxt_init_board()
5198 bp->coal_bufs = 20; bnxt_init_board()
5199 bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1); bnxt_init_board()
5200 bp->coal_bufs_irq = 2; bnxt_init_board()
5202 init_timer(&bp->timer); bnxt_init_board()
5203 bp->timer.data = (unsigned long)bp; bnxt_init_board()
5204 bp->timer.function = bnxt_timer; bnxt_init_board()
5205 bp->current_interval = BNXT_TIMER_INTERVAL; bnxt_init_board()
5207 clear_bit(BNXT_STATE_OPEN, &bp->state); bnxt_init_board()
5212 if (bp->bar2) { bnxt_init_board()
5213 pci_iounmap(pdev, bp->bar2); bnxt_init_board()
5214 bp->bar2 = NULL; bnxt_init_board()
5217 if (bp->bar1) { bnxt_init_board()
5218 pci_iounmap(pdev, bp->bar1); bnxt_init_board()
5219 bp->bar1 = NULL; bnxt_init_board()
5222 if (bp->bar0) { bnxt_init_board()
5223 pci_iounmap(pdev, bp->bar0); bnxt_init_board()
5224 bp->bar0 = NULL; bnxt_init_board()
5240 struct bnxt *bp = netdev_priv(dev); bnxt_change_mac_addr() local
5247 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) bnxt_change_mac_addr()
5256 bnxt_close_nic(bp, false, false); bnxt_change_mac_addr()
5257 rc = bnxt_open_nic(bp, false, false); bnxt_change_mac_addr()
5266 struct bnxt *bp = netdev_priv(dev); bnxt_change_mtu() local
5272 bnxt_close_nic(bp, false, false); bnxt_change_mtu()
5275 bnxt_set_ring_params(bp); bnxt_change_mtu()
5278 return bnxt_open_nic(bp, false, false); bnxt_change_mtu()
5285 struct bnxt *bp = netdev_priv(dev); bnxt_setup_tc() local
5287 if (tc > bp->max_tc) { bnxt_setup_tc()
5289 tc, bp->max_tc); bnxt_setup_tc()
5299 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bnxt_setup_tc()
5300 if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) bnxt_setup_tc()
5305 if (netif_running(bp->dev)) bnxt_setup_tc()
5306 bnxt_close_nic(bp, true, false); bnxt_setup_tc()
5309 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; bnxt_setup_tc()
5312 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bnxt_setup_tc()
5315 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); bnxt_setup_tc()
5316 bp->num_stat_ctxs = bp->cp_nr_rings; bnxt_setup_tc()
5318 if (netif_running(bp->dev)) bnxt_setup_tc()
5319 return bnxt_open_nic(bp, true, false); bnxt_setup_tc()
5345 struct bnxt *bp = netdev_priv(dev); bnxt_rx_flow_steer() local
5375 head = &bp->ntp_fltr_hash_tbl[idx]; bnxt_rx_flow_steer()
5386 spin_lock_bh(&bp->ntp_fltr_lock);
5387 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5390 spin_unlock_bh(&bp->ntp_fltr_lock);
5399 bp->ntp_fltr_count++;
5400 spin_unlock_bh(&bp->ntp_fltr_lock);
5402 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5403 schedule_work(&bp->sp_task);
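bnxt_rx_flow_steer() allocates a filter ID by taking bp->ntp_fltr_lock and searching bp->ntp_fltr_bmap for a free bit, so the find-and-claim step is atomic with respect to other CPUs. A small sketch of that locked-bitmap ID allocator, assuming a mutex in place of the spinlock and a single word of bitmap:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_FLTR 32

    static pthread_mutex_t fltr_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long fltr_bmap;   /* bit i set => filter id i in use */

    /* Allocate the lowest free id, or -1 when the table is full. The lock
     * makes find-and-set atomic, as the spinlock does in the driver. */
    static int fltr_alloc_id(void)
    {
        int id = -1;
        pthread_mutex_lock(&fltr_lock);
        for (int i = 0; i < MAX_FLTR; i++) {
            if (!(fltr_bmap & (1UL << i))) {
                fltr_bmap |= 1UL << i;
                id = i;
                break;
            }
        }
        pthread_mutex_unlock(&fltr_lock);
        return id;
    }

    static void fltr_free_id(int id)
    {
        pthread_mutex_lock(&fltr_lock);
        fltr_bmap &= ~(1UL << id);
        pthread_mutex_unlock(&fltr_lock);
    }

    int main(void)
    {
        int a = fltr_alloc_id(), b = fltr_alloc_id();
        printf("ids %d %d\n", a, b);             /* 0 1 */
        fltr_free_id(a);
        printf("reused %d\n", fltr_alloc_id());  /* 0 again */
        return 0;
    }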
5412 static void bnxt_cfg_ntp_filters(struct bnxt *bp) bnxt_cfg_ntp_filters() argument
5422 head = &bp->ntp_fltr_hash_tbl[i]; hlist_for_each_entry_safe()
5427 if (rps_may_expire_flow(bp->dev, fltr->rxq, hlist_for_each_entry_safe()
5430 bnxt_hwrm_cfa_ntuple_filter_free(bp, hlist_for_each_entry_safe()
5435 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, hlist_for_each_entry_safe()
5444 spin_lock_bh(&bp->ntp_fltr_lock); hlist_for_each_entry_safe()
5446 bp->ntp_fltr_count--; hlist_for_each_entry_safe()
5447 spin_unlock_bh(&bp->ntp_fltr_lock); hlist_for_each_entry_safe()
5449 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); hlist_for_each_entry_safe()
5458 static void bnxt_cfg_ntp_filters(struct bnxt *bp) bnxt_cfg_ntp_filters() argument
5467 struct bnxt *bp = netdev_priv(dev); bnxt_add_vxlan_port() local
5475 if (bp->vxlan_port_cnt && bp->vxlan_port != port) bnxt_add_vxlan_port()
5478 bp->vxlan_port_cnt++; bnxt_add_vxlan_port()
5479 if (bp->vxlan_port_cnt == 1) { bnxt_add_vxlan_port()
5480 bp->vxlan_port = port; bnxt_add_vxlan_port()
5481 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); bnxt_add_vxlan_port()
5482 schedule_work(&bp->sp_task); bnxt_add_vxlan_port()
5489 struct bnxt *bp = netdev_priv(dev); bnxt_del_vxlan_port() local
5497 if (bp->vxlan_port_cnt && bp->vxlan_port == port) { bnxt_del_vxlan_port()
5498 bp->vxlan_port_cnt--; bnxt_del_vxlan_port()
5500 if (bp->vxlan_port_cnt == 0) { bnxt_del_vxlan_port()
5501 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); bnxt_del_vxlan_port()
5502 schedule_work(&bp->sp_task); bnxt_del_vxlan_port()
5545 struct bnxt *bp = netdev_priv(dev); bnxt_remove_one() local
5547 if (BNXT_PF(bp)) bnxt_remove_one()
5548 bnxt_sriov_disable(bp); bnxt_remove_one()
5551 cancel_work_sync(&bp->sp_task); bnxt_remove_one()
5552 bp->sp_event = 0; bnxt_remove_one()
5554 bnxt_free_hwrm_resources(bp); bnxt_remove_one()
5555 pci_iounmap(pdev, bp->bar2); bnxt_remove_one()
5556 pci_iounmap(pdev, bp->bar1); bnxt_remove_one()
5557 pci_iounmap(pdev, bp->bar0); bnxt_remove_one()
5564 static int bnxt_probe_phy(struct bnxt *bp) bnxt_probe_phy() argument
5567 struct bnxt_link_info *link_info = &bp->link_info; bnxt_probe_phy()
5570 rc = bnxt_update_link(bp, false); bnxt_probe_phy()
5572 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", bnxt_probe_phy()
5598 strcat(bp->fw_ver_str, phy_ver); bnxt_probe_phy()
5613 void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx) bnxt_get_max_rings() argument
5617 if (BNXT_PF(bp)) { bnxt_get_max_rings()
5618 *max_tx = bp->pf.max_pf_tx_rings; bnxt_get_max_rings()
5619 *max_rx = bp->pf.max_pf_rx_rings; bnxt_get_max_rings()
5620 max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); bnxt_get_max_rings()
5621 max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs); bnxt_get_max_rings()
5624 *max_tx = bp->vf.max_tx_rings; bnxt_get_max_rings()
5625 *max_rx = bp->vf.max_rx_rings; bnxt_get_max_rings()
5626 max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); bnxt_get_max_rings()
5627 max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs); bnxt_get_max_rings()
5630 if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_get_max_rings()
5641 struct bnxt *bp; bnxt_init_one() local
5648 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); bnxt_init_one()
5652 bp = netdev_priv(dev); bnxt_init_one()
5655 bp->flags |= BNXT_FLAG_VF; bnxt_init_one()
5658 bp->flags |= BNXT_FLAG_MSIX_CAP; bnxt_init_one()
5659 if (BNXT_PF(bp)) bnxt_init_one()
5660 bp->flags |= BNXT_FLAG_RFS; bnxt_init_one()
5680 if (bp->flags & BNXT_FLAG_RFS) bnxt_init_one()
5695 init_waitqueue_head(&bp->sriov_cfg_wait); bnxt_init_one()
5697 rc = bnxt_alloc_hwrm_resources(bp); bnxt_init_one()
5701 mutex_init(&bp->hwrm_cmd_lock); bnxt_init_one()
5702 bnxt_hwrm_ver_get(bp); bnxt_init_one()
5704 rc = bnxt_hwrm_func_drv_rgtr(bp); bnxt_init_one()
5709 rc = bnxt_hwrm_func_qcaps(bp); bnxt_init_one()
5711 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", bnxt_init_one()
5717 rc = bnxt_hwrm_queue_qportcfg(bp); bnxt_init_one()
5719 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", bnxt_init_one()
5725 bnxt_set_tpa_flags(bp); bnxt_init_one()
5726 bnxt_set_ring_params(bp); bnxt_init_one()
5728 if (BNXT_PF(bp)) bnxt_init_one()
5729 bp->pf.max_irqs = max_irqs; bnxt_init_one()
5732 bp->vf.max_irqs = max_irqs; bnxt_init_one()
5734 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bnxt_init_one()
5735 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); bnxt_init_one()
5736 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); bnxt_init_one()
5737 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bnxt_init_one()
5738 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); bnxt_init_one()
5739 bp->num_stat_ctxs = bp->cp_nr_rings; bnxt_init_one()
5742 bp->flags |= BNXT_FLAG_STRIP_VLAN; bnxt_init_one()
5744 rc = bnxt_probe_phy(bp); bnxt_init_one()
5759 pci_iounmap(pdev, bp->bar0); bnxt_init_one()
H A Dbnxt_ethtool.c
25 struct bnxt *bp = netdev_priv(dev); bnxt_get_msglevel() local
27 return bp->msg_enable; bnxt_get_msglevel()
32 struct bnxt *bp = netdev_priv(dev); bnxt_set_msglevel() local
34 bp->msg_enable = value; bnxt_set_msglevel()
40 struct bnxt *bp = netdev_priv(dev); bnxt_get_coalesce() local
45 max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1); bnxt_get_coalesce()
46 coal->rx_max_coalesced_frames = bp->coal_bufs / 2; bnxt_get_coalesce()
48 max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1); bnxt_get_coalesce()
49 coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2; bnxt_get_coalesce()
57 struct bnxt *bp = netdev_priv(dev); bnxt_set_coalesce() local
60 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs); bnxt_set_coalesce()
61 bp->coal_bufs = coal->rx_max_coalesced_frames * 2; bnxt_set_coalesce()
62 bp->coal_ticks_irq = bnxt_set_coalesce()
64 bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; bnxt_set_coalesce()
67 rc = bnxt_hwrm_set_coal(bp); bnxt_set_coalesce()
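bnxt_get_coalesce()/bnxt_set_coalesce() round-trip the user's microsecond values through firmware timer units (BNXT_USEC_TO_COAL_TIMER / BNXT_COAL_TIMER_TO_USEC) and store frame budgets doubled (coal_bufs = frames * 2, reported back halved). The macro definitions are not part of this excerpt, so the sketch below assumes a hypothetical 80 ns tick purely to show the round trip:

    #include <stdio.h>

    /* Assumed conversion: one firmware tick = 80 ns. The real macros live
     * in bnxt.h and are not shown in this excerpt. */
    #define USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
    #define COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)

    int main(void)
    {
        unsigned int usecs = 4;   /* default set in bnxt_init_board() */
        unsigned int ticks = USEC_TO_COAL_TIMER(usecs);
        unsigned int frames = 10;
        unsigned int coal_bufs = frames * 2;   /* stored doubled, as in set */

        printf("ticks=%u usecs back=%u frames back=%u\n",
               ticks, COAL_TIMER_TO_USEC(ticks), coal_bufs / 2);
        return 0;
    }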
76 struct bnxt *bp = netdev_priv(dev); bnxt_get_sset_count() local
80 return BNXT_NUM_STATS * bp->cp_nr_rings; bnxt_get_sset_count()
90 struct bnxt *bp = netdev_priv(dev); bnxt_get_ethtool_stats() local
91 u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; bnxt_get_ethtool_stats()
96 if (!bp->bnapi) bnxt_get_ethtool_stats()
99 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_get_ethtool_stats()
100 struct bnxt_napi *bnapi = bp->bnapi[i]; bnxt_get_ethtool_stats()
113 struct bnxt *bp = netdev_priv(dev); bnxt_get_strings() local
119 for (i = 0; i < bp->cp_nr_rings; i++) { bnxt_get_strings()
165 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", bnxt_get_strings()
174 struct bnxt *bp = netdev_priv(dev); bnxt_get_ringparam() local
180 ering->rx_pending = bp->rx_ring_size; bnxt_get_ringparam()
181 ering->rx_jumbo_pending = bp->rx_agg_ring_size; bnxt_get_ringparam()
182 ering->tx_pending = bp->tx_ring_size; bnxt_get_ringparam()
188 struct bnxt *bp = netdev_priv(dev); bnxt_set_ringparam() local
196 bnxt_close_nic(bp, false, false); bnxt_set_ringparam()
198 bp->rx_ring_size = ering->rx_pending; bnxt_set_ringparam()
199 bp->tx_ring_size = ering->tx_pending; bnxt_set_ringparam()
200 bnxt_set_ring_params(bp); bnxt_set_ringparam()
203 return bnxt_open_nic(bp, false, false); bnxt_set_ringparam()
211 struct bnxt *bp = netdev_priv(dev); bnxt_get_channels() local
214 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bnxt_get_channels()
223 channel->rx_count = bp->rx_nr_rings; bnxt_get_channels()
224 channel->tx_count = bp->tx_nr_rings_per_tc; bnxt_get_channels()
230 struct bnxt *bp = netdev_priv(dev); bnxt_set_channels() local
238 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bnxt_set_channels()
248 if (BNXT_PF(bp)) { bnxt_set_channels()
253 rc = bnxt_close_nic(bp, true, false); bnxt_set_channels()
255 netdev_err(bp->dev, "Set channel failure rc :%x\n", bnxt_set_channels()
261 bp->rx_nr_rings = channel->rx_count; bnxt_set_channels()
262 bp->tx_nr_rings_per_tc = channel->tx_count; bnxt_set_channels()
263 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bnxt_set_channels()
265 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bnxt_set_channels()
266 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); bnxt_set_channels()
267 bp->num_stat_ctxs = bp->cp_nr_rings; bnxt_set_channels()
270 rc = bnxt_open_nic(bp, true, false); bnxt_set_channels()
271 if ((!rc) && BNXT_PF(bp)) { bnxt_set_channels()
282 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, bnxt_grxclsrlall() argument
287 cmd->data = bp->ntp_fltr_count; bnxt_grxclsrlall()
292 head = &bp->ntp_fltr_hash_tbl[i]; bnxt_grxclsrlall()
307 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) bnxt_grxclsrule() argument
321 head = &bp->ntp_fltr_hash_tbl[i]; bnxt_grxclsrule()
364 struct bnxt *bp = netdev_priv(dev); bnxt_get_rxnfc() local
369 cmd->data = bp->rx_nr_rings; bnxt_get_rxnfc()
373 cmd->rule_cnt = bp->ntp_fltr_count; bnxt_get_rxnfc()
378 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); bnxt_get_rxnfc()
382 rc = bnxt_grxclsrule(bp, cmd); bnxt_get_rxnfc()
407 struct bnxt *bp = netdev_priv(dev); bnxt_get_rxfh() local
408 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; bnxt_get_rxfh()
427 struct bnxt *bp = netdev_priv(dev); bnxt_get_drvinfo() local
431 strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); bnxt_get_drvinfo()
432 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); bnxt_get_drvinfo()
433 info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; bnxt_get_drvinfo()
434 info->testinfo_len = BNXT_NUM_TESTS(bp); bnxt_get_drvinfo()
520 struct bnxt *bp = netdev_priv(dev); bnxt_get_settings() local
521 struct bnxt_link_info *link_info = &bp->link_info; bnxt_get_settings()
644 struct bnxt *bp = netdev_priv(dev); bnxt_set_settings() local
645 struct bnxt_link_info *link_info = &bp->link_info; bnxt_set_settings()
649 if (BNXT_VF(bp)) bnxt_set_settings()
702 rc = bnxt_hwrm_set_link_setting(bp, set_pause); bnxt_set_settings()
711 struct bnxt *bp = netdev_priv(dev); bnxt_get_pauseparam() local
712 struct bnxt_link_info *link_info = &bp->link_info; bnxt_get_pauseparam()
714 if (BNXT_VF(bp)) bnxt_get_pauseparam()
726 struct bnxt *bp = netdev_priv(dev); bnxt_set_pauseparam() local
727 struct bnxt_link_info *link_info = &bp->link_info; bnxt_set_pauseparam()
729 if (BNXT_VF(bp)) bnxt_set_pauseparam()
755 rc = bnxt_hwrm_set_pause(bp); bnxt_set_pauseparam()
761 struct bnxt *bp = netdev_priv(dev); bnxt_get_link() local
764 return bp->link_info.link_up; bnxt_get_link()
775 struct bnxt *bp = netdev_priv(dev); bnxt_flash_nvram() local
781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); bnxt_flash_nvram()
789 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, bnxt_flash_nvram()
799 rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); bnxt_flash_nvram()
800 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); bnxt_flash_nvram()
954 struct bnxt *bp = netdev_priv(dev); nvm_get_dir_info() local
957 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; nvm_get_dir_info()
959 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); nvm_get_dir_info()
961 mutex_lock(&bp->hwrm_cmd_lock); nvm_get_dir_info()
962 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); nvm_get_dir_info()
967 mutex_unlock(&bp->hwrm_cmd_lock); nvm_get_dir_info()
981 struct bnxt *bp = netdev_priv(dev); bnxt_get_nvram_directory() local
1004 buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, bnxt_get_nvram_directory()
1011 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); bnxt_get_nvram_directory()
1013 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_get_nvram_directory()
1016 dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); bnxt_get_nvram_directory()
1023 struct bnxt *bp = netdev_priv(dev); bnxt_get_nvram_item() local
1029 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, bnxt_get_nvram_item()
1036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); bnxt_get_nvram_item()
1042 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_get_nvram_item()
1045 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); bnxt_get_nvram_item()
1072 struct bnxt *bp = netdev_priv(dev); bnxt_erase_nvram_directory() local
1075 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); bnxt_erase_nvram_directory()
1077 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); bnxt_erase_nvram_directory()
1084 struct bnxt *bp = netdev_priv(dev); bnxt_set_eeprom() local
1088 if (!BNXT_PF(bp)) { bnxt_set_eeprom()
H A Dbnxt.h
412 #define BNXT_NUM_TESTS(bp) 0
453 !((raw_cons) & bp->cp_bit))
457 !((raw_cons) & bp->cp_bit))
461 !((raw_cons) & bp->cp_bit))
469 #define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)
471 #define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)
473 #define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)
477 #define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
603 struct bnxt *bp; member in struct:bnxt_napi
882 #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
883 #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
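The NEXT_RX()/NEXT_TX()/RING_CMP() macros above rely on the ring sizes being powers of two: with mask = size - 1, "(idx + 1) & mask" increments with wrap-around, and "(idx) & mask" folds a free-running counter into a ring slot. A runnable illustration:

    #include <stdio.h>

    /* Ring sizes are powers of two, so "mask = size - 1" turns the
     * increment into a wrap-around, exactly as NEXT_TX()/RING_CMP() do. */
    #define RING_SIZE 8u
    #define RING_MASK (RING_SIZE - 1)
    #define NEXT_IDX(idx) (((idx) + 1) & RING_MASK)

    int main(void)
    {
        unsigned int idx = 6;
        for (int i = 0; i < 4; i++) {
            printf("%u ", idx);    /* prints 6 7 0 1 */
            idx = NEXT_IDX(idx);
        }
        printf("\n");
        return 0;
    }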
/linux-4.4.14/kernel/debug/kdb/
H A Dkdb_bp.c
40 static char *kdb_bptype(kdb_bp_t *bp) kdb_bptype() argument
42 if (bp->bp_type < 0 || bp->bp_type > 4) kdb_bptype()
45 return kdb_rwtypes[bp->bp_type]; kdb_bptype()
48 static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp) kdb_parsebp() argument
53 bp->bph_length = 1; kdb_parsebp()
56 bp->bp_type = BP_ACCESS_WATCHPOINT; kdb_parsebp()
58 bp->bp_type = BP_WRITE_WATCHPOINT; kdb_parsebp()
60 bp->bp_type = BP_HARDWARE_BREAKPOINT; kdb_parsebp()
64 bp->bph_length = 1; kdb_parsebp()
80 bp->bph_length = len; kdb_parsebp()
92 static int _kdb_bp_remove(kdb_bp_t *bp) _kdb_bp_remove() argument
95 if (!bp->bp_installed) _kdb_bp_remove()
97 if (!bp->bp_type) _kdb_bp_remove()
98 ret = dbg_remove_sw_break(bp->bp_addr); _kdb_bp_remove()
100 ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr, _kdb_bp_remove()
101 bp->bph_length, _kdb_bp_remove()
102 bp->bp_type); _kdb_bp_remove()
104 bp->bp_installed = 0; _kdb_bp_remove()
108 static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp) kdb_handle_bp() argument
121 bp->bp_delay = 0; kdb_handle_bp()
122 bp->bp_delayed = 1; kdb_handle_bp()
125 static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp) _kdb_bp_install() argument
134 __func__, bp->bp_installed); _kdb_bp_install()
136 bp->bp_delay = 0; _kdb_bp_install()
137 if (bp->bp_installed) _kdb_bp_install()
139 if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) { _kdb_bp_install()
141 kdb_printf("%s: delayed bp\n", __func__); _kdb_bp_install()
142 kdb_handle_bp(regs, bp); _kdb_bp_install()
145 if (!bp->bp_type) _kdb_bp_install()
146 ret = dbg_set_sw_break(bp->bp_addr); _kdb_bp_install()
148 ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr, _kdb_bp_install()
149 bp->bph_length, _kdb_bp_install()
150 bp->bp_type); _kdb_bp_install()
152 bp->bp_installed = 1; _kdb_bp_install()
155 __func__, bp->bp_addr); _kdb_bp_install()
157 if (!bp->bp_type) { _kdb_bp_install()
181 kdb_bp_t *bp = &kdb_breakpoints[i]; kdb_bp_install() local
184 kdb_printf("%s: bp %d bp_enabled %d\n", kdb_bp_install()
185 __func__, i, bp->bp_enabled); kdb_bp_install()
187 if (bp->bp_enabled) kdb_bp_install()
188 _kdb_bp_install(regs, bp); kdb_bp_install()
212 kdb_bp_t *bp = &kdb_breakpoints[i]; kdb_bp_remove() local
215 kdb_printf("%s: bp %d bp_enabled %d\n", kdb_bp_remove()
216 __func__, i, bp->bp_enabled); kdb_bp_remove()
218 if (bp->bp_enabled) kdb_bp_remove()
219 _kdb_bp_remove(bp); kdb_bp_remove()
240 static void kdb_printbp(kdb_bp_t *bp, int i) kdb_printbp() argument
242 kdb_printf("%s ", kdb_bptype(bp)); kdb_printbp()
244 kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT); kdb_printbp()
246 if (bp->bp_enabled) kdb_printbp()
252 bp->bp_addr, bp->bp_type, bp->bp_installed); kdb_printbp()
260 * Handle the bp commands.
262 * [bp|bph] <addr-expression> [DATAR|DATAW]
275 * bp Set breakpoint on all cpus. Only use hardware assist if needed. kdb_bp()
282 kdb_bp_t *bp, *bp_check; kdb_bp() local
293 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; kdb_bp()
294 bpno++, bp++) { kdb_bp()
295 if (bp->bp_free) kdb_bp()
297 kdb_printbp(bp, bpno); kdb_bp()
312 * Find an empty bp structure to allocate kdb_bp()
314 for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) { kdb_bp()
315 if (bp->bp_free) kdb_bp()
352 *bp = template; kdb_bp()
353 bp->bp_free = 0; kdb_bp()
355 kdb_printbp(bp, bpno); kdb_bp()
382 kdb_bp_t *bp = NULL; kdb_bc() local
417 bp = &kdb_breakpoints[addr]; kdb_bc()
421 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; kdb_bc()
422 i++, bp++) { kdb_bc()
423 if (bp->bp_addr == addr) { kdb_bc()
436 for (bp = &kdb_breakpoints[lowbp], i = lowbp; kdb_bc()
438 i++, bp++) { kdb_bc()
439 if (bp->bp_free) kdb_bc()
446 bp->bp_enabled = 0; kdb_bc()
450 i, bp->bp_addr); kdb_bc()
452 bp->bp_addr = 0; kdb_bc()
453 bp->bp_free = 1; kdb_bc()
457 bp->bp_enabled = 1; kdb_bc()
461 i, bp->bp_addr); kdb_bc()
466 if (!bp->bp_enabled) kdb_bc()
469 bp->bp_enabled = 0; kdb_bc()
473 i, bp->bp_addr); kdb_bc()
477 if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) { kdb_bc()
478 bp->bp_delay = 0; kdb_bc()
524 kdb_bp_t *bp; kdb_initbptab() local
531 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) kdb_initbptab()
532 bp->bp_free = 1; kdb_initbptab()
534 kdb_register_flags("bp", kdb_bp, "[<vaddr>]", kdb_initbptab()
H A Dkdb_debugger.c56 kdb_bp_t *bp; kdb_stub() local
78 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { kdb_stub()
79 if ((bp->bp_enabled) && (bp->bp_addr == addr)) { kdb_stub()
88 for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { kdb_stub()
89 if (bp->bp_free) kdb_stub()
91 if (bp->bp_addr == addr) { kdb_stub()
92 bp->bp_delay = 1; kdb_stub()
93 bp->bp_delayed = 1; kdb_stub()
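
The kdb excerpts keep a fixed table of breakpoint slots and track each slot with three flags: bp_free (slot unused), bp_enabled (armed by the user) and bp_installed (currently planted in the running image). A simplified user-space sketch of that bookkeeping, under the assumption that planting always succeeds (struct bp_slot and the helpers are invented for illustration):

#include <stdio.h>
#include <string.h>

#define MAXBPT 4

struct bp_slot {
	unsigned long addr;
	int free;        /* slot unused */
	int enabled;     /* user armed it */
	int installed;   /* currently planted */
};

static struct bp_slot table[MAXBPT];

static void bp_init(void)
{
	/* mirror of kdb_initbptab(): mark every slot free */
	for (int i = 0; i < MAXBPT; i++) {
		memset(&table[i], 0, sizeof(table[i]));
		table[i].free = 1;
	}
}

static int bp_set(unsigned long addr)
{
	/* find an empty slot, as the kdb_bp() allocation loop does */
	for (int i = 0; i < MAXBPT; i++) {
		if (!table[i].free)
			continue;
		table[i].free = 0;
		table[i].enabled = 1;
		table[i].addr = addr;
		return i;
	}
	return -1;                       /* table full */
}

static void bp_install_all(void)
{
	for (int i = 0; i < MAXBPT; i++)
		if (!table[i].free && table[i].enabled && !table[i].installed)
			table[i].installed = 1;  /* dbg_set_sw_break() would go here */
}

int main(void)
{
	bp_init();
	int n = bp_set(0xc01000UL);
	bp_install_all();
	printf("slot %d installed=%d\n", n, table[n].installed);
	return 0;
}
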
/linux-4.4.14/drivers/net/ethernet/sun/
H A Dsunbmac.c97 static void qec_init(struct bigmac *bp) qec_init() argument
99 struct platform_device *qec_op = bp->qec_op; qec_init()
100 void __iomem *gregs = bp->gregs; qec_init()
101 u8 bsizes = bp->bigmac_bursts; qec_init()
164 static void bigmac_stop(struct bigmac *bp) bigmac_stop() argument
166 bigmac_tx_reset(bp->bregs); bigmac_stop()
167 bigmac_rx_reset(bp->bregs); bigmac_stop()
170 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) bigmac_get_counters() argument
172 struct net_device_stats *stats = &bp->enet_stats; bigmac_get_counters()
192 static void bigmac_clean_rings(struct bigmac *bp) bigmac_clean_rings() argument
197 if (bp->rx_skbs[i] != NULL) { bigmac_clean_rings()
198 dev_kfree_skb_any(bp->rx_skbs[i]); bigmac_clean_rings()
199 bp->rx_skbs[i] = NULL; bigmac_clean_rings()
204 if (bp->tx_skbs[i] != NULL) { bigmac_clean_rings()
205 dev_kfree_skb_any(bp->tx_skbs[i]); bigmac_clean_rings()
206 bp->tx_skbs[i] = NULL; bigmac_clean_rings()
211 static void bigmac_init_rings(struct bigmac *bp, int from_irq) bigmac_init_rings() argument
213 struct bmac_init_block *bb = bp->bmac_block; bigmac_init_rings()
220 bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; bigmac_init_rings()
223 bigmac_clean_rings(bp); bigmac_init_rings()
233 bp->rx_skbs[i] = skb; bigmac_init_rings()
240 dma_map_single(&bp->bigmac_op->dev, bigmac_init_rings()
267 static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) write_tcvr_bit() argument
269 if (bp->tcvr_type == internal) { write_tcvr_bit()
277 } else if (bp->tcvr_type == external) { write_tcvr_bit()
290 static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) read_tcvr_bit() argument
294 if (bp->tcvr_type == internal) { read_tcvr_bit()
301 } else if (bp->tcvr_type == external) { read_tcvr_bit()
313 static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) read_tcvr_bit2() argument
317 if (bp->tcvr_type == internal) { read_tcvr_bit2()
323 } else if (bp->tcvr_type == external) { read_tcvr_bit2()
335 static void put_tcvr_byte(struct bigmac *bp, put_tcvr_byte() argument
342 write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); put_tcvr_byte()
347 static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, bigmac_tcvr_write() argument
354 switch(bp->tcvr_type) { bigmac_tcvr_write()
365 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write()
366 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write()
367 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write()
368 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write()
370 put_tcvr_byte(bp, tregs, bigmac_tcvr_write()
371 ((bp->tcvr_type == internal) ? bigmac_tcvr_write()
374 put_tcvr_byte(bp, tregs, reg); bigmac_tcvr_write()
376 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_write()
377 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_write()
381 write_tcvr_bit(bp, tregs, (val >> shift) & 1); bigmac_tcvr_write()
386 static unsigned short bigmac_tcvr_read(struct bigmac *bp, bigmac_tcvr_read() argument
393 switch(bp->tcvr_type) { bigmac_tcvr_read()
404 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_read()
405 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_read()
406 write_tcvr_bit(bp, tregs, 1); bigmac_tcvr_read()
407 write_tcvr_bit(bp, tregs, 0); bigmac_tcvr_read()
409 put_tcvr_byte(bp, tregs, bigmac_tcvr_read()
410 ((bp->tcvr_type == internal) ? bigmac_tcvr_read()
413 put_tcvr_byte(bp, tregs, reg); bigmac_tcvr_read()
415 if (bp->tcvr_type == external) { bigmac_tcvr_read()
418 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
419 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
424 tmp = read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
429 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
430 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
431 (void) read_tcvr_bit2(bp, tregs); bigmac_tcvr_read()
435 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
436 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
441 tmp = read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
446 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
447 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
448 (void) read_tcvr_bit(bp, tregs); bigmac_tcvr_read()
453 static void bigmac_tcvr_init(struct bigmac *bp) bigmac_tcvr_init() argument
455 void __iomem *tregs = bp->tregs; bigmac_tcvr_init()
472 bp->tcvr_type = external; bigmac_tcvr_init()
477 bp->tcvr_type = internal; bigmac_tcvr_init()
493 static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) try_next_permutation() argument
495 if (bp->sw_bmcr & BMCR_SPEED100) { try_next_permutation()
499 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); try_next_permutation()
500 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation()
501 bp->sw_bmcr = (BMCR_RESET); try_next_permutation()
502 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation()
506 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); try_next_permutation()
507 if ((bp->sw_bmcr & BMCR_RESET) == 0) try_next_permutation()
512 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); try_next_permutation()
514 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); try_next_permutation()
517 bp->sw_bmcr &= ~(BMCR_SPEED100); try_next_permutation()
518 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); try_next_permutation()
528 struct bigmac *bp = (struct bigmac *) data; bigmac_timer() local
529 void __iomem *tregs = bp->tregs; bigmac_timer()
532 bp->timer_ticks++; bigmac_timer()
533 if (bp->timer_state == ltrywait) { bigmac_timer()
534 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bigmac_timer()
535 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_timer()
536 if (bp->sw_bmsr & BMSR_LSTATUS) { bigmac_timer()
538 bp->dev->name, bigmac_timer()
539 (bp->sw_bmcr & BMCR_SPEED100) ? bigmac_timer()
541 bp->timer_state = asleep; bigmac_timer()
544 if (bp->timer_ticks >= 4) { bigmac_timer()
547 ret = try_next_permutation(bp, tregs); bigmac_timer()
550 bp->dev->name); bigmac_timer()
551 ret = bigmac_init_hw(bp, 0); bigmac_timer()
554 "BigMAC.\n", bp->dev->name); bigmac_timer()
558 bp->timer_ticks = 0; bigmac_timer()
567 bp->dev->name); bigmac_timer()
569 bp->timer_ticks = 0; bigmac_timer()
570 bp->timer_state = asleep; /* foo on you */ bigmac_timer()
574 bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ bigmac_timer()
575 add_timer(&bp->bigmac_timer); bigmac_timer()
582 static void bigmac_begin_auto_negotiation(struct bigmac *bp) bigmac_begin_auto_negotiation() argument
584 void __iomem *tregs = bp->tregs; bigmac_begin_auto_negotiation()
588 bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); bigmac_begin_auto_negotiation()
589 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation()
592 bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_begin_auto_negotiation()
593 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation()
594 bp->sw_bmcr = (BMCR_RESET); bigmac_begin_auto_negotiation()
595 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation()
599 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation()
600 if ((bp->sw_bmcr & BMCR_RESET) == 0) bigmac_begin_auto_negotiation()
605 printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bigmac_begin_auto_negotiation()
607 bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); bigmac_begin_auto_negotiation()
610 bp->sw_bmcr |= BMCR_SPEED100; bigmac_begin_auto_negotiation()
611 bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); bigmac_begin_auto_negotiation()
613 bp->timer_state = ltrywait; bigmac_begin_auto_negotiation()
614 bp->timer_ticks = 0; bigmac_begin_auto_negotiation()
615 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; bigmac_begin_auto_negotiation()
616 bp->bigmac_timer.data = (unsigned long) bp; bigmac_begin_auto_negotiation()
617 bp->bigmac_timer.function = bigmac_timer; bigmac_begin_auto_negotiation()
618 add_timer(&bp->bigmac_timer); bigmac_begin_auto_negotiation()
621 static int bigmac_init_hw(struct bigmac *bp, int from_irq) bigmac_init_hw() argument
623 void __iomem *gregs = bp->gregs; bigmac_init_hw()
624 void __iomem *cregs = bp->creg; bigmac_init_hw()
625 void __iomem *bregs = bp->bregs; bigmac_init_hw()
626 unsigned char *e = &bp->dev->dev_addr[0]; bigmac_init_hw()
629 bigmac_get_counters(bp, bregs); bigmac_init_hw()
635 qec_init(bp); bigmac_init_hw()
638 bigmac_init_rings(bp, from_irq); bigmac_init_hw()
641 bigmac_tcvr_init(bp); bigmac_init_hw()
644 bigmac_stop(bp); bigmac_init_hw()
674 sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), bigmac_init_hw()
676 sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), bigmac_init_hw()
710 bigmac_begin_auto_negotiation(bp); bigmac_init_hw()
717 static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) bigmac_is_medium_rare() argument
752 bigmac_init_hw(bp, 1); bigmac_is_medium_rare()
756 static void bigmac_tx(struct bigmac *bp) bigmac_tx() argument
758 struct be_txd *txbase = &bp->bmac_block->be_txd[0]; bigmac_tx()
759 struct net_device *dev = bp->dev; bigmac_tx()
762 spin_lock(&bp->lock); bigmac_tx()
764 elem = bp->tx_old; bigmac_tx()
766 while (elem != bp->tx_new) { bigmac_tx()
775 skb = bp->tx_skbs[elem]; bigmac_tx()
776 bp->enet_stats.tx_packets++; bigmac_tx()
777 bp->enet_stats.tx_bytes += skb->len; bigmac_tx()
778 dma_unmap_single(&bp->bigmac_op->dev, bigmac_tx()
783 bp->tx_skbs[elem] = NULL; bigmac_tx()
789 bp->tx_old = elem; bigmac_tx()
792 TX_BUFFS_AVAIL(bp) > 0) bigmac_tx()
793 netif_wake_queue(bp->dev); bigmac_tx()
795 spin_unlock(&bp->lock); bigmac_tx()
799 static void bigmac_rx(struct bigmac *bp) bigmac_rx() argument
801 struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; bigmac_rx()
803 int elem = bp->rx_new, drops = 0; bigmac_rx()
813 bp->enet_stats.rx_errors++; bigmac_rx()
814 bp->enet_stats.rx_length_errors++; bigmac_rx()
818 bp->enet_stats.rx_dropped++; bigmac_rx()
823 skb = bp->rx_skbs[elem]; bigmac_rx()
833 dma_unmap_single(&bp->bigmac_op->dev, bigmac_rx()
837 bp->rx_skbs[elem] = new_skb; bigmac_rx()
841 dma_map_single(&bp->bigmac_op->dev, bigmac_rx()
851 struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2); bigmac_rx()
859 dma_sync_single_for_cpu(&bp->bigmac_op->dev, bigmac_rx()
863 dma_sync_single_for_device(&bp->bigmac_op->dev, bigmac_rx()
875 skb->protocol = eth_type_trans(skb, bp->dev); bigmac_rx()
877 bp->enet_stats.rx_packets++; bigmac_rx()
878 bp->enet_stats.rx_bytes += len; bigmac_rx()
883 bp->rx_new = elem; bigmac_rx()
885 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); bigmac_rx()
890 struct bigmac *bp = (struct bigmac *) dev_id; bigmac_interrupt() local
896 bmac_status = sbus_readl(bp->creg + CREG_STAT); bigmac_interrupt()
897 qec_status = sbus_readl(bp->gregs + GLOB_STAT); bigmac_interrupt()
902 bigmac_is_medium_rare(bp, qec_status, bmac_status); bigmac_interrupt()
905 bigmac_tx(bp); bigmac_interrupt()
908 bigmac_rx(bp); bigmac_interrupt()
915 struct bigmac *bp = netdev_priv(dev); bigmac_open() local
918 ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); bigmac_open()
923 init_timer(&bp->bigmac_timer); bigmac_open()
924 ret = bigmac_init_hw(bp, 0); bigmac_open()
926 free_irq(dev->irq, bp); bigmac_open()
932 struct bigmac *bp = netdev_priv(dev); bigmac_close() local
934 del_timer(&bp->bigmac_timer); bigmac_close()
935 bp->timer_state = asleep; bigmac_close()
936 bp->timer_ticks = 0; bigmac_close()
938 bigmac_stop(bp); bigmac_close()
939 bigmac_clean_rings(bp); bigmac_close()
940 free_irq(dev->irq, bp); bigmac_close()
946 struct bigmac *bp = netdev_priv(dev); bigmac_tx_timeout() local
948 bigmac_init_hw(bp, 0); bigmac_tx_timeout()
955 struct bigmac *bp = netdev_priv(dev); bigmac_start_xmit() local
960 mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, bigmac_start_xmit()
964 spin_lock_irq(&bp->lock); bigmac_start_xmit()
965 entry = bp->tx_new; bigmac_start_xmit()
967 bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; bigmac_start_xmit()
968 bp->tx_skbs[entry] = skb; bigmac_start_xmit()
969 bp->bmac_block->be_txd[entry].tx_addr = mapping; bigmac_start_xmit()
970 bp->bmac_block->be_txd[entry].tx_flags = bigmac_start_xmit()
972 bp->tx_new = NEXT_TX(entry); bigmac_start_xmit()
973 if (TX_BUFFS_AVAIL(bp) <= 0) bigmac_start_xmit()
975 spin_unlock_irq(&bp->lock); bigmac_start_xmit()
978 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); bigmac_start_xmit()
986 struct bigmac *bp = netdev_priv(dev); bigmac_get_stats() local
988 bigmac_get_counters(bp, bp->bregs); bigmac_get_stats()
989 return &bp->enet_stats; bigmac_get_stats()
994 struct bigmac *bp = netdev_priv(dev); bigmac_set_multicast() local
995 void __iomem *bregs = bp->bregs; bigmac_set_multicast()
1046 struct bigmac *bp = netdev_priv(dev); bigmac_get_link() local
1048 spin_lock_irq(&bp->lock); bigmac_get_link()
1049 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); bigmac_get_link()
1050 spin_unlock_irq(&bp->lock); bigmac_get_link()
1052 return (bp->sw_bmsr & BMSR_LSTATUS); bigmac_get_link()
1078 struct bigmac *bp; bigmac_ether_init() local
1093 bp = netdev_priv(dev); bigmac_ether_init()
1094 bp->qec_op = qec_op; bigmac_ether_init()
1095 bp->bigmac_op = op; bigmac_ether_init()
1099 spin_lock_init(&bp->lock); bigmac_ether_init()
1102 bp->gregs = of_ioremap(&qec_op->resource[0], 0, bigmac_ether_init()
1104 if (!bp->gregs) { bigmac_ether_init()
1110 if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { bigmac_ether_init()
1116 if (qec_global_reset(bp->gregs)) bigmac_ether_init()
1129 bp->bigmac_bursts = bsizes; bigmac_ether_init()
1132 qec_init(bp); bigmac_ether_init()
1135 bp->creg = of_ioremap(&op->resource[0], 0, bigmac_ether_init()
1137 if (!bp->creg) { bigmac_ether_init()
1143 bp->bregs = of_ioremap(&op->resource[1], 0, bigmac_ether_init()
1145 if (!bp->bregs) { bigmac_ether_init()
1153 bp->tregs = of_ioremap(&op->resource[2], 0, bigmac_ether_init()
1155 if (!bp->tregs) { bigmac_ether_init()
1161 bigmac_stop(bp); bigmac_ether_init()
1164 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, bigmac_ether_init()
1166 &bp->bblock_dvma, GFP_ATOMIC); bigmac_ether_init()
1167 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) bigmac_ether_init()
1171 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, bigmac_ether_init()
1175 init_timer(&bp->bigmac_timer); bigmac_ether_init()
1176 bp->timer_state = asleep; bigmac_ether_init()
1177 bp->timer_ticks = 0; bigmac_ether_init()
1180 bp->dev = dev; bigmac_ether_init()
1188 dev->irq = bp->bigmac_op->archdata.irqs[0]; bigmac_ether_init()
1196 dev_set_drvdata(&bp->bigmac_op->dev, bp); bigmac_ether_init()
1206 if (bp->gregs) bigmac_ether_init()
1207 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); bigmac_ether_init()
1208 if (bp->creg) bigmac_ether_init()
1209 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); bigmac_ether_init()
1210 if (bp->bregs) bigmac_ether_init()
1211 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); bigmac_ether_init()
1212 if (bp->tregs) bigmac_ether_init()
1213 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); bigmac_ether_init()
1215 if (bp->bmac_block) bigmac_ether_init()
1216 dma_free_coherent(&bp->bigmac_op->dev, bigmac_ether_init()
1218 bp->bmac_block, bigmac_ether_init()
1219 bp->bblock_dvma); bigmac_ether_init()
1241 struct bigmac *bp = platform_get_drvdata(op); bigmac_sbus_remove() local
1243 struct net_device *net_dev = bp->dev; bigmac_sbus_remove()
1250 of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); bigmac_sbus_remove()
1251 of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); bigmac_sbus_remove()
1252 of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); bigmac_sbus_remove()
1253 of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); bigmac_sbus_remove()
1256 bp->bmac_block, bigmac_sbus_remove()
1257 bp->bblock_dvma); bigmac_sbus_remove()
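
sunbmac drives its transceiver over a bit-banged MII management interface: write_tcvr_bit() clocks out a single bit and put_tcvr_byte() shifts a field out MSB-first. A stripped-down sketch of the MSB-first shift, with write_bit() standing in for the real register pokes:

#include <stdio.h>

/* stand-in for the MDIO line; sunbmac pokes TCVR registers here */
static void write_bit(int bit)
{
	putchar(bit ? '1' : '0');
}

/* clock out the top "bits" bits of val, MSB first,
 * in the style of put_tcvr_byte()/bigmac_tcvr_write() */
static void put_bits(unsigned int val, int bits)
{
	for (int shift = bits - 1; shift >= 0; shift--)
		write_bit((val >> shift) & 1);
}

int main(void)
{
	put_bits(0x16, 5);   /* prints 10110 */
	putchar('\n');
	return 0;
}
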
H A Dsunbmac.h258 #define TX_BUFFS_AVAIL(bp) \
259 (((bp)->tx_old <= (bp)->tx_new) ? \
260 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
261 (bp)->tx_old - (bp)->tx_new - 1)
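
TX_BUFFS_AVAIL measures the gap between the consumer index (tx_old) and the producer index (tx_new), always leaving one descriptor unused so a full ring can be told apart from an empty one. The same arithmetic as a small test program (the ring size is illustrative):

#include <stdio.h>

#define TX_RING_SIZE 8

/* free slots between consumer (old) and producer (new),
 * keeping one slot empty - the same shape as TX_BUFFS_AVAIL() */
static int tx_avail(int old, int new)
{
	return (old <= new) ? old + (TX_RING_SIZE - 1) - new
			    : old - new - 1;
}

int main(void)
{
	printf("%d\n", tx_avail(0, 0));  /* empty ring: 7 free */
	printf("%d\n", tx_avail(3, 2));  /* wrapped: 0 free, ring is full */
	return 0;
}
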
/linux-4.4.14/drivers/net/ethernet/
H A Ddnet.c30 static u16 dnet_readw_mac(struct dnet *bp, u16 reg) dnet_readw_mac() argument
35 dnet_writel(bp, reg, MACREG_ADDR); dnet_readw_mac()
42 data_read = dnet_readl(bp, MACREG_DATA); dnet_readw_mac()
49 static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) dnet_writew_mac() argument
52 dnet_writel(bp, val, MACREG_DATA); dnet_writew_mac()
55 dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); dnet_writew_mac()
62 static void __dnet_set_hwaddr(struct dnet *bp) __dnet_set_hwaddr() argument
66 tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); __dnet_set_hwaddr()
67 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); __dnet_set_hwaddr()
68 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); __dnet_set_hwaddr()
69 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); __dnet_set_hwaddr()
70 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); __dnet_set_hwaddr()
71 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); __dnet_set_hwaddr()
74 static void dnet_get_hwaddr(struct dnet *bp) dnet_get_hwaddr() argument
91 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); dnet_get_hwaddr()
93 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); dnet_get_hwaddr()
95 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); dnet_get_hwaddr()
99 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); dnet_get_hwaddr()
104 struct dnet *bp = bus->priv; dnet_mdio_read() local
107 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_read()
120 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); dnet_mdio_read()
123 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_read()
127 value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); dnet_mdio_read()
137 struct dnet *bp = bus->priv; dnet_mdio_write() local
142 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_write()
161 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); dnet_mdio_write()
164 dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); dnet_mdio_write()
166 while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) dnet_mdio_write()
175 struct dnet *bp = netdev_priv(dev); dnet_handle_link_change() local
176 struct phy_device *phydev = bp->phy_dev; dnet_handle_link_change()
182 spin_lock_irqsave(&bp->lock, flags); dnet_handle_link_change()
184 mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); dnet_handle_link_change()
185 ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); dnet_handle_link_change()
188 if (bp->duplex != phydev->duplex) { dnet_handle_link_change()
196 bp->duplex = phydev->duplex; dnet_handle_link_change()
200 if (bp->speed != phydev->speed) { dnet_handle_link_change()
217 bp->speed = phydev->speed; dnet_handle_link_change()
221 if (phydev->link != bp->link) { dnet_handle_link_change()
229 bp->speed = 0; dnet_handle_link_change()
230 bp->duplex = -1; dnet_handle_link_change()
232 bp->link = phydev->link; dnet_handle_link_change()
238 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); dnet_handle_link_change()
239 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); dnet_handle_link_change()
242 spin_unlock_irqrestore(&bp->lock, flags); dnet_handle_link_change()
256 struct dnet *bp = netdev_priv(dev); dnet_mii_probe() local
262 if (bp->mii_bus->phy_map[phy_addr]) { dnet_mii_probe()
263 phydev = bp->mii_bus->phy_map[phy_addr]; dnet_mii_probe()
276 if (bp->capabilities & DNET_HAS_RMII) { dnet_mii_probe()
292 if (bp->capabilities & DNET_HAS_GIGABIT) dnet_mii_probe()
301 bp->link = 0; dnet_mii_probe()
302 bp->speed = 0; dnet_mii_probe()
303 bp->duplex = -1; dnet_mii_probe()
304 bp->phy_dev = phydev; dnet_mii_probe()
309 static int dnet_mii_init(struct dnet *bp) dnet_mii_init() argument
313 bp->mii_bus = mdiobus_alloc(); dnet_mii_init()
314 if (bp->mii_bus == NULL) dnet_mii_init()
317 bp->mii_bus->name = "dnet_mii_bus"; dnet_mii_init()
318 bp->mii_bus->read = &dnet_mdio_read; dnet_mii_init()
319 bp->mii_bus->write = &dnet_mdio_write; dnet_mii_init()
321 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", dnet_mii_init()
322 bp->pdev->name, bp->pdev->id); dnet_mii_init()
324 bp->mii_bus->priv = bp; dnet_mii_init()
326 bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev, dnet_mii_init()
328 if (!bp->mii_bus->irq) { dnet_mii_init()
334 bp->mii_bus->irq[i] = PHY_POLL; dnet_mii_init()
336 if (mdiobus_register(bp->mii_bus)) { dnet_mii_init()
341 if (dnet_mii_probe(bp->dev) != 0) { dnet_mii_init()
349 mdiobus_unregister(bp->mii_bus); dnet_mii_init()
351 mdiobus_free(bp->mii_bus); dnet_mii_init()
361 static void dnet_update_stats(struct dnet *bp) dnet_update_stats() argument
363 u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; dnet_update_stats()
364 u32 *p = &bp->hw_stats.rx_pkt_ignr; dnet_update_stats()
365 u32 *end = &bp->hw_stats.rx_byte + 1; dnet_update_stats()
373 reg = bp->regs + DNET_TX_UNICAST_CNT; dnet_update_stats()
374 p = &bp->hw_stats.tx_unicast; dnet_update_stats()
375 end = &bp->hw_stats.tx_byte + 1; dnet_update_stats()
386 struct dnet *bp = container_of(napi, struct dnet, napi); dnet_poll() local
387 struct net_device *dev = bp->dev; dnet_poll()
401 if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) dnet_poll()
404 cmd_word = dnet_readl(bp, RX_LEN_FIFO); dnet_poll()
421 *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); dnet_poll()
436 int_enable = dnet_readl(bp, INTR_ENB); dnet_poll()
438 dnet_writel(bp, int_enable, INTR_ENB); dnet_poll()
447 struct dnet *bp = netdev_priv(dev); dnet_interrupt() local
452 spin_lock_irqsave(&bp->lock, flags); dnet_interrupt()
455 int_src = dnet_readl(bp, INTR_SRC); dnet_interrupt()
456 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt()
461 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt()
463 dnet_writel(bp, int_enable, INTR_ENB); dnet_interrupt()
472 dnet_readl(bp, RX_STATUS), int_current); dnet_interrupt()
474 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); dnet_interrupt()
476 dnet_writel(bp, 0, SYS_CTL); dnet_interrupt()
484 dnet_readl(bp, TX_STATUS), int_current); dnet_interrupt()
486 dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); dnet_interrupt()
488 dnet_writel(bp, 0, SYS_CTL); dnet_interrupt()
493 if (napi_schedule_prep(&bp->napi)) { dnet_interrupt()
499 int_enable = dnet_readl(bp, INTR_ENB); dnet_interrupt()
501 dnet_writel(bp, int_enable, INTR_ENB); dnet_interrupt()
502 __napi_schedule(&bp->napi); dnet_interrupt()
510 spin_unlock_irqrestore(&bp->lock, flags); dnet_interrupt()
531 struct dnet *bp = netdev_priv(dev); dnet_start_xmit() local
537 tx_status = dnet_readl(bp, TX_STATUS); dnet_start_xmit()
546 spin_lock_irqsave(&bp->lock, flags); dnet_start_xmit()
548 tx_status = dnet_readl(bp, TX_STATUS); dnet_start_xmit()
557 if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { dnet_start_xmit()
559 dnet_writel(bp, *bufp++, TX_DATA_FIFO); dnet_start_xmit()
565 dnet_writel(bp, tx_cmd, TX_LEN_FIFO); dnet_start_xmit()
568 if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { dnet_start_xmit()
570 tx_status = dnet_readl(bp, INTR_SRC); dnet_start_xmit()
571 irq_enable = dnet_readl(bp, INTR_ENB); dnet_start_xmit()
573 dnet_writel(bp, irq_enable, INTR_ENB); dnet_start_xmit()
581 spin_unlock_irqrestore(&bp->lock, flags); dnet_start_xmit()
586 static void dnet_reset_hw(struct dnet *bp) dnet_reset_hw() argument
589 dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); dnet_reset_hw()
595 dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); dnet_reset_hw()
600 dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); dnet_reset_hw()
603 dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, dnet_reset_hw()
606 dnet_writel(bp, 0, SYS_CTL); dnet_reset_hw()
609 static void dnet_init_hw(struct dnet *bp) dnet_init_hw() argument
613 dnet_reset_hw(bp); dnet_init_hw()
614 __dnet_set_hwaddr(bp); dnet_init_hw()
616 config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); dnet_init_hw()
618 if (bp->dev->flags & IFF_PROMISC) dnet_init_hw()
621 if (!(bp->dev->flags & IFF_BROADCAST)) dnet_init_hw()
630 dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); dnet_init_hw()
633 config = dnet_readl(bp, INTR_SRC); dnet_init_hw()
636 dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | dnet_init_hw()
645 struct dnet *bp = netdev_priv(dev); dnet_open() local
648 if (!bp->phy_dev) dnet_open()
651 napi_enable(&bp->napi); dnet_open()
652 dnet_init_hw(bp); dnet_open()
654 phy_start_aneg(bp->phy_dev); dnet_open()
657 phy_start(bp->phy_dev); dnet_open()
666 struct dnet *bp = netdev_priv(dev); dnet_close() local
669 napi_disable(&bp->napi); dnet_close()
671 if (bp->phy_dev) dnet_close()
672 phy_stop(bp->phy_dev); dnet_close()
674 dnet_reset_hw(bp); dnet_close()
716 struct dnet *bp = netdev_priv(dev); dnet_get_stats() local
718 struct dnet_stats *hwstat = &bp->hw_stats; dnet_get_stats()
721 dnet_update_stats(bp); dnet_get_stats()
752 struct dnet *bp = netdev_priv(dev); dnet_get_settings() local
753 struct phy_device *phydev = bp->phy_dev; dnet_get_settings()
763 struct dnet *bp = netdev_priv(dev); dnet_set_settings() local
764 struct phy_device *phydev = bp->phy_dev; dnet_set_settings()
774 struct dnet *bp = netdev_priv(dev); dnet_ioctl() local
775 struct phy_device *phydev = bp->phy_dev; dnet_ioctl()
817 struct dnet *bp; dnet_probe() local
824 dev = alloc_etherdev(sizeof(*bp)); dnet_probe()
831 bp = netdev_priv(dev); dnet_probe()
832 bp->dev = dev; dnet_probe()
837 spin_lock_init(&bp->lock); dnet_probe()
840 bp->regs = devm_ioremap_resource(&pdev->dev, res); dnet_probe()
841 if (IS_ERR(bp->regs)) { dnet_probe()
842 err = PTR_ERR(bp->regs); dnet_probe()
855 netif_napi_add(dev, &bp->napi, dnet_poll, 64); dnet_probe()
858 dev->base_addr = (unsigned long)bp->regs; dnet_probe()
860 bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; dnet_probe()
862 dnet_get_hwaddr(bp); dnet_probe()
867 __dnet_set_hwaddr(bp); dnet_probe()
883 err = dnet_mii_init(bp); dnet_probe()
888 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); dnet_probe()
890 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", dnet_probe()
891 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", dnet_probe()
892 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", dnet_probe()
893 (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); dnet_probe()
894 phydev = bp->phy_dev; dnet_probe()
914 struct dnet *bp; dnet_remove() local
919 bp = netdev_priv(dev); dnet_remove()
920 if (bp->phy_dev) dnet_remove()
921 phy_disconnect(bp->phy_dev); dnet_remove()
922 mdiobus_unregister(bp->mii_bus); dnet_remove()
923 mdiobus_free(bp->mii_bus); dnet_remove()
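
dnet reaches its MAC registers through a two-step window: the register offset goes into MACREG_ADDR, then the value moves through MACREG_DATA (the write path ORs DNET_INTERNAL_WRITE into the address). A toy model of such an address/data window over a fake register file (mac_regs, addr_latch and the helpers are invented):

#include <stdio.h>

static unsigned short mac_regs[64];      /* pretend MAC register file */
static unsigned int addr_latch;          /* MACREG_ADDR equivalent */

static void win_write(unsigned int reg, unsigned short val)
{
	addr_latch = reg;                /* select the register ... */
	mac_regs[addr_latch] = val;      /* ... then move the data */
}

static unsigned short win_read(unsigned int reg)
{
	addr_latch = reg;
	return mac_regs[addr_latch];
}

int main(void)
{
	win_write(5, 0xbeef);
	printf("reg5=0x%x\n", win_read(5));
	return 0;
}
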
/linux-4.4.14/fs/afs/
H A Dfsclient.c24 const __be32 *bp = *_bp; xdr_decode_AFSFid() local
26 fid->vid = ntohl(*bp++); xdr_decode_AFSFid()
27 fid->vnode = ntohl(*bp++); xdr_decode_AFSFid()
28 fid->unique = ntohl(*bp++); xdr_decode_AFSFid()
29 *_bp = bp; xdr_decode_AFSFid()
41 const __be32 *bp = *_bp; xdr_decode_AFSFetchStatus() local
50 u32 x = ntohl(*bp++); \ xdr_decode_AFSFetchStatus()
55 status->if_version = ntohl(*bp++); xdr_decode_AFSFetchStatus()
58 size = ntohl(*bp++); xdr_decode_AFSFetchStatus()
59 data_version = ntohl(*bp++); xdr_decode_AFSFetchStatus()
61 owner = make_kuid(&init_user_ns, ntohl(*bp++)); xdr_decode_AFSFetchStatus()
69 bp++; /* seg size */ xdr_decode_AFSFetchStatus()
70 status->mtime_client = ntohl(*bp++); xdr_decode_AFSFetchStatus()
71 status->mtime_server = ntohl(*bp++); xdr_decode_AFSFetchStatus()
72 group = make_kgid(&init_user_ns, ntohl(*bp++)); xdr_decode_AFSFetchStatus()
75 bp++; /* sync counter */ xdr_decode_AFSFetchStatus()
76 data_version |= (u64) ntohl(*bp++) << 32; xdr_decode_AFSFetchStatus()
78 size |= (u64) ntohl(*bp++) << 32; xdr_decode_AFSFetchStatus()
79 bp++; /* spare 4 */ xdr_decode_AFSFetchStatus()
80 *_bp = bp; xdr_decode_AFSFetchStatus()
137 const __be32 *bp = *_bp; xdr_decode_AFSCallBack() local
139 vnode->cb_version = ntohl(*bp++); xdr_decode_AFSCallBack()
140 vnode->cb_expiry = ntohl(*bp++); xdr_decode_AFSCallBack()
141 vnode->cb_type = ntohl(*bp++); xdr_decode_AFSCallBack()
143 *_bp = bp; xdr_decode_AFSCallBack()
149 const __be32 *bp = *_bp; xdr_decode_AFSCallBack_raw() local
151 cb->version = ntohl(*bp++); xdr_decode_AFSCallBack_raw()
152 cb->expiry = ntohl(*bp++); xdr_decode_AFSCallBack_raw()
153 cb->type = ntohl(*bp++); xdr_decode_AFSCallBack_raw()
154 *_bp = bp; xdr_decode_AFSCallBack_raw()
163 const __be32 *bp = *_bp; xdr_decode_AFSVolSync() local
165 volsync->creation = ntohl(*bp++); xdr_decode_AFSVolSync()
166 bp++; /* spare2 */ xdr_decode_AFSVolSync()
167 bp++; /* spare3 */ xdr_decode_AFSVolSync()
168 bp++; /* spare4 */ xdr_decode_AFSVolSync()
169 bp++; /* spare5 */ xdr_decode_AFSVolSync()
170 bp++; /* spare6 */ xdr_decode_AFSVolSync()
171 *_bp = bp; xdr_decode_AFSVolSync()
179 __be32 *bp = *_bp; xdr_encode_AFS_StoreStatus() local
203 *bp++ = htonl(mask); xdr_encode_AFS_StoreStatus()
204 *bp++ = htonl(mtime); xdr_encode_AFS_StoreStatus()
205 *bp++ = htonl(owner); xdr_encode_AFS_StoreStatus()
206 *bp++ = htonl(group); xdr_encode_AFS_StoreStatus()
207 *bp++ = htonl(mode); xdr_encode_AFS_StoreStatus()
208 *bp++ = 0; /* segment size */ xdr_encode_AFS_StoreStatus()
209 *_bp = bp; xdr_encode_AFS_StoreStatus()
218 const __be32 *bp = *_bp; xdr_decode_AFSFetchVolumeStatus() local
220 vs->vid = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
221 vs->parent_id = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
222 vs->online = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
223 vs->in_service = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
224 vs->blessed = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
225 vs->needs_salvage = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
226 vs->type = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
227 vs->min_quota = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
228 vs->max_quota = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
229 vs->blocks_in_use = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
230 vs->part_blocks_avail = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
231 vs->part_max_blocks = ntohl(*bp++); xdr_decode_AFSFetchVolumeStatus()
232 *_bp = bp; xdr_decode_AFSFetchVolumeStatus()
242 const __be32 *bp; afs_deliver_fs_fetch_status() local
254 bp = call->buffer; afs_deliver_fs_fetch_status()
255 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_fetch_status()
256 xdr_decode_AFSCallBack(&bp, vnode); afs_deliver_fs_fetch_status()
258 xdr_decode_AFSVolSync(&bp, call->reply2); afs_deliver_fs_fetch_status()
284 __be32 *bp; afs_fs_fetch_file_status() local
300 bp = call->request; afs_fs_fetch_file_status()
301 bp[0] = htonl(FSFETCHSTATUS); afs_fs_fetch_file_status()
302 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_file_status()
303 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_file_status()
304 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_file_status()
316 const __be32 *bp; afs_deliver_fs_fetch_data() local
398 bp = call->buffer; afs_deliver_fs_fetch_data()
399 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_fetch_data()
400 xdr_decode_AFSCallBack(&bp, vnode); afs_deliver_fs_fetch_data()
402 xdr_decode_AFSVolSync(&bp, call->reply2); afs_deliver_fs_fetch_data()
457 __be32 *bp; afs_fs_fetch_data64() local
476 bp = call->request; afs_fs_fetch_data64()
477 bp[0] = htonl(FSFETCHDATA64); afs_fs_fetch_data64()
478 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_data64()
479 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_data64()
480 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_data64()
481 bp[4] = htonl(upper_32_bits(offset)); afs_fs_fetch_data64()
482 bp[5] = htonl((u32) offset); afs_fs_fetch_data64()
483 bp[6] = 0; afs_fs_fetch_data64()
484 bp[7] = htonl((u32) length); afs_fs_fetch_data64()
500 __be32 *bp; afs_fs_fetch_data() local
521 bp = call->request; afs_fs_fetch_data()
522 bp[0] = htonl(FSFETCHDATA); afs_fs_fetch_data()
523 bp[1] = htonl(vnode->fid.vid); afs_fs_fetch_data()
524 bp[2] = htonl(vnode->fid.vnode); afs_fs_fetch_data()
525 bp[3] = htonl(vnode->fid.unique); afs_fs_fetch_data()
526 bp[4] = htonl(offset); afs_fs_fetch_data()
527 bp[5] = htonl(length); afs_fs_fetch_data()
564 __be32 *bp, *tp; afs_fs_give_up_callbacks() local
588 bp = call->request; afs_fs_give_up_callbacks()
589 tp = bp + 2 + ncallbacks * 3; afs_fs_give_up_callbacks()
590 *bp++ = htonl(FSGIVEUPCALLBACKS); afs_fs_give_up_callbacks()
591 *bp++ = htonl(ncallbacks); afs_fs_give_up_callbacks()
599 *bp++ = htonl(cb->fid.vid); afs_fs_give_up_callbacks()
600 *bp++ = htonl(cb->fid.vnode); afs_fs_give_up_callbacks()
601 *bp++ = htonl(cb->fid.unique); afs_fs_give_up_callbacks()
624 const __be32 *bp; afs_deliver_fs_create_vnode() local
636 bp = call->buffer; afs_deliver_fs_create_vnode()
637 xdr_decode_AFSFid(&bp, call->reply2); afs_deliver_fs_create_vnode()
638 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL); afs_deliver_fs_create_vnode()
639 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_create_vnode()
640 xdr_decode_AFSCallBack_raw(&bp, call->reply4); afs_deliver_fs_create_vnode()
641 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_create_vnode()
672 __be32 *bp; afs_fs_create() local
694 bp = call->request; afs_fs_create()
695 *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE); afs_fs_create()
696 *bp++ = htonl(vnode->fid.vid); afs_fs_create()
697 *bp++ = htonl(vnode->fid.vnode); afs_fs_create()
698 *bp++ = htonl(vnode->fid.unique); afs_fs_create()
699 *bp++ = htonl(namesz); afs_fs_create()
700 memcpy(bp, name, namesz); afs_fs_create()
701 bp = (void *) bp + namesz; afs_fs_create()
703 memset(bp, 0, padsz); afs_fs_create()
704 bp = (void *) bp + padsz; afs_fs_create()
706 *bp++ = htonl(AFS_SET_MODE); afs_fs_create()
707 *bp++ = 0; /* mtime */ afs_fs_create()
708 *bp++ = 0; /* owner */ afs_fs_create()
709 *bp++ = 0; /* group */ afs_fs_create()
710 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ afs_fs_create()
711 *bp++ = 0; /* segment size */ afs_fs_create()
723 const __be32 *bp; afs_deliver_fs_remove() local
735 bp = call->buffer; afs_deliver_fs_remove()
736 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_remove()
737 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_remove()
765 __be32 *bp; afs_fs_remove() local
783 bp = call->request; afs_fs_remove()
784 *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE); afs_fs_remove()
785 *bp++ = htonl(vnode->fid.vid); afs_fs_remove()
786 *bp++ = htonl(vnode->fid.vnode); afs_fs_remove()
787 *bp++ = htonl(vnode->fid.unique); afs_fs_remove()
788 *bp++ = htonl(namesz); afs_fs_remove()
789 memcpy(bp, name, namesz); afs_fs_remove()
790 bp = (void *) bp + namesz; afs_fs_remove()
792 memset(bp, 0, padsz); afs_fs_remove()
793 bp = (void *) bp + padsz; afs_fs_remove()
806 const __be32 *bp; afs_deliver_fs_link() local
818 bp = call->buffer; afs_deliver_fs_link()
819 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_link()
820 xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode, NULL); afs_deliver_fs_link()
821 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_link()
849 __be32 *bp; afs_fs_link() local
868 bp = call->request; afs_fs_link()
869 *bp++ = htonl(FSLINK); afs_fs_link()
870 *bp++ = htonl(dvnode->fid.vid); afs_fs_link()
871 *bp++ = htonl(dvnode->fid.vnode); afs_fs_link()
872 *bp++ = htonl(dvnode->fid.unique); afs_fs_link()
873 *bp++ = htonl(namesz); afs_fs_link()
874 memcpy(bp, name, namesz); afs_fs_link()
875 bp = (void *) bp + namesz; afs_fs_link()
877 memset(bp, 0, padsz); afs_fs_link()
878 bp = (void *) bp + padsz; afs_fs_link()
880 *bp++ = htonl(vnode->fid.vid); afs_fs_link()
881 *bp++ = htonl(vnode->fid.vnode); afs_fs_link()
882 *bp++ = htonl(vnode->fid.unique); afs_fs_link()
894 const __be32 *bp; afs_deliver_fs_symlink() local
906 bp = call->buffer; afs_deliver_fs_symlink()
907 xdr_decode_AFSFid(&bp, call->reply2); afs_deliver_fs_symlink()
908 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL, NULL); afs_deliver_fs_symlink()
909 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); afs_deliver_fs_symlink()
910 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_symlink()
940 __be32 *bp; afs_fs_symlink() local
965 bp = call->request; afs_fs_symlink()
966 *bp++ = htonl(FSSYMLINK); afs_fs_symlink()
967 *bp++ = htonl(vnode->fid.vid); afs_fs_symlink()
968 *bp++ = htonl(vnode->fid.vnode); afs_fs_symlink()
969 *bp++ = htonl(vnode->fid.unique); afs_fs_symlink()
970 *bp++ = htonl(namesz); afs_fs_symlink()
971 memcpy(bp, name, namesz); afs_fs_symlink()
972 bp = (void *) bp + namesz; afs_fs_symlink()
974 memset(bp, 0, padsz); afs_fs_symlink()
975 bp = (void *) bp + padsz; afs_fs_symlink()
977 *bp++ = htonl(c_namesz); afs_fs_symlink()
978 memcpy(bp, contents, c_namesz); afs_fs_symlink()
979 bp = (void *) bp + c_namesz; afs_fs_symlink()
981 memset(bp, 0, c_padsz); afs_fs_symlink()
982 bp = (void *) bp + c_padsz; afs_fs_symlink()
984 *bp++ = htonl(AFS_SET_MODE); afs_fs_symlink()
985 *bp++ = 0; /* mtime */ afs_fs_symlink()
986 *bp++ = 0; /* owner */ afs_fs_symlink()
987 *bp++ = 0; /* group */ afs_fs_symlink()
988 *bp++ = htonl(S_IRWXUGO); /* unix mode */ afs_fs_symlink()
989 *bp++ = 0; /* segment size */ afs_fs_symlink()
1001 const __be32 *bp; afs_deliver_fs_rename() local
1013 bp = call->buffer; afs_deliver_fs_rename()
1014 xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode, NULL); afs_deliver_fs_rename()
1016 xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode, afs_deliver_fs_rename()
1018 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_rename()
1047 __be32 *bp; afs_fs_rename() local
1073 bp = call->request; afs_fs_rename()
1074 *bp++ = htonl(FSRENAME); afs_fs_rename()
1075 *bp++ = htonl(orig_dvnode->fid.vid); afs_fs_rename()
1076 *bp++ = htonl(orig_dvnode->fid.vnode); afs_fs_rename()
1077 *bp++ = htonl(orig_dvnode->fid.unique); afs_fs_rename()
1078 *bp++ = htonl(o_namesz); afs_fs_rename()
1079 memcpy(bp, orig_name, o_namesz); afs_fs_rename()
1080 bp = (void *) bp + o_namesz; afs_fs_rename()
1082 memset(bp, 0, o_padsz); afs_fs_rename()
1083 bp = (void *) bp + o_padsz; afs_fs_rename()
1086 *bp++ = htonl(new_dvnode->fid.vid); afs_fs_rename()
1087 *bp++ = htonl(new_dvnode->fid.vnode); afs_fs_rename()
1088 *bp++ = htonl(new_dvnode->fid.unique); afs_fs_rename()
1089 *bp++ = htonl(n_namesz); afs_fs_rename()
1090 memcpy(bp, new_name, n_namesz); afs_fs_rename()
1091 bp = (void *) bp + n_namesz; afs_fs_rename()
1093 memset(bp, 0, n_padsz); afs_fs_rename()
1094 bp = (void *) bp + n_padsz; afs_fs_rename()
1107 const __be32 *bp; afs_deliver_fs_store_data() local
1124 bp = call->buffer; afs_deliver_fs_store_data()
1125 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, afs_deliver_fs_store_data()
1127 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_store_data()
1164 __be32 *bp; afs_fs_store_data64() local
1189 bp = call->request; afs_fs_store_data64()
1190 *bp++ = htonl(FSSTOREDATA64); afs_fs_store_data64()
1191 *bp++ = htonl(vnode->fid.vid); afs_fs_store_data64()
1192 *bp++ = htonl(vnode->fid.vnode); afs_fs_store_data64()
1193 *bp++ = htonl(vnode->fid.unique); afs_fs_store_data64()
1195 *bp++ = 0; /* mask */ afs_fs_store_data64()
1196 *bp++ = 0; /* mtime */ afs_fs_store_data64()
1197 *bp++ = 0; /* owner */ afs_fs_store_data64()
1198 *bp++ = 0; /* group */ afs_fs_store_data64()
1199 *bp++ = 0; /* unix mode */ afs_fs_store_data64()
1200 *bp++ = 0; /* segment size */ afs_fs_store_data64()
1202 *bp++ = htonl(pos >> 32); afs_fs_store_data64()
1203 *bp++ = htonl((u32) pos); afs_fs_store_data64()
1204 *bp++ = htonl(size >> 32); afs_fs_store_data64()
1205 *bp++ = htonl((u32) size); afs_fs_store_data64()
1206 *bp++ = htonl(i_size >> 32); afs_fs_store_data64()
1207 *bp++ = htonl((u32) i_size); afs_fs_store_data64()
1223 __be32 *bp; afs_fs_store_data() local
1266 bp = call->request; afs_fs_store_data()
1267 *bp++ = htonl(FSSTOREDATA); afs_fs_store_data()
1268 *bp++ = htonl(vnode->fid.vid); afs_fs_store_data()
1269 *bp++ = htonl(vnode->fid.vnode); afs_fs_store_data()
1270 *bp++ = htonl(vnode->fid.unique); afs_fs_store_data()
1272 *bp++ = 0; /* mask */ afs_fs_store_data()
1273 *bp++ = 0; /* mtime */ afs_fs_store_data()
1274 *bp++ = 0; /* owner */ afs_fs_store_data()
1275 *bp++ = 0; /* group */ afs_fs_store_data()
1276 *bp++ = 0; /* unix mode */ afs_fs_store_data()
1277 *bp++ = 0; /* segment size */ afs_fs_store_data()
1279 *bp++ = htonl(pos); afs_fs_store_data()
1280 *bp++ = htonl(size); afs_fs_store_data()
1281 *bp++ = htonl(i_size); afs_fs_store_data()
1294 const __be32 *bp; afs_deliver_fs_store_status() local
1315 bp = call->buffer; afs_deliver_fs_store_status()
1316 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, store_version); afs_deliver_fs_store_status()
1317 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_store_status()
1356 __be32 *bp; afs_fs_setattr_size64() local
1377 bp = call->request; afs_fs_setattr_size64()
1378 *bp++ = htonl(FSSTOREDATA64); afs_fs_setattr_size64()
1379 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr_size64()
1380 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr_size64()
1381 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr_size64()
1383 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr_size64()
1385 *bp++ = 0; /* position of start of write */ afs_fs_setattr_size64()
1386 *bp++ = 0; afs_fs_setattr_size64()
1387 *bp++ = 0; /* size of write */ afs_fs_setattr_size64()
1388 *bp++ = 0; afs_fs_setattr_size64()
1389 *bp++ = htonl(attr->ia_size >> 32); /* new file length */ afs_fs_setattr_size64()
1390 *bp++ = htonl((u32) attr->ia_size); afs_fs_setattr_size64()
1404 __be32 *bp; afs_fs_setattr_size() local
1428 bp = call->request; afs_fs_setattr_size()
1429 *bp++ = htonl(FSSTOREDATA); afs_fs_setattr_size()
1430 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr_size()
1431 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr_size()
1432 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr_size()
1434 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr_size()
1436 *bp++ = 0; /* position of start of write */ afs_fs_setattr_size()
1437 *bp++ = 0; /* size of write */ afs_fs_setattr_size()
1438 *bp++ = htonl(attr->ia_size); /* new file length */ afs_fs_setattr_size()
1452 __be32 *bp; afs_fs_setattr() local
1474 bp = call->request; afs_fs_setattr()
1475 *bp++ = htonl(FSSTORESTATUS); afs_fs_setattr()
1476 *bp++ = htonl(vnode->fid.vid); afs_fs_setattr()
1477 *bp++ = htonl(vnode->fid.vnode); afs_fs_setattr()
1478 *bp++ = htonl(vnode->fid.unique); afs_fs_setattr()
1480 xdr_encode_AFS_StoreStatus(&bp, attr); afs_fs_setattr()
1491 const __be32 *bp; afs_deliver_fs_get_volume_status() local
1513 bp = call->buffer; afs_deliver_fs_get_volume_status()
1514 xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); afs_deliver_fs_get_volume_status()
1730 __be32 *bp; afs_fs_get_volume_status() local
1753 bp = call->request; afs_fs_get_volume_status()
1754 bp[0] = htonl(FSGETVOLUMESTATUS); afs_fs_get_volume_status()
1755 bp[1] = htonl(vnode->fid.vid); afs_fs_get_volume_status()
1766 const __be32 *bp; afs_deliver_fs_xxxx_lock() local
1778 bp = call->buffer; afs_deliver_fs_xxxx_lock()
1779 /* xdr_decode_AFSVolSync(&bp, call->replyX); */ afs_deliver_fs_xxxx_lock()
1825 __be32 *bp; afs_fs_set_lock() local
1839 bp = call->request; afs_fs_set_lock()
1840 *bp++ = htonl(FSSETLOCK); afs_fs_set_lock()
1841 *bp++ = htonl(vnode->fid.vid); afs_fs_set_lock()
1842 *bp++ = htonl(vnode->fid.vnode); afs_fs_set_lock()
1843 *bp++ = htonl(vnode->fid.unique); afs_fs_set_lock()
1844 *bp++ = htonl(type); afs_fs_set_lock()
1858 __be32 *bp; afs_fs_extend_lock() local
1872 bp = call->request; afs_fs_extend_lock()
1873 *bp++ = htonl(FSEXTENDLOCK); afs_fs_extend_lock()
1874 *bp++ = htonl(vnode->fid.vid); afs_fs_extend_lock()
1875 *bp++ = htonl(vnode->fid.vnode); afs_fs_extend_lock()
1876 *bp++ = htonl(vnode->fid.unique); afs_fs_extend_lock()
1890 __be32 *bp; afs_fs_release_lock() local
1904 bp = call->request; afs_fs_release_lock()
1905 *bp++ = htonl(FSRELEASELOCK); afs_fs_release_lock()
1906 *bp++ = htonl(vnode->fid.vid); afs_fs_release_lock()
1907 *bp++ = htonl(vnode->fid.vnode); afs_fs_release_lock()
1908 *bp++ = htonl(vnode->fid.unique); afs_fs_release_lock()
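
All of the AFS marshalling helpers above share one convention: the caller passes the address of its cursor (__be32 **_bp), the helper consumes big-endian words with ntohl(*bp++), and the advanced cursor is written back so the next decoder picks up where this one stopped. A self-contained sketch of that convention (the three-word record mirrors xdr_decode_AFSFid(); uint32_t stands in for __be32):

#include <arpa/inet.h>   /* ntohl()/htonl() */
#include <stdint.h>
#include <stdio.h>

struct fid { uint32_t vid, vnode, unique; };

/* consume three words and advance the caller's cursor */
static void decode_fid(const uint32_t **_bp, struct fid *fid)
{
	const uint32_t *bp = *_bp;

	fid->vid    = ntohl(*bp++);
	fid->vnode  = ntohl(*bp++);
	fid->unique = ntohl(*bp++);
	*_bp = bp;               /* hand the advanced cursor back */
}

int main(void)
{
	uint32_t wire[3] = { htonl(7), htonl(42), htonl(9) };
	const uint32_t *bp = wire;
	struct fid fid;

	decode_fid(&bp, &fid);
	printf("vid=%u vnode=%u unique=%u consumed=%zu words\n",
	       fid.vid, fid.vnode, fid.unique, (size_t)(bp - wire));
	return 0;
}
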
H A Dvlclient.c65 __be32 *bp; afs_deliver_vl_get_entry_by_xxx() local
80 bp = call->buffer; afs_deliver_vl_get_entry_by_xxx()
83 entry->name[loop] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
85 bp++; /* final NUL */ afs_deliver_vl_get_entry_by_xxx()
87 bp++; /* type */ afs_deliver_vl_get_entry_by_xxx()
88 entry->nservers = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
91 entry->servers[loop].s_addr = *bp++; afs_deliver_vl_get_entry_by_xxx()
93 bp += 8; /* partition IDs */ afs_deliver_vl_get_entry_by_xxx()
96 tmp = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
106 entry->vid[0] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
107 entry->vid[1] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
108 entry->vid[2] = ntohl(*bp++); afs_deliver_vl_get_entry_by_xxx()
110 bp++; /* clone ID */ afs_deliver_vl_get_entry_by_xxx()
112 tmp = ntohl(*bp++); /* flags */ afs_deliver_vl_get_entry_by_xxx()
158 __be32 *bp; afs_vl_get_entry_by_name() local
176 bp = call->request; afs_vl_get_entry_by_name()
177 *bp++ = htonl(VLGETENTRYBYNAME); afs_vl_get_entry_by_name()
178 *bp++ = htonl(volnamesz); afs_vl_get_entry_by_name()
179 memcpy(bp, volname, volnamesz); afs_vl_get_entry_by_name()
181 memset((void *) bp + volnamesz, 0, padsz); afs_vl_get_entry_by_name()
198 __be32 *bp; afs_vl_get_entry_by_id() local
212 bp = call->request; afs_vl_get_entry_by_id()
213 *bp++ = htonl(VLGETENTRYBYID); afs_vl_get_entry_by_id()
214 *bp++ = htonl(volid); afs_vl_get_entry_by_id()
215 *bp = htonl(voltype); afs_vl_get_entry_by_id()
H A Dcmservice.c177 __be32 *bp; afs_deliver_cb_callback() local
227 bp = call->buffer; afs_deliver_cb_callback()
229 cb->fid.vid = ntohl(*bp++); afs_deliver_cb_callback()
230 cb->fid.vnode = ntohl(*bp++); afs_deliver_cb_callback()
231 cb->fid.unique = ntohl(*bp++); afs_deliver_cb_callback()
269 bp = call->buffer; afs_deliver_cb_callback()
271 cb->version = ntohl(*bp++); afs_deliver_cb_callback()
272 cb->expiry = ntohl(*bp++); afs_deliver_cb_callback()
273 cb->type = ntohl(*bp++); afs_deliver_cb_callback()
/linux-4.4.14/drivers/media/usb/pvrusb2/
H A Dpvrusb2-io.c36 #define BUFFER_CHECK(bp) do { \
37 if ((bp)->signature != BUFFER_SIG) { \
40 (bp),__FILE__,__LINE__); \
41 pvr2_buffer_describe(bp,"BadSig"); \
46 #define BUFFER_CHECK(bp) do {} while(0)
113 static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg) pvr2_buffer_describe() argument
120 bp, pvr2_buffer_describe()
121 (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"), pvr2_buffer_describe()
122 (bp ? bp->id : 0), pvr2_buffer_describe()
123 (bp ? bp->status : 0), pvr2_buffer_describe()
124 (bp ? bp->stream : NULL), pvr2_buffer_describe()
125 (bp ? bp->purb : NULL), pvr2_buffer_describe()
126 (bp ? bp->signature : 0)); pvr2_buffer_describe()
130 static void pvr2_buffer_remove(struct pvr2_buffer *bp) pvr2_buffer_remove() argument
135 struct pvr2_stream *sp = bp->stream; pvr2_buffer_remove()
136 switch (bp->state) { pvr2_buffer_remove()
140 ccnt = bp->max_count; pvr2_buffer_remove()
145 ccnt = bp->max_count; pvr2_buffer_remove()
150 ccnt = bp->used_count; pvr2_buffer_remove()
155 list_del_init(&bp->list_overhead); pvr2_buffer_remove()
161 pvr2_buffer_state_decode(bp->state),*bcnt,*cnt); pvr2_buffer_remove()
162 bp->state = pvr2_buffer_state_none; pvr2_buffer_remove()
165 static void pvr2_buffer_set_none(struct pvr2_buffer *bp) pvr2_buffer_set_none() argument
169 BUFFER_CHECK(bp); pvr2_buffer_set_none()
170 sp = bp->stream; pvr2_buffer_set_none()
173 bp, pvr2_buffer_set_none()
174 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_none()
177 pvr2_buffer_remove(bp); pvr2_buffer_set_none()
181 static int pvr2_buffer_set_ready(struct pvr2_buffer *bp) pvr2_buffer_set_ready() argument
186 BUFFER_CHECK(bp); pvr2_buffer_set_ready()
187 sp = bp->stream; pvr2_buffer_set_ready()
190 bp, pvr2_buffer_set_ready()
191 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_ready()
195 pvr2_buffer_remove(bp); pvr2_buffer_set_ready()
196 list_add_tail(&bp->list_overhead,&sp->ready_list); pvr2_buffer_set_ready()
197 bp->state = pvr2_buffer_state_ready; pvr2_buffer_set_ready()
199 sp->r_bcount += bp->used_count; pvr2_buffer_set_ready()
203 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_ready()
209 static void pvr2_buffer_set_idle(struct pvr2_buffer *bp) pvr2_buffer_set_idle() argument
213 BUFFER_CHECK(bp); pvr2_buffer_set_idle()
214 sp = bp->stream; pvr2_buffer_set_idle()
217 bp, pvr2_buffer_set_idle()
218 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_idle()
221 pvr2_buffer_remove(bp); pvr2_buffer_set_idle()
222 list_add_tail(&bp->list_overhead,&sp->idle_list); pvr2_buffer_set_idle()
223 bp->state = pvr2_buffer_state_idle; pvr2_buffer_set_idle()
225 sp->i_bcount += bp->max_count; pvr2_buffer_set_idle()
229 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_idle()
234 static void pvr2_buffer_set_queued(struct pvr2_buffer *bp) pvr2_buffer_set_queued() argument
238 BUFFER_CHECK(bp); pvr2_buffer_set_queued()
239 sp = bp->stream; pvr2_buffer_set_queued()
242 bp, pvr2_buffer_set_queued()
243 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_queued()
246 pvr2_buffer_remove(bp); pvr2_buffer_set_queued()
247 list_add_tail(&bp->list_overhead,&sp->queued_list); pvr2_buffer_set_queued()
248 bp->state = pvr2_buffer_state_queued; pvr2_buffer_set_queued()
250 sp->q_bcount += bp->max_count; pvr2_buffer_set_queued()
254 pvr2_buffer_state_decode(bp->state), pvr2_buffer_set_queued()
259 static void pvr2_buffer_wipe(struct pvr2_buffer *bp) pvr2_buffer_wipe() argument
261 if (bp->state == pvr2_buffer_state_queued) { pvr2_buffer_wipe()
262 usb_kill_urb(bp->purb); pvr2_buffer_wipe()
266 static int pvr2_buffer_init(struct pvr2_buffer *bp, pvr2_buffer_init() argument
270 memset(bp,0,sizeof(*bp)); pvr2_buffer_init()
271 bp->signature = BUFFER_SIG; pvr2_buffer_init()
272 bp->id = id; pvr2_buffer_init()
274 "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp); pvr2_buffer_init()
275 bp->stream = sp; pvr2_buffer_init()
276 bp->state = pvr2_buffer_state_none; pvr2_buffer_init()
277 INIT_LIST_HEAD(&bp->list_overhead); pvr2_buffer_init()
278 bp->purb = usb_alloc_urb(0,GFP_KERNEL); pvr2_buffer_init()
279 if (! bp->purb) return -ENOMEM; pvr2_buffer_init()
281 pvr2_buffer_describe(bp,"create"); pvr2_buffer_init()
286 static void pvr2_buffer_done(struct pvr2_buffer *bp) pvr2_buffer_done() argument
289 pvr2_buffer_describe(bp,"delete"); pvr2_buffer_done()
291 pvr2_buffer_wipe(bp); pvr2_buffer_done()
292 pvr2_buffer_set_none(bp); pvr2_buffer_done()
293 bp->signature = 0; pvr2_buffer_done()
294 bp->stream = NULL; pvr2_buffer_done()
295 usb_free_urb(bp->purb); pvr2_buffer_done()
297 " bufferDone %p",bp); pvr2_buffer_done()
332 struct pvr2_buffer *bp; pvr2_stream_buffer_count() local
333 bp = kmalloc(sizeof(*bp),GFP_KERNEL); pvr2_stream_buffer_count()
334 if (!bp) return -ENOMEM; pvr2_stream_buffer_count()
335 ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count); pvr2_stream_buffer_count()
337 kfree(bp); pvr2_stream_buffer_count()
340 sp->buffers[sp->buffer_total_count] = bp; pvr2_stream_buffer_count()
342 pvr2_buffer_set_idle(bp); pvr2_stream_buffer_count()
346 struct pvr2_buffer *bp; pvr2_stream_buffer_count() local
347 bp = sp->buffers[sp->buffer_total_count - 1]; pvr2_stream_buffer_count()
351 pvr2_buffer_done(bp); pvr2_stream_buffer_count()
352 kfree(bp); pvr2_stream_buffer_count()
371 struct pvr2_buffer *bp; pvr2_stream_achieve_buffer_count() local
387 bp = sp->buffers[sp->buffer_total_count - (cnt + 1)]; pvr2_stream_achieve_buffer_count()
388 if (bp->state != pvr2_buffer_state_idle) break; pvr2_stream_achieve_buffer_count()
437 struct pvr2_buffer *bp = urb->context; buffer_complete() local
440 BUFFER_CHECK(bp); buffer_complete()
441 sp = bp->stream; buffer_complete()
442 bp->used_count = 0; buffer_complete()
443 bp->status = 0; buffer_complete()
446 bp,urb->status,urb->actual_length); buffer_complete()
454 bp->used_count = urb->actual_length; buffer_complete()
472 bp->status = urb->status; buffer_complete()
475 pvr2_buffer_set_ready(bp); buffer_complete()
596 struct pvr2_buffer *bp; pvr2_stream_kill() local
600 while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) { pvr2_stream_kill()
601 pvr2_buffer_set_idle(bp); pvr2_stream_kill()
610 int pvr2_buffer_queue(struct pvr2_buffer *bp) pvr2_buffer_queue() argument
619 if (!bp) return -EINVAL; pvr2_buffer_queue()
620 sp = bp->stream; pvr2_buffer_queue()
623 pvr2_buffer_wipe(bp); pvr2_buffer_queue()
628 pvr2_buffer_set_queued(bp); pvr2_buffer_queue()
630 for (idx = 0; idx < (bp->max_count) / 4; idx++) { pvr2_buffer_queue()
631 val = bp->id << 24; pvr2_buffer_queue()
633 ((unsigned int *)(bp->ptr))[idx] = val; pvr2_buffer_queue()
636 bp->status = -EINPROGRESS; pvr2_buffer_queue()
637 usb_fill_bulk_urb(bp->purb, // struct urb *urb pvr2_buffer_queue()
641 bp->ptr, // void *transfer_buffer pvr2_buffer_queue()
642 bp->max_count, // int buffer_length pvr2_buffer_queue()
644 bp); pvr2_buffer_queue()
645 usb_submit_urb(bp->purb,GFP_KERNEL); pvr2_buffer_queue()
651 int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) pvr2_buffer_set_buffer() argument
656 if (!bp) return -EINVAL; pvr2_buffer_set_buffer()
657 sp = bp->stream; pvr2_buffer_set_buffer()
661 if (bp->state != pvr2_buffer_state_idle) { pvr2_buffer_set_buffer()
664 bp->ptr = ptr; pvr2_buffer_set_buffer()
665 bp->stream->i_bcount -= bp->max_count; pvr2_buffer_set_buffer()
666 bp->max_count = cnt; pvr2_buffer_set_buffer()
667 bp->stream->i_bcount += bp->max_count; pvr2_buffer_set_buffer()
673 bp->stream->i_bcount,bp->stream->i_count); pvr2_buffer_set_buffer()
681 unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp) pvr2_buffer_get_count() argument
683 return bp->used_count; pvr2_buffer_get_count()
686 int pvr2_buffer_get_status(struct pvr2_buffer *bp) pvr2_buffer_get_status() argument
688 return bp->status; pvr2_buffer_get_status()
691 int pvr2_buffer_get_id(struct pvr2_buffer *bp) pvr2_buffer_get_id() argument
693 return bp->id; pvr2_buffer_get_id()
H A Dpvrusb2-dvb.c38 struct pvr2_buffer *bp; pvr2_dvb_feed_func() local
52 bp = pvr2_stream_get_ready_buffer(stream); pvr2_dvb_feed_func()
53 if (bp != NULL) { pvr2_dvb_feed_func()
54 count = pvr2_buffer_get_count(bp); pvr2_dvb_feed_func()
59 pvr2_buffer_get_id(bp)], pvr2_dvb_feed_func()
62 ret = pvr2_buffer_get_status(bp); pvr2_dvb_feed_func()
65 ret = pvr2_buffer_queue(bp); pvr2_dvb_feed_func()
147 struct pvr2_buffer *bp; pvr2_dvb_stream_do_start() local
171 bp = pvr2_stream_get_buffer(stream, idx); pvr2_dvb_stream_do_start()
172 pvr2_buffer_set_buffer(bp, pvr2_dvb_stream_do_start()
180 while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) { pvr2_dvb_stream_do_start()
181 ret = pvr2_buffer_queue(bp); pvr2_dvb_stream_do_start()
H A Dpvrusb2-ioread.c163 struct pvr2_buffer *bp; pvr2_ioread_start() local
168 while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != NULL) { pvr2_ioread_start()
169 stat = pvr2_buffer_queue(bp); pvr2_ioread_start()
206 struct pvr2_buffer *bp; pvr2_ioread_setup() local
232 bp = pvr2_stream_get_buffer(sp,idx); pvr2_ioread_setup()
233 pvr2_buffer_set_buffer(bp, pvr2_ioread_setup()
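The pvrusb2 hits above all trace one buffer lifecycle: an idle buffer is bound to storage with pvr2_buffer_set_buffer(), submitted with pvr2_buffer_queue(), marked ready by buffer_complete(), then drained by a consumer such as pvr2_dvb_feed_func(). A minimal consumer sketch under those assumptions (process_payload() is a hypothetical stand-in for the caller's handling of the filled bytes):

	static int consume_ready_buffers(struct pvr2_stream *sp)
	{
		struct pvr2_buffer *bp;
		int ret;

		/* Drain every buffer the completion handler marked ready. */
		while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
			ret = pvr2_buffer_get_status(bp);
			if (ret < 0)
				return ret;	/* URB-level error */
			/* used_count bytes were filled by the URB. */
			process_payload(bp, pvr2_buffer_get_count(bp));	/* hypothetical */
			ret = pvr2_buffer_queue(bp);	/* hand it back to the hardware */
			if (ret < 0)
				return ret;
		}
		return 0;
	}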
/linux-4.4.14/drivers/net/ethernet/cadence/
H A Dmacb.c97 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) macb_rx_desc() argument
99 return &bp->rx_ring[macb_rx_ring_wrap(index)]; macb_rx_desc()
102 static void *macb_rx_buffer(struct macb *bp, unsigned int index) macb_rx_buffer() argument
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); macb_rx_buffer()
108 static u32 hw_readl_native(struct macb *bp, int offset) hw_readl_native() argument
110 return __raw_readl(bp->regs + offset); hw_readl_native()
113 static void hw_writel_native(struct macb *bp, int offset, u32 value) hw_writel_native() argument
115 __raw_writel(value, bp->regs + offset); hw_writel_native()
118 static u32 hw_readl(struct macb *bp, int offset) hw_readl() argument
120 return readl_relaxed(bp->regs + offset); hw_readl()
123 static void hw_writel(struct macb *bp, int offset, u32 value) hw_writel() argument
125 writel_relaxed(value, bp->regs + offset); hw_writel()
158 static void macb_set_hwaddr(struct macb *bp) macb_set_hwaddr() argument
163 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); macb_set_hwaddr()
164 macb_or_gem_writel(bp, SA1B, bottom); macb_set_hwaddr()
165 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); macb_set_hwaddr()
166 macb_or_gem_writel(bp, SA1T, top); macb_set_hwaddr()
169 macb_or_gem_writel(bp, SA2B, 0); macb_set_hwaddr()
170 macb_or_gem_writel(bp, SA2T, 0); macb_set_hwaddr()
171 macb_or_gem_writel(bp, SA3B, 0); macb_set_hwaddr()
172 macb_or_gem_writel(bp, SA3T, 0); macb_set_hwaddr()
173 macb_or_gem_writel(bp, SA4B, 0); macb_set_hwaddr()
174 macb_or_gem_writel(bp, SA4T, 0); macb_set_hwaddr()
177 static void macb_get_hwaddr(struct macb *bp) macb_get_hwaddr() argument
185 pdata = dev_get_platdata(&bp->pdev->dev); macb_get_hwaddr()
189 bottom = macb_or_gem_readl(bp, SA1B + i * 8); macb_get_hwaddr()
190 top = macb_or_gem_readl(bp, SA1T + i * 8); macb_get_hwaddr()
209 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); macb_get_hwaddr()
214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); macb_get_hwaddr()
215 eth_hw_addr_random(bp->dev); macb_get_hwaddr()
220 struct macb *bp = bus->priv; macb_mdio_read() local
223 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) macb_mdio_read()
230 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) macb_mdio_read()
233 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); macb_mdio_read()
241 struct macb *bp = bus->priv; macb_mdio_write() local
243 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) macb_mdio_write()
251 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) macb_mdio_write()
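Both MDIO accessors above share one transaction shape: compose the MAN register from bitfields, then spin until NSR reports the PHY maintenance shift register idle. A condensed sketch of the read side (field macros as in macb.h; the cpu_relax() in the poll loop is an assumption, since the hit lines elide the loop body):

	static int macb_mdio_xact_read(struct macb *bp, int phy_id, int regnum)
	{
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)	/* start of frame */
				      | MACB_BF(RW, MACB_MAN_READ)	/* read opcode */
				      | MACB_BF(PHYA, phy_id)
				      | MACB_BF(REGA, regnum)
				      | MACB_BF(CODE, MACB_MAN_CODE)));

		/* Wait for the maintenance interface to go idle. */
		while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
			cpu_relax();

		return MACB_BFEXT(DATA, macb_readl(bp, MAN));
	}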
303 struct macb *bp = netdev_priv(dev); macb_handle_link_change() local
304 struct phy_device *phydev = bp->phy_dev; macb_handle_link_change()
308 spin_lock_irqsave(&bp->lock, flags); macb_handle_link_change()
311 if ((bp->speed != phydev->speed) || macb_handle_link_change()
312 (bp->duplex != phydev->duplex)) { macb_handle_link_change()
315 reg = macb_readl(bp, NCFGR); macb_handle_link_change()
317 if (macb_is_gem(bp)) macb_handle_link_change()
325 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) macb_handle_link_change()
328 macb_or_gem_writel(bp, NCFGR, reg); macb_handle_link_change()
330 bp->speed = phydev->speed; macb_handle_link_change()
331 bp->duplex = phydev->duplex; macb_handle_link_change()
336 if (phydev->link != bp->link) { macb_handle_link_change()
338 bp->speed = 0; macb_handle_link_change()
339 bp->duplex = -1; macb_handle_link_change()
341 bp->link = phydev->link; macb_handle_link_change()
346 spin_unlock_irqrestore(&bp->lock, flags); macb_handle_link_change()
353 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); macb_handle_link_change()
370 struct macb *bp = netdev_priv(dev); macb_mii_probe() local
376 phydev = phy_find_first(bp->mii_bus); macb_mii_probe()
382 pdata = dev_get_platdata(&bp->pdev->dev); macb_mii_probe()
384 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); macb_mii_probe()
393 bp->phy_interface); macb_mii_probe()
400 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) macb_mii_probe()
405 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) macb_mii_probe()
410 bp->link = 0; macb_mii_probe()
411 bp->speed = 0; macb_mii_probe()
412 bp->duplex = -1; macb_mii_probe()
413 bp->phy_dev = phydev; macb_mii_probe()
418 static int macb_mii_init(struct macb *bp) macb_mii_init() argument
425 macb_writel(bp, NCR, MACB_BIT(MPE)); macb_mii_init()
427 bp->mii_bus = mdiobus_alloc(); macb_mii_init()
428 if (bp->mii_bus == NULL) { macb_mii_init()
433 bp->mii_bus->name = "MACB_mii_bus"; macb_mii_init()
434 bp->mii_bus->read = &macb_mdio_read; macb_mii_init()
435 bp->mii_bus->write = &macb_mdio_write; macb_mii_init()
436 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", macb_mii_init()
437 bp->pdev->name, bp->pdev->id); macb_mii_init()
438 bp->mii_bus->priv = bp; macb_mii_init()
439 bp->mii_bus->parent = &bp->dev->dev; macb_mii_init()
440 pdata = dev_get_platdata(&bp->pdev->dev); macb_mii_init()
442 bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); macb_mii_init()
443 if (!bp->mii_bus->irq) { macb_mii_init()
448 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); macb_mii_init()
450 np = bp->pdev->dev.of_node; macb_mii_init()
453 err = of_mdiobus_register(bp->mii_bus, np); macb_mii_init()
457 if (!err && !phy_find_first(bp->mii_bus)) { macb_mii_init()
461 phydev = mdiobus_scan(bp->mii_bus, i); macb_mii_init()
473 bp->mii_bus->irq[i] = PHY_POLL; macb_mii_init()
476 bp->mii_bus->phy_mask = pdata->phy_mask; macb_mii_init()
478 err = mdiobus_register(bp->mii_bus); macb_mii_init()
484 err = macb_mii_probe(bp->dev); macb_mii_init()
491 mdiobus_unregister(bp->mii_bus); macb_mii_init()
493 kfree(bp->mii_bus->irq); macb_mii_init()
495 mdiobus_free(bp->mii_bus); macb_mii_init()
500 static void macb_update_stats(struct macb *bp) macb_update_stats() argument
502 u32 *p = &bp->hw_stats.macb.rx_pause_frames; macb_update_stats()
503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; macb_update_stats()
509 *p += bp->macb_reg_readl(bp, offset); macb_update_stats()
512 static int macb_halt_tx(struct macb *bp) macb_halt_tx() argument
517 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); macb_halt_tx()
522 status = macb_readl(bp, TSR); macb_halt_tx()
532 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) macb_tx_unmap() argument
536 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, macb_tx_unmap()
539 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, macb_tx_unmap()
554 struct macb *bp = queue->bp; macb_tx_error_task() local
561 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", macb_tx_error_task()
562 (unsigned int)(queue - bp->queues), macb_tx_error_task()
571 spin_lock_irqsave(&bp->lock, flags); macb_tx_error_task()
574 netif_tx_stop_all_queues(bp->dev); macb_tx_error_task()
581 if (macb_halt_tx(bp)) macb_tx_error_task()
583 netdev_err(bp->dev, "BUG: halt tx timed out\n"); macb_tx_error_task()
600 macb_tx_unmap(bp, tx_skb); macb_tx_error_task()
610 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", macb_tx_error_task()
612 bp->stats.tx_packets++; macb_tx_error_task()
613 bp->stats.tx_bytes += skb->len; macb_tx_error_task()
622 netdev_err(bp->dev, macb_tx_error_task()
628 macb_tx_unmap(bp, tx_skb); macb_tx_error_task()
646 macb_writel(bp, TSR, macb_readl(bp, TSR)); macb_tx_error_task()
650 netif_tx_start_all_queues(bp->dev); macb_tx_error_task()
651 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_tx_error_task()
653 spin_unlock_irqrestore(&bp->lock, flags); macb_tx_error_task()
661 struct macb *bp = queue->bp; macb_tx_interrupt() local
662 u16 queue_index = queue - bp->queues; macb_tx_interrupt()
664 status = macb_readl(bp, TSR); macb_tx_interrupt()
665 macb_writel(bp, TSR, status); macb_tx_interrupt()
667 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_tx_interrupt()
670 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", macb_tx_interrupt()
700 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", macb_tx_interrupt()
702 bp->stats.tx_packets++; macb_tx_interrupt()
703 bp->stats.tx_bytes += skb->len; macb_tx_interrupt()
707 macb_tx_unmap(bp, tx_skb); macb_tx_interrupt()
719 if (__netif_subqueue_stopped(bp->dev, queue_index) && macb_tx_interrupt()
722 netif_wake_subqueue(bp->dev, queue_index); macb_tx_interrupt()
725 static void gem_rx_refill(struct macb *bp) gem_rx_refill() argument
731 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { gem_rx_refill()
732 entry = macb_rx_ring_wrap(bp->rx_prepared_head); gem_rx_refill()
737 bp->rx_prepared_head++; gem_rx_refill()
739 if (bp->rx_skbuff[entry] == NULL) { gem_rx_refill()
741 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); gem_rx_refill()
743 netdev_err(bp->dev, gem_rx_refill()
749 paddr = dma_map_single(&bp->pdev->dev, skb->data, gem_rx_refill()
750 bp->rx_buffer_size, DMA_FROM_DEVICE); gem_rx_refill()
751 if (dma_mapping_error(&bp->pdev->dev, paddr)) { gem_rx_refill()
756 bp->rx_skbuff[entry] = skb; gem_rx_refill()
760 bp->rx_ring[entry].addr = paddr; gem_rx_refill()
761 bp->rx_ring[entry].ctrl = 0; gem_rx_refill()
766 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); gem_rx_refill()
767 bp->rx_ring[entry].ctrl = 0; gem_rx_refill()
774 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", gem_rx_refill()
775 bp->rx_prepared_head, bp->rx_tail); gem_rx_refill()
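gem_rx_refill() above is the producer half of the RX ring: a descriptor belongs to the driver while MACB_BIT(RX_USED) is set in its address word, and writing an address with that bit clear returns it to the controller. A stripped-down sketch of the hand-off; the barrier placement is an assumption here (the hit lines elide it), and the RX_WRAP bit for the last ring entry is omitted:

	/* Publish one mapped buffer back to the GEM DMA engine. */
	static void gem_publish_rx_desc(struct macb *bp, unsigned int entry,
					dma_addr_t paddr)
	{
		bp->rx_ring[entry].ctrl = 0;
		/* ctrl must be visible before the ownership bit flips. */
		wmb();
		bp->rx_ring[entry].addr = paddr & ~MACB_BIT(RX_USED);
	}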
779 static void discard_partial_frame(struct macb *bp, unsigned int begin, discard_partial_frame() argument
785 struct macb_dma_desc *desc = macb_rx_desc(bp, frag); discard_partial_frame()
799 static int gem_rx(struct macb *bp, int budget) gem_rx() argument
810 entry = macb_rx_ring_wrap(bp->rx_tail); gem_rx()
811 desc = &bp->rx_ring[entry]; gem_rx()
822 bp->rx_tail++; gem_rx()
826 netdev_err(bp->dev, gem_rx()
828 bp->stats.rx_dropped++; gem_rx()
831 skb = bp->rx_skbuff[entry]; gem_rx()
833 netdev_err(bp->dev, gem_rx()
835 bp->stats.rx_dropped++; gem_rx()
839 bp->rx_skbuff[entry] = NULL; gem_rx()
840 len = ctrl & bp->rx_frm_len_mask; gem_rx()
842 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); gem_rx()
846 dma_unmap_single(&bp->pdev->dev, addr, gem_rx()
847 bp->rx_buffer_size, DMA_FROM_DEVICE); gem_rx()
849 skb->protocol = eth_type_trans(skb, bp->dev); gem_rx()
851 if (bp->dev->features & NETIF_F_RXCSUM && gem_rx()
852 !(bp->dev->flags & IFF_PROMISC) && gem_rx()
856 bp->stats.rx_packets++; gem_rx()
857 bp->stats.rx_bytes += skb->len; gem_rx()
860 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", gem_rx()
871 gem_rx_refill(bp); gem_rx()
876 static int macb_rx_frame(struct macb *bp, unsigned int first_frag, macb_rx_frame() argument
885 desc = macb_rx_desc(bp, last_frag); macb_rx_frame()
886 len = desc->ctrl & bp->rx_frm_len_mask; macb_rx_frame()
888 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", macb_rx_frame()
901 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); macb_rx_frame()
903 bp->stats.rx_dropped++; macb_rx_frame()
905 desc = macb_rx_desc(bp, frag); macb_rx_frame()
923 unsigned int frag_len = bp->rx_buffer_size; macb_rx_frame()
930 macb_rx_buffer(bp, frag), frag_len); macb_rx_frame()
931 offset += bp->rx_buffer_size; macb_rx_frame()
932 desc = macb_rx_desc(bp, frag); macb_rx_frame()
943 skb->protocol = eth_type_trans(skb, bp->dev); macb_rx_frame()
945 bp->stats.rx_packets++; macb_rx_frame()
946 bp->stats.rx_bytes += skb->len; macb_rx_frame()
947 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", macb_rx_frame()
954 static int macb_rx(struct macb *bp, int budget) macb_rx() argument
960 for (tail = bp->rx_tail; budget > 0; tail++) { macb_rx()
961 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); macb_rx()
975 discard_partial_frame(bp, first_frag, tail); macb_rx()
983 dropped = macb_rx_frame(bp, first_frag, tail); macb_rx()
993 bp->rx_tail = first_frag; macb_rx()
995 bp->rx_tail = tail; macb_rx()
1002 struct macb *bp = container_of(napi, struct macb, napi); macb_poll() local
1006 status = macb_readl(bp, RSR); macb_poll()
1007 macb_writel(bp, RSR, status); macb_poll()
1011 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", macb_poll()
1014 work_done = bp->macbgem_ops.mog_rx(bp, budget); macb_poll()
1019 status = macb_readl(bp, RSR); macb_poll()
1021 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_poll()
1022 macb_writel(bp, ISR, MACB_BIT(RCOMP)); macb_poll()
1025 macb_writel(bp, IER, MACB_RX_INT_FLAGS); macb_poll()
1037 struct macb *bp = queue->bp; macb_interrupt() local
1038 struct net_device *dev = bp->dev; macb_interrupt()
1046 spin_lock(&bp->lock); macb_interrupt()
1055 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", macb_interrupt()
1056 (unsigned int)(queue - bp->queues), macb_interrupt()
1068 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt()
1071 if (napi_schedule_prep(&bp->napi)) { macb_interrupt()
1072 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); macb_interrupt()
1073 __napi_schedule(&bp->napi); macb_interrupt()
1081 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt()
1102 ctrl = macb_readl(bp, NCR); macb_interrupt()
1103 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); macb_interrupt()
1104 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); macb_interrupt()
1106 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt()
1107 macb_writel(bp, ISR, MACB_BIT(RXUBR)); macb_interrupt()
1112 if (macb_is_gem(bp)) macb_interrupt()
1113 bp->hw_stats.gem.rx_overruns++; macb_interrupt()
1115 bp->hw_stats.macb.rx_overruns++; macb_interrupt()
1117 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt()
1129 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) macb_interrupt()
1136 spin_unlock(&bp->lock); macb_interrupt()
1148 struct macb *bp = netdev_priv(dev); macb_poll_controller() local
1154 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) macb_poll_controller()
1160 static unsigned int macb_tx_map(struct macb *bp, macb_tx_map() argument
1177 size = min(len, bp->max_tx_length); macb_tx_map()
1181 mapping = dma_map_single(&bp->pdev->dev, macb_tx_map()
1184 if (dma_mapping_error(&bp->pdev->dev, mapping)) macb_tx_map()
1206 size = min(len, bp->max_tx_length); macb_tx_map()
1210 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, macb_tx_map()
1212 if (dma_mapping_error(&bp->pdev->dev, mapping)) macb_tx_map()
1230 netdev_err(bp->dev, "BUG! empty skb!\n"); macb_tx_map()
1278 netdev_err(bp->dev, "TX DMA map failed\n"); macb_tx_map()
1283 macb_tx_unmap(bp, tx_skb); macb_tx_map()
1292 struct macb *bp = netdev_priv(dev); macb_start_xmit() local
1293 struct macb_queue *queue = &bp->queues[queue_index]; macb_start_xmit()
1298 netdev_vdbg(bp->dev, macb_start_xmit()
1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); macb_start_xmit()
1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length); macb_start_xmit()
1317 spin_lock_irqsave(&bp->lock, flags); macb_start_xmit()
1322 spin_unlock_irqrestore(&bp->lock, flags); macb_start_xmit()
1323 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", macb_start_xmit()
1329 if (!macb_tx_map(bp, queue, skb)) { macb_start_xmit()
1339 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_start_xmit()
1345 spin_unlock_irqrestore(&bp->lock, flags); macb_start_xmit()
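Before taking bp->lock, macb_start_xmit() budgets descriptors for the worst case: neither the linear head nor any fragment may span more than bp->max_tx_length bytes per descriptor, so each contributes a DIV_ROUND_UP term. That arithmetic, isolated (the helper name is illustrative):

	/* Worst-case descriptor count for one skb, as macb_start_xmit() computes it. */
	static unsigned int macb_tx_desc_budget(struct macb *bp, struct sk_buff *skb)
	{
		unsigned int f, count;

		count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
			unsigned int frag_size =
				skb_frag_size(&skb_shinfo(skb)->frags[f]);

			count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
		}
		return count;
	}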
1350 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) macb_init_rx_buffer_size() argument
1352 if (!macb_is_gem(bp)) { macb_init_rx_buffer_size()
1353 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; macb_init_rx_buffer_size()
1355 bp->rx_buffer_size = size; macb_init_rx_buffer_size()
1357 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { macb_init_rx_buffer_size()
1358 netdev_dbg(bp->dev, macb_init_rx_buffer_size()
1361 bp->rx_buffer_size = macb_init_rx_buffer_size()
1362 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); macb_init_rx_buffer_size()
1366 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", macb_init_rx_buffer_size()
1367 bp->dev->mtu, bp->rx_buffer_size); macb_init_rx_buffer_size()
1370 static void gem_free_rx_buffers(struct macb *bp) gem_free_rx_buffers() argument
1377 if (!bp->rx_skbuff) gem_free_rx_buffers()
1381 skb = bp->rx_skbuff[i]; gem_free_rx_buffers()
1386 desc = &bp->rx_ring[i]; gem_free_rx_buffers()
1388 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, gem_free_rx_buffers()
1394 kfree(bp->rx_skbuff); gem_free_rx_buffers()
1395 bp->rx_skbuff = NULL; gem_free_rx_buffers()
1398 static void macb_free_rx_buffers(struct macb *bp) macb_free_rx_buffers() argument
1400 if (bp->rx_buffers) { macb_free_rx_buffers()
1401 dma_free_coherent(&bp->pdev->dev, macb_free_rx_buffers()
1402 RX_RING_SIZE * bp->rx_buffer_size, macb_free_rx_buffers()
1403 bp->rx_buffers, bp->rx_buffers_dma); macb_free_rx_buffers()
1404 bp->rx_buffers = NULL; macb_free_rx_buffers()
1408 static void macb_free_consistent(struct macb *bp) macb_free_consistent() argument
1413 bp->macbgem_ops.mog_free_rx_buffers(bp); macb_free_consistent()
1414 if (bp->rx_ring) { macb_free_consistent()
1415 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, macb_free_consistent()
1416 bp->rx_ring, bp->rx_ring_dma); macb_free_consistent()
1417 bp->rx_ring = NULL; macb_free_consistent()
1420 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_free_consistent()
1424 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, macb_free_consistent()
1431 static int gem_alloc_rx_buffers(struct macb *bp) gem_alloc_rx_buffers() argument
1436 bp->rx_skbuff = kzalloc(size, GFP_KERNEL); gem_alloc_rx_buffers()
1437 if (!bp->rx_skbuff) gem_alloc_rx_buffers()
1440 netdev_dbg(bp->dev, gem_alloc_rx_buffers()
1442 RX_RING_SIZE, bp->rx_skbuff); gem_alloc_rx_buffers()
1446 static int macb_alloc_rx_buffers(struct macb *bp) macb_alloc_rx_buffers() argument
1450 size = RX_RING_SIZE * bp->rx_buffer_size; macb_alloc_rx_buffers()
1451 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_rx_buffers()
1452 &bp->rx_buffers_dma, GFP_KERNEL); macb_alloc_rx_buffers()
1453 if (!bp->rx_buffers) macb_alloc_rx_buffers()
1456 netdev_dbg(bp->dev, macb_alloc_rx_buffers()
1458 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); macb_alloc_rx_buffers()
1462 static int macb_alloc_consistent(struct macb *bp) macb_alloc_consistent() argument
1468 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_alloc_consistent()
1470 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_consistent()
1475 netdev_dbg(bp->dev, macb_alloc_consistent()
1487 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_consistent()
1488 &bp->rx_ring_dma, GFP_KERNEL); macb_alloc_consistent()
1489 if (!bp->rx_ring) macb_alloc_consistent()
1491 netdev_dbg(bp->dev, macb_alloc_consistent()
1493 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); macb_alloc_consistent()
1495 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) macb_alloc_consistent()
1501 macb_free_consistent(bp); macb_alloc_consistent()
1505 static void gem_init_rings(struct macb *bp) gem_init_rings() argument
1511 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { gem_init_rings()
1521 bp->rx_tail = 0; gem_init_rings()
1522 bp->rx_prepared_head = 0; gem_init_rings()
1524 gem_rx_refill(bp); gem_init_rings()
1527 static void macb_init_rings(struct macb *bp) macb_init_rings() argument
1532 addr = bp->rx_buffers_dma; macb_init_rings()
1534 bp->rx_ring[i].addr = addr; macb_init_rings()
1535 bp->rx_ring[i].ctrl = 0; macb_init_rings()
1536 addr += bp->rx_buffer_size; macb_init_rings()
1538 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); macb_init_rings()
1541 bp->queues[0].tx_ring[i].addr = 0; macb_init_rings()
1542 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); macb_init_rings()
1544 bp->queues[0].tx_head = 0; macb_init_rings()
1545 bp->queues[0].tx_tail = 0; macb_init_rings()
1546 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); macb_init_rings()
1548 bp->rx_tail = 0; macb_init_rings()
1551 static void macb_reset_hw(struct macb *bp) macb_reset_hw() argument
1560 macb_writel(bp, NCR, 0); macb_reset_hw()
1563 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); macb_reset_hw()
1566 macb_writel(bp, TSR, -1); macb_reset_hw()
1567 macb_writel(bp, RSR, -1); macb_reset_hw()
1570 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_reset_hw()
1576 static u32 gem_mdc_clk_div(struct macb *bp) gem_mdc_clk_div() argument
1579 unsigned long pclk_hz = clk_get_rate(bp->pclk); gem_mdc_clk_div()
1597 static u32 macb_mdc_clk_div(struct macb *bp) macb_mdc_clk_div() argument
1602 if (macb_is_gem(bp)) macb_mdc_clk_div()
1603 return gem_mdc_clk_div(bp); macb_mdc_clk_div()
1605 pclk_hz = clk_get_rate(bp->pclk); macb_mdc_clk_div()
1623 static u32 macb_dbw(struct macb *bp) macb_dbw() argument
1625 if (!macb_is_gem(bp)) macb_dbw()
1628 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { macb_dbw()
1647 static void macb_configure_dma(struct macb *bp) macb_configure_dma() argument
1651 if (macb_is_gem(bp)) { macb_configure_dma()
1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); macb_configure_dma()
1653 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); macb_configure_dma()
1654 if (bp->dma_burst_length) macb_configure_dma()
1655 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); macb_configure_dma()
1659 if (bp->native_io) macb_configure_dma()
1664 if (bp->dev->features & NETIF_F_HW_CSUM) macb_configure_dma()
1668 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", macb_configure_dma()
1670 gem_writel(bp, DMACFG, dmacfg); macb_configure_dma()
1674 static void macb_init_hw(struct macb *bp) macb_init_hw() argument
1681 macb_reset_hw(bp); macb_init_hw()
1682 macb_set_hwaddr(bp); macb_init_hw()
1684 config = macb_mdc_clk_div(bp); macb_init_hw()
1685 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) macb_init_hw()
1690 if (bp->caps & MACB_CAPS_JUMBO) macb_init_hw()
1694 if (bp->dev->flags & IFF_PROMISC) macb_init_hw()
1696 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) macb_init_hw()
1698 if (!(bp->dev->flags & IFF_BROADCAST)) macb_init_hw()
1700 config |= macb_dbw(bp); macb_init_hw()
1701 macb_writel(bp, NCFGR, config); macb_init_hw()
1702 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) macb_init_hw()
1703 gem_writel(bp, JML, bp->jumbo_max_len); macb_init_hw()
1704 bp->speed = SPEED_10; macb_init_hw()
1705 bp->duplex = DUPLEX_HALF; macb_init_hw()
1706 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; macb_init_hw()
1707 if (bp->caps & MACB_CAPS_JUMBO) macb_init_hw()
1708 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; macb_init_hw()
1710 macb_configure_dma(bp); macb_init_hw()
1713 macb_writel(bp, RBQP, bp->rx_ring_dma); macb_init_hw()
1714 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_init_hw()
1725 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); macb_init_hw()
1795 struct macb *bp = netdev_priv(dev); macb_sethashtable() local
1804 macb_or_gem_writel(bp, HRB, mc_filter[0]); macb_sethashtable()
1805 macb_or_gem_writel(bp, HRT, mc_filter[1]); macb_sethashtable()
1814 struct macb *bp = netdev_priv(dev); macb_set_rx_mode() local
1816 cfg = macb_readl(bp, NCFGR); macb_set_rx_mode()
1823 if (macb_is_gem(bp)) macb_set_rx_mode()
1830 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) macb_set_rx_mode()
1836 macb_or_gem_writel(bp, HRB, -1); macb_set_rx_mode()
1837 macb_or_gem_writel(bp, HRT, -1); macb_set_rx_mode()
1845 macb_or_gem_writel(bp, HRB, 0); macb_set_rx_mode()
1846 macb_or_gem_writel(bp, HRT, 0); macb_set_rx_mode()
1850 macb_writel(bp, NCFGR, cfg); macb_set_rx_mode()
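macb_set_rx_mode() drives the 64-bit multicast hash through the HRB/HRT register pair: all-ones accepts every multicast frame, all-zeroes rejects them, and macb_sethashtable() computes per-address hash bits in between. The three states, condensed into one illustrative helper (promiscuous handling, which the real function does first, is left out):

	static void macb_program_mc_filter(struct macb *bp, struct net_device *dev)
	{
		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast: saturate the hash filter. */
			macb_or_gem_writel(bp, HRB, -1);
			macb_or_gem_writel(bp, HRT, -1);
		} else if (!netdev_mc_empty(dev)) {
			macb_sethashtable(dev);		/* per-address hash bits */
		} else {
			/* No multicast wanted: clear the filter. */
			macb_or_gem_writel(bp, HRB, 0);
			macb_or_gem_writel(bp, HRT, 0);
		}
	}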
1855 struct macb *bp = netdev_priv(dev); macb_open() local
1859 netdev_dbg(bp->dev, "open\n"); macb_open()
1865 if (!bp->phy_dev) macb_open()
1869 macb_init_rx_buffer_size(bp, bufsz); macb_open()
1871 err = macb_alloc_consistent(bp); macb_open()
1878 napi_enable(&bp->napi); macb_open()
1880 bp->macbgem_ops.mog_init_rings(bp); macb_open()
1881 macb_init_hw(bp); macb_open()
1884 phy_start(bp->phy_dev); macb_open()
1893 struct macb *bp = netdev_priv(dev); macb_close() local
1897 napi_disable(&bp->napi); macb_close()
1899 if (bp->phy_dev) macb_close()
1900 phy_stop(bp->phy_dev); macb_close()
1902 spin_lock_irqsave(&bp->lock, flags); macb_close()
1903 macb_reset_hw(bp); macb_close()
1905 spin_unlock_irqrestore(&bp->lock, flags); macb_close()
1907 macb_free_consistent(bp); macb_close()
1914 struct macb *bp = netdev_priv(dev); macb_change_mtu() local
1921 if (bp->caps & MACB_CAPS_JUMBO) macb_change_mtu()
1922 max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; macb_change_mtu()
1932 static void gem_update_stats(struct macb *bp) gem_update_stats() argument
1935 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; gem_update_stats()
1939 u64 val = bp->macb_reg_readl(bp, offset); gem_update_stats()
1941 bp->ethtool_stats[i] += val; gem_update_stats()
1946 val = bp->macb_reg_readl(bp, offset + 4); gem_update_stats()
1947 bp->ethtool_stats[i] += ((u64)val) << 32; gem_update_stats()
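gem_update_stats() accumulates counters that can be wider than one register: every statistic reads its low 32 bits, and the 64-bit octet counters take a second read from offset + 4 shifted into the high word. One step of that loop, factored out (the is_64bit flag stands in for the upstream per-offset test):

	/* Fold one hardware counter into the running ethtool totals. */
	static void gem_accumulate_stat(struct macb *bp, unsigned int i,
					unsigned int offset, bool is_64bit)
	{
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;			/* low 32 bits */
		if (is_64bit) {
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;	/* high word */
		}
	}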
1953 static struct net_device_stats *gem_get_stats(struct macb *bp) gem_get_stats() argument
1955 struct gem_stats *hwstat = &bp->hw_stats.gem; gem_get_stats()
1956 struct net_device_stats *nstat = &bp->stats; gem_get_stats()
1958 gem_update_stats(bp); gem_get_stats()
1994 struct macb *bp; gem_get_ethtool_stats() local
1996 bp = netdev_priv(dev); gem_get_ethtool_stats()
1997 gem_update_stats(bp); gem_get_ethtool_stats()
1998 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); gem_get_ethtool_stats()
2026 struct macb *bp = netdev_priv(dev); macb_get_stats() local
2027 struct net_device_stats *nstat = &bp->stats; macb_get_stats()
2028 struct macb_stats *hwstat = &bp->hw_stats.macb; macb_get_stats()
2030 if (macb_is_gem(bp)) macb_get_stats()
2031 return gem_get_stats(bp); macb_get_stats()
2034 macb_update_stats(bp); macb_get_stats()
2073 struct macb *bp = netdev_priv(dev); macb_get_settings() local
2074 struct phy_device *phydev = bp->phy_dev; macb_get_settings()
2084 struct macb *bp = netdev_priv(dev); macb_set_settings() local
2085 struct phy_device *phydev = bp->phy_dev; macb_set_settings()
2101 struct macb *bp = netdev_priv(dev); macb_get_regs() local
2105 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) macb_get_regs()
2108 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); macb_get_regs()
2109 head = macb_tx_ring_wrap(bp->queues[0].tx_head); macb_get_regs()
2111 regs_buff[0] = macb_readl(bp, NCR); macb_get_regs()
2112 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); macb_get_regs()
2113 regs_buff[2] = macb_readl(bp, NSR); macb_get_regs()
2114 regs_buff[3] = macb_readl(bp, TSR); macb_get_regs()
2115 regs_buff[4] = macb_readl(bp, RBQP); macb_get_regs()
2116 regs_buff[5] = macb_readl(bp, TBQP); macb_get_regs()
2117 regs_buff[6] = macb_readl(bp, RSR); macb_get_regs()
2118 regs_buff[7] = macb_readl(bp, IMR); macb_get_regs()
2122 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); macb_get_regs()
2123 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); macb_get_regs()
2125 regs_buff[12] = macb_or_gem_readl(bp, USRIO); macb_get_regs()
2126 if (macb_is_gem(bp)) { macb_get_regs()
2127 regs_buff[13] = gem_readl(bp, DMACFG); macb_get_regs()
2154 struct macb *bp = netdev_priv(dev); macb_ioctl() local
2155 struct phy_device *phydev = bp->phy_dev; macb_ioctl()
2169 struct macb *bp = netdev_priv(netdev); macb_set_features() local
2173 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { macb_set_features()
2176 dmacfg = gem_readl(bp, DMACFG); macb_set_features()
2181 gem_writel(bp, DMACFG, dmacfg); macb_set_features()
2185 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { macb_set_features()
2188 netcfg = gem_readl(bp, NCFGR); macb_set_features()
2194 gem_writel(bp, NCFGR, netcfg); macb_set_features()
2220 static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) macb_configure_caps() argument
2225 bp->caps = dt_conf->caps; macb_configure_caps()
2227 if (hw_is_gem(bp->regs, bp->native_io)) { macb_configure_caps()
2228 bp->caps |= MACB_CAPS_MACB_IS_GEM; macb_configure_caps()
2230 dcfg = gem_readl(bp, DCFG1); macb_configure_caps()
2232 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; macb_configure_caps()
2233 dcfg = gem_readl(bp, DCFG2); macb_configure_caps()
2235 bp->caps |= MACB_CAPS_FIFO_MODE; macb_configure_caps()
2238 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); macb_configure_caps()
2326 struct macb *bp = netdev_priv(dev); macb_init() local
2336 if (!(bp->queue_mask & (1 << hw_q))) macb_init()
2339 queue = &bp->queues[q]; macb_init()
2340 queue->bp = bp; macb_init()
2376 netif_napi_add(dev, &bp->napi, macb_poll, 64); macb_init()
2379 if (macb_is_gem(bp)) { macb_init()
2380 bp->max_tx_length = GEM_MAX_TX_LEN; macb_init()
2381 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; macb_init()
2382 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; macb_init()
2383 bp->macbgem_ops.mog_init_rings = gem_init_rings; macb_init()
2384 bp->macbgem_ops.mog_rx = gem_rx; macb_init()
2387 bp->max_tx_length = MACB_MAX_TX_LEN; macb_init()
2388 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; macb_init()
2389 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; macb_init()
2390 bp->macbgem_ops.mog_init_rings = macb_init_rings; macb_init()
2391 bp->macbgem_ops.mog_rx = macb_rx; macb_init()
2398 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) macb_init()
2400 if (bp->caps & MACB_CAPS_SG_DISABLED) macb_init()
2405 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) macb_init()
2407 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && macb_init()
2408 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) macb_init()
2410 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) macb_init()
2413 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) macb_init()
2416 macb_or_gem_writel(bp, USRIO, val); macb_init()
2419 val = macb_mdc_clk_div(bp); macb_init()
2420 val |= macb_dbw(bp); macb_init()
2421 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) macb_init()
2423 macb_writel(bp, NCFGR, val); macb_init()
2717 struct macb *bp = netdev_priv(dev); at91ether_init() local
2729 macb_writel(bp, NCR, 0); at91ether_init()
2732 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) at91ether_init()
2735 macb_writel(bp, NCFGR, reg); at91ether_init()
2830 struct macb *bp; macb_probe() local
2856 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); macb_probe()
2866 bp = netdev_priv(dev); macb_probe()
2867 bp->pdev = pdev; macb_probe()
2868 bp->dev = dev; macb_probe()
2869 bp->regs = mem; macb_probe()
2870 bp->native_io = native_io; macb_probe()
2872 bp->macb_reg_readl = hw_readl_native; macb_probe()
2873 bp->macb_reg_writel = hw_writel_native; macb_probe()
2875 bp->macb_reg_readl = hw_readl; macb_probe()
2876 bp->macb_reg_writel = hw_writel; macb_probe()
2878 bp->num_queues = num_queues; macb_probe()
2879 bp->queue_mask = queue_mask; macb_probe()
2881 bp->dma_burst_length = macb_config->dma_burst_length; macb_probe()
2882 bp->pclk = pclk; macb_probe()
2883 bp->hclk = hclk; macb_probe()
2884 bp->tx_clk = tx_clk; macb_probe()
2886 bp->jumbo_max_len = macb_config->jumbo_max_len; macb_probe()
2888 spin_lock_init(&bp->lock); macb_probe()
2891 macb_configure_caps(bp, macb_config); macb_probe()
2903 memcpy(bp->dev->dev_addr, mac, ETH_ALEN); macb_probe()
2905 macb_get_hwaddr(bp); macb_probe()
2911 bp->phy_interface = PHY_INTERFACE_MODE_RMII; macb_probe()
2913 bp->phy_interface = PHY_INTERFACE_MODE_MII; macb_probe()
2915 bp->phy_interface = err; macb_probe()
2929 err = macb_mii_init(bp); macb_probe()
2936 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), macb_probe()
2939 phydev = bp->phy_dev; macb_probe()
2962 struct macb *bp; macb_remove() local
2967 bp = netdev_priv(dev); macb_remove()
2968 if (bp->phy_dev) macb_remove()
2969 phy_disconnect(bp->phy_dev); macb_remove()
2970 mdiobus_unregister(bp->mii_bus); macb_remove()
2971 kfree(bp->mii_bus->irq); macb_remove()
2972 mdiobus_free(bp->mii_bus); macb_remove()
2974 clk_disable_unprepare(bp->tx_clk); macb_remove()
2975 clk_disable_unprepare(bp->hclk); macb_remove()
2976 clk_disable_unprepare(bp->pclk); macb_remove()
2987 struct macb *bp = netdev_priv(netdev); macb_suspend() local
2992 clk_disable_unprepare(bp->tx_clk); macb_suspend()
2993 clk_disable_unprepare(bp->hclk); macb_suspend()
2994 clk_disable_unprepare(bp->pclk); macb_suspend()
3003 struct macb *bp = netdev_priv(netdev); macb_resume() local
3005 clk_prepare_enable(bp->pclk); macb_resume()
3006 clk_prepare_enable(bp->hclk); macb_resume()
3007 clk_prepare_enable(bp->tx_clk); macb_resume()
H A Dmacb.h441 #define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
442 #define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
753 int (*mog_alloc_rx_buffers)(struct macb *bp);
754 void (*mog_free_rx_buffers)(struct macb *bp);
755 void (*mog_init_rings)(struct macb *bp);
756 int (*mog_rx)(struct macb *bp, int budget);
769 struct macb *bp; member in struct:macb_queue
790 u32 (*macb_reg_readl)(struct macb *bp, int offset);
791 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
845 static inline bool macb_is_gem(struct macb *bp) macb_is_gem() argument
847 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); macb_is_gem()
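The macb.h hits explain the bp->macb_reg_readl/bp->macb_reg_writel indirection seen throughout macb.c: macb_probe() binds the pointers once (to the __raw_* native accessors or the *_relaxed little-endian ones), and queue_readl()/queue_writel() route per-queue register offsets through the same pointers. An illustrative use of the queue macros (the helper name is an assumption; IDR and ISR are the per-queue offset members of struct macb_queue):

	/* Mask, and where the hardware needs it, clear, one queue's interrupts. */
	static inline void macb_queue_mask_irqs(struct macb_queue *queue)
	{
		queue_writel(queue, IDR, -1);
		if (queue->bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}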
/linux-4.4.14/kernel/events/
H A Dhw_breakpoint.c74 /* Gather the number of total pinned and un-pinned bp in a cpuset */
83 __weak int hw_breakpoint_weight(struct perf_event *bp) hw_breakpoint_weight() argument
88 static inline enum bp_type_idx find_slot_idx(struct perf_event *bp) find_slot_idx() argument
90 if (bp->attr.bp_type & HW_BREAKPOINT_RW) find_slot_idx()
117 static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type) task_bp_pinned() argument
119 struct task_struct *tsk = bp->hw.target; task_bp_pinned()
133 static const struct cpumask *cpumask_of_bp(struct perf_event *bp) cpumask_of_bp() argument
135 if (bp->cpu >= 0) cpumask_of_bp()
136 return cpumask_of(bp->cpu); cpumask_of_bp()
145 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, fetch_bp_busy_slots() argument
148 const struct cpumask *cpumask = cpumask_of_bp(bp); fetch_bp_busy_slots()
156 if (!bp->hw.target) for_each_cpu()
159 nr += task_bp_pinned(cpu, bp, type); for_each_cpu()
184 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, toggle_bp_task_slot() argument
190 old_idx = task_bp_pinned(cpu, bp, type) - 1; toggle_bp_task_slot()
203 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, toggle_bp_slot() argument
206 const struct cpumask *cpumask = cpumask_of_bp(bp); toggle_bp_slot()
213 if (!bp->hw.target) { toggle_bp_slot()
214 get_bp_info(bp->cpu, type)->cpu_pinned += weight; toggle_bp_slot()
220 toggle_bp_task_slot(bp, cpu, type, weight); toggle_bp_slot()
223 list_add_tail(&bp->hw.bp_list, &bp_task_head); toggle_bp_slot()
225 list_del(&bp->hw.bp_list); toggle_bp_slot()
231 __weak void arch_unregister_hw_breakpoint(struct perf_event *bp) arch_unregister_hw_breakpoint() argument
261 * bp for every cpu and we keep the max one. Same for the per tasks
280 static int __reserve_bp_slot(struct perf_event *bp) __reserve_bp_slot() argument
291 if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY || __reserve_bp_slot()
292 bp->attr.bp_type == HW_BREAKPOINT_INVALID) __reserve_bp_slot()
295 type = find_slot_idx(bp); __reserve_bp_slot()
296 weight = hw_breakpoint_weight(bp); __reserve_bp_slot()
298 fetch_bp_busy_slots(&slots, bp, type); __reserve_bp_slot()
309 toggle_bp_slot(bp, true, type, weight); __reserve_bp_slot()
314 int reserve_bp_slot(struct perf_event *bp) reserve_bp_slot() argument
320 ret = __reserve_bp_slot(bp); reserve_bp_slot()
327 static void __release_bp_slot(struct perf_event *bp) __release_bp_slot() argument
332 type = find_slot_idx(bp); __release_bp_slot()
333 weight = hw_breakpoint_weight(bp); __release_bp_slot()
334 toggle_bp_slot(bp, false, type, weight); __release_bp_slot()
337 void release_bp_slot(struct perf_event *bp) release_bp_slot() argument
341 arch_unregister_hw_breakpoint(bp); release_bp_slot()
342 __release_bp_slot(bp); release_bp_slot()
352 int dbg_reserve_bp_slot(struct perf_event *bp) dbg_reserve_bp_slot() argument
357 return __reserve_bp_slot(bp); dbg_reserve_bp_slot()
360 int dbg_release_bp_slot(struct perf_event *bp) dbg_release_bp_slot() argument
365 __release_bp_slot(bp); dbg_release_bp_slot()
370 static int validate_hw_breakpoint(struct perf_event *bp) validate_hw_breakpoint() argument
374 ret = arch_validate_hwbkpt_settings(bp); validate_hw_breakpoint()
378 if (arch_check_bp_in_kernelspace(bp)) { validate_hw_breakpoint()
379 if (bp->attr.exclude_kernel) validate_hw_breakpoint()
392 int register_perf_hw_breakpoint(struct perf_event *bp) register_perf_hw_breakpoint() argument
396 ret = reserve_bp_slot(bp); register_perf_hw_breakpoint()
400 ret = validate_hw_breakpoint(bp); register_perf_hw_breakpoint()
402 /* if arch_validate_hwbkpt_settings() fails then release bp slot */ register_perf_hw_breakpoint()
404 release_bp_slot(bp); register_perf_hw_breakpoint()
428 * @bp: the breakpoint structure to modify
433 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) modify_user_hw_breakpoint() argument
435 u64 old_addr = bp->attr.bp_addr; modify_user_hw_breakpoint()
436 u64 old_len = bp->attr.bp_len; modify_user_hw_breakpoint()
437 int old_type = bp->attr.bp_type; modify_user_hw_breakpoint()
446 if (irqs_disabled() && bp->ctx && bp->ctx->task == current) modify_user_hw_breakpoint()
447 __perf_event_disable(bp); modify_user_hw_breakpoint()
449 perf_event_disable(bp); modify_user_hw_breakpoint()
451 bp->attr.bp_addr = attr->bp_addr; modify_user_hw_breakpoint()
452 bp->attr.bp_type = attr->bp_type; modify_user_hw_breakpoint()
453 bp->attr.bp_len = attr->bp_len; modify_user_hw_breakpoint()
458 err = validate_hw_breakpoint(bp); modify_user_hw_breakpoint()
460 perf_event_enable(bp); modify_user_hw_breakpoint()
463 bp->attr.bp_addr = old_addr; modify_user_hw_breakpoint()
464 bp->attr.bp_type = old_type; modify_user_hw_breakpoint()
465 bp->attr.bp_len = old_len; modify_user_hw_breakpoint()
466 if (!bp->attr.disabled) modify_user_hw_breakpoint()
467 perf_event_enable(bp); modify_user_hw_breakpoint()
473 bp->attr.disabled = attr->disabled; modify_user_hw_breakpoint()
481 * @bp: the breakpoint structure to unregister
483 void unregister_hw_breakpoint(struct perf_event *bp) unregister_hw_breakpoint() argument
485 if (!bp) unregister_hw_breakpoint()
487 perf_event_release_kernel(bp); unregister_hw_breakpoint()
503 struct perf_event * __percpu *cpu_events, *bp; register_wide_hw_breakpoint() local
513 bp = perf_event_create_kernel_counter(attr, cpu, NULL, for_each_online_cpu()
515 if (IS_ERR(bp)) { for_each_online_cpu()
516 err = PTR_ERR(bp); for_each_online_cpu()
520 per_cpu(*cpu_events, cpu) = bp; for_each_online_cpu()
558 static int hw_breakpoint_event_init(struct perf_event *bp) hw_breakpoint_event_init() argument
562 if (bp->attr.type != PERF_TYPE_BREAKPOINT) hw_breakpoint_event_init()
568 if (has_branch_stack(bp)) hw_breakpoint_event_init()
571 err = register_perf_hw_breakpoint(bp); hw_breakpoint_event_init()
575 bp->destroy = bp_perf_event_destroy; hw_breakpoint_event_init()
580 static int hw_breakpoint_add(struct perf_event *bp, int flags) hw_breakpoint_add() argument
583 bp->hw.state = PERF_HES_STOPPED; hw_breakpoint_add()
585 if (is_sampling_event(bp)) { hw_breakpoint_add()
586 bp->hw.last_period = bp->hw.sample_period; hw_breakpoint_add()
587 perf_swevent_set_period(bp); hw_breakpoint_add()
590 return arch_install_hw_breakpoint(bp); hw_breakpoint_add()
593 static void hw_breakpoint_del(struct perf_event *bp, int flags) hw_breakpoint_del() argument
595 arch_uninstall_hw_breakpoint(bp); hw_breakpoint_del()
598 static void hw_breakpoint_start(struct perf_event *bp, int flags) hw_breakpoint_start() argument
600 bp->hw.state = 0; hw_breakpoint_start()
603 static void hw_breakpoint_stop(struct perf_event *bp, int flags) hw_breakpoint_stop() argument
605 bp->hw.state = PERF_HES_STOPPED; hw_breakpoint_stop()
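The slot bookkeeping above backs a small public API. A typical kernel-side consumer, modeled on samples/hw_breakpoint/data_breakpoint.c (the watched symbol and the handler body are illustrative):

	#include <linux/hw_breakpoint.h>
	#include <linux/kallsyms.h>

	static struct perf_event * __percpu *wp;

	static void wp_handler(struct perf_event *bp,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
	{
		pr_info("watched address written\n");
		dump_stack();
	}

	static int __init wp_init(void)
	{
		struct perf_event_attr attr;

		hw_breakpoint_init(&attr);	/* presets PERF_TYPE_BREAKPOINT */
		attr.bp_addr = kallsyms_lookup_name("jiffies");	/* illustrative */
		attr.bp_len = HW_BREAKPOINT_LEN_4;
		attr.bp_type = HW_BREAKPOINT_W;

		wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
		if (IS_ERR((void __force *)wp))
			return PTR_ERR((void __force *)wp);
		return 0;
	}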
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_symlink_remote.c57 struct xfs_buf *bp) xfs_symlink_hdr_set()
59 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_hdr_set()
70 dsl->sl_blkno = cpu_to_be64(bp->b_bn); xfs_symlink_hdr_set()
71 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink_hdr_set()
86 struct xfs_buf *bp) xfs_symlink_hdr_ok()
88 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_hdr_ok()
103 struct xfs_buf *bp) xfs_symlink_verify()
105 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_verify()
106 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_verify()
114 if (bp->b_bn != be64_to_cpu(dsl->sl_blkno)) xfs_symlink_verify()
129 struct xfs_buf *bp) xfs_symlink_read_verify()
131 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_read_verify()
137 if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF)) xfs_symlink_read_verify()
138 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_symlink_read_verify()
139 else if (!xfs_symlink_verify(bp)) xfs_symlink_read_verify()
140 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_symlink_read_verify()
142 if (bp->b_error) xfs_symlink_read_verify()
143 xfs_verifier_error(bp); xfs_symlink_read_verify()
148 struct xfs_buf *bp) xfs_symlink_write_verify()
150 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_symlink_write_verify()
151 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_symlink_write_verify()
157 if (!xfs_symlink_verify(bp)) { xfs_symlink_write_verify()
158 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_symlink_write_verify()
159 xfs_verifier_error(bp); xfs_symlink_write_verify()
164 struct xfs_dsymlink_hdr *dsl = bp->b_addr; xfs_symlink_write_verify()
167 xfs_buf_update_cksum(bp, XFS_SYMLINK_CRC_OFF); xfs_symlink_write_verify()
179 struct xfs_buf *bp, xfs_symlink_local_to_remote()
186 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF); xfs_symlink_local_to_remote()
189 bp->b_ops = NULL; xfs_symlink_local_to_remote()
190 memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); xfs_symlink_local_to_remote()
191 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_symlink_local_to_remote()
199 ASSERT(BBTOB(bp->b_length) >= xfs_symlink_local_to_remote()
202 bp->b_ops = &xfs_symlink_buf_ops; xfs_symlink_local_to_remote()
204 buf = bp->b_addr; xfs_symlink_local_to_remote()
205 buf += xfs_symlink_hdr_set(mp, ip->i_ino, 0, ifp->if_bytes, bp); xfs_symlink_local_to_remote()
207 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsymlink_hdr) + xfs_symlink_local_to_remote()
52 xfs_symlink_hdr_set( struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset, uint32_t size, struct xfs_buf *bp) xfs_symlink_hdr_set() argument
82 xfs_symlink_hdr_ok( xfs_ino_t ino, uint32_t offset, uint32_t size, struct xfs_buf *bp) xfs_symlink_hdr_ok() argument
102 xfs_symlink_verify( struct xfs_buf *bp) xfs_symlink_verify() argument
128 xfs_symlink_read_verify( struct xfs_buf *bp) xfs_symlink_read_verify() argument
147 xfs_symlink_write_verify( struct xfs_buf *bp) xfs_symlink_write_verify() argument
177 xfs_symlink_local_to_remote( struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *ip, struct xfs_ifork *ifp) xfs_symlink_local_to_remote() argument
H A Dxfs_dquot_buf.c177 struct xfs_buf *bp) xfs_dquot_buf_verify_crc()
179 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; xfs_dquot_buf_verify_crc()
195 XFS_BB_TO_FSB(mp, bp->b_length)); xfs_dquot_buf_verify_crc()
210 struct xfs_buf *bp, xfs_dquot_buf_verify()
213 struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr; xfs_dquot_buf_verify()
226 ndquots = xfs_calc_dquots_per_chunk(bp->b_length); xfs_dquot_buf_verify()
253 struct xfs_buf *bp) xfs_dquot_buf_read_verify()
255 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_read_verify()
257 if (!xfs_dquot_buf_verify_crc(mp, bp)) xfs_dquot_buf_read_verify()
258 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dquot_buf_read_verify()
259 else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) xfs_dquot_buf_read_verify()
260 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dquot_buf_read_verify()
262 if (bp->b_error) xfs_dquot_buf_read_verify()
263 xfs_verifier_error(bp); xfs_dquot_buf_read_verify()
274 struct xfs_buf *bp) xfs_dquot_buf_readahead_verify()
276 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_readahead_verify()
278 if (!xfs_dquot_buf_verify_crc(mp, bp) || xfs_dquot_buf_readahead_verify()
279 !xfs_dquot_buf_verify(mp, bp, 0)) { xfs_dquot_buf_readahead_verify()
280 xfs_buf_ioerror(bp, -EIO); xfs_dquot_buf_readahead_verify()
281 bp->b_flags &= ~XBF_DONE; xfs_dquot_buf_readahead_verify()
292 struct xfs_buf *bp) xfs_dquot_buf_write_verify()
294 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dquot_buf_write_verify()
296 if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) { xfs_dquot_buf_write_verify()
297 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dquot_buf_write_verify()
298 xfs_verifier_error(bp); xfs_dquot_buf_write_verify()
175 xfs_dquot_buf_verify_crc( struct xfs_mount *mp, struct xfs_buf *bp) xfs_dquot_buf_verify_crc() argument
208 xfs_dquot_buf_verify( struct xfs_mount *mp, struct xfs_buf *bp, int warn) xfs_dquot_buf_verify() argument
252 xfs_dquot_buf_read_verify( struct xfs_buf *bp) xfs_dquot_buf_read_verify() argument
273 xfs_dquot_buf_readahead_verify( struct xfs_buf *bp) xfs_dquot_buf_readahead_verify() argument
291 xfs_dquot_buf_write_verify( struct xfs_buf *bp) xfs_dquot_buf_write_verify() argument
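Every xfs hit in these files instantiates the same verifier contract: verify_read checks the on-disk CRC before structure (-EFSBADCRC for a bad checksum, -EFSCORRUPTED for a bad layout), while verify_write re-checks structure only, records the buf log item's LSN, and stamps a fresh CRC just before I/O. The shared skeleton, with generic placeholder names (xfs_foo_verify, XFS_FOO_CRC_OFF, struct xfs_foo_hdr) and the xfs_sb_version_hascrc() gating omitted:

	static void
	xfs_foo_read_verify(
		struct xfs_buf		*bp)
	{
		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
			xfs_buf_ioerror(bp, -EFSBADCRC);
		else if (!xfs_foo_verify(bp))		/* structural checks */
			xfs_buf_ioerror(bp, -EFSCORRUPTED);

		if (bp->b_error)
			xfs_verifier_error(bp);
	}

	static void
	xfs_foo_write_verify(
		struct xfs_buf		*bp)
	{
		struct xfs_buf_log_item	*bip = bp->b_fspriv;

		if (!xfs_foo_verify(bp)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
			return;
		}

		if (bip) {	/* record last-modified LSN before CRCing */
			struct xfs_foo_hdr *hdr = bp->b_addr;

			hdr->lsn = cpu_to_be64(bip->bli_item.li_lsn);
		}
		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
	}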
H A Dxfs_dir2_block.c42 static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, struct xfs_buf *bp,
44 static void xfs_dir2_block_log_tail(xfs_trans_t *tp, struct xfs_buf *bp);
63 struct xfs_buf *bp) xfs_dir3_block_verify()
65 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_verify()
66 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_verify()
73 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_block_verify()
81 if (__xfs_dir3_data_check(NULL, bp)) xfs_dir3_block_verify()
88 struct xfs_buf *bp) xfs_dir3_block_read_verify()
90 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_read_verify()
93 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) xfs_dir3_block_read_verify()
94 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_block_read_verify()
95 else if (!xfs_dir3_block_verify(bp)) xfs_dir3_block_read_verify()
96 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_block_read_verify()
98 if (bp->b_error) xfs_dir3_block_read_verify()
99 xfs_verifier_error(bp); xfs_dir3_block_read_verify()
104 struct xfs_buf *bp) xfs_dir3_block_write_verify()
106 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_block_write_verify()
107 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_block_write_verify()
108 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_write_verify()
110 if (!xfs_dir3_block_verify(bp)) { xfs_dir3_block_write_verify()
111 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_block_write_verify()
112 xfs_verifier_error(bp); xfs_dir3_block_write_verify()
122 xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF); xfs_dir3_block_write_verify()
151 struct xfs_buf *bp, xfs_dir3_block_init()
154 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_block_init()
156 bp->b_ops = &xfs_dir3_block_buf_ops; xfs_dir3_block_init()
157 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_BLOCK_BUF); xfs_dir3_block_init()
162 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_dir3_block_init()
286 struct xfs_buf *bp, xfs_dir2_block_compact()
318 xfs_dir2_data_make_free(args, bp, xfs_dir2_block_compact()
340 struct xfs_buf *bp; /* buffer for block */ xfs_dir2_block_addname() local
367 /* Read the (one and only) directory block into bp. */ xfs_dir2_block_addname()
368 error = xfs_dir3_block_read(tp, dp, &bp); xfs_dir2_block_addname()
377 hdr = bp->b_addr; xfs_dir2_block_addname()
392 xfs_trans_brelse(tp, bp); xfs_dir2_block_addname()
409 error = xfs_dir2_block_to_leaf(args, bp); xfs_dir2_block_addname()
421 xfs_dir2_block_compact(args, bp, hdr, btp, blp, &needlog, xfs_dir2_block_addname()
456 xfs_dir2_data_use_free(args, bp, enddup, xfs_dir2_block_addname()
539 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); xfs_dir2_block_addname()
543 xfs_dir2_data_use_free(args, bp, dup, xfs_dir2_block_addname()
561 xfs_dir2_data_log_header(args, bp); xfs_dir2_block_addname()
562 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_block_addname()
563 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_block_addname()
564 xfs_dir3_data_check(dp, bp); xfs_dir2_block_addname()
574 struct xfs_buf *bp, /* block buffer */ xfs_dir2_block_log_leaf()
578 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_block_log_leaf()
584 xfs_trans_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr), xfs_dir2_block_log_leaf()
594 struct xfs_buf *bp) /* block buffer */ xfs_dir2_block_log_tail()
596 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_block_log_tail()
600 xfs_trans_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr), xfs_dir2_block_log_tail()
614 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_lookup() local
627 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) xfs_dir2_block_lookup()
630 hdr = bp->b_addr; xfs_dir2_block_lookup()
631 xfs_dir3_data_check(dp, bp); xfs_dir2_block_lookup()
646 xfs_trans_brelse(args->trans, bp); xfs_dir2_block_lookup()
662 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_lookup_int() local
679 error = xfs_dir3_block_read(tp, dp, &bp); xfs_dir2_block_lookup_int()
683 hdr = bp->b_addr; xfs_dir2_block_lookup_int()
684 xfs_dir3_data_check(dp, bp); xfs_dir2_block_lookup_int()
702 xfs_trans_brelse(tp, bp); xfs_dir2_block_lookup_int()
732 *bpp = bp; xfs_dir2_block_lookup_int()
750 xfs_trans_brelse(tp, bp); xfs_dir2_block_lookup_int()
764 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_removename() local
782 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { xfs_dir2_block_removename()
787 hdr = bp->b_addr; xfs_dir2_block_removename()
800 xfs_dir2_data_make_free(args, bp, xfs_dir2_block_removename()
807 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_block_removename()
812 xfs_dir2_block_log_leaf(tp, bp, ent, ent); xfs_dir2_block_removename()
819 xfs_dir2_data_log_header(args, bp); xfs_dir2_block_removename()
820 xfs_dir3_data_check(dp, bp); xfs_dir2_block_removename()
831 return xfs_dir2_block_to_sf(args, bp, size, &sfh); xfs_dir2_block_removename()
844 struct xfs_buf *bp; /* block buffer */ xfs_dir2_block_replace() local
857 if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { xfs_dir2_block_replace()
861 hdr = bp->b_addr; xfs_dir2_block_replace()
876 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_block_replace()
877 xfs_dir3_data_check(dp, bp); xfs_dir2_block_replace()
1053 struct xfs_buf *bp; /* block buffer */ xfs_dir2_sf_to_block() local
1119 error = xfs_dir3_data_init(args, blkno, &bp); xfs_dir2_sf_to_block()
1124 xfs_dir3_block_init(mp, tp, bp, dp); xfs_dir2_sf_to_block()
1125 hdr = bp->b_addr; xfs_dir2_sf_to_block()
1138 xfs_dir2_data_use_free(args, bp, dup, args->geo->blksize - i, xfs_dir2_sf_to_block()
1152 xfs_dir2_data_use_free(args, bp, dup, xfs_dir2_sf_to_block()
1165 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1179 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1213 xfs_dir2_data_log_unused(args, bp, dup); xfs_dir2_sf_to_block()
1230 xfs_dir2_data_log_entry(args, bp, dep); xfs_dir2_sf_to_block()
1254 xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); xfs_dir2_sf_to_block()
1255 xfs_dir2_block_log_tail(tp, bp); xfs_dir2_sf_to_block()
1256 xfs_dir3_data_check(dp, bp); xfs_dir2_sf_to_block()
62 xfs_dir3_block_verify( struct xfs_buf *bp) xfs_dir3_block_verify() argument
87 xfs_dir3_block_read_verify( struct xfs_buf *bp) xfs_dir3_block_read_verify() argument
103 xfs_dir3_block_write_verify( struct xfs_buf *bp) xfs_dir3_block_write_verify() argument
148 xfs_dir3_block_init( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *dp) xfs_dir3_block_init() argument
284 xfs_dir2_block_compact( struct xfs_da_args *args, struct xfs_buf *bp, struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_block_tail *btp, struct xfs_dir2_leaf_entry *blp, int *needlog, int *lfloghigh, int *lfloglow) xfs_dir2_block_compact() argument
572 xfs_dir2_block_log_leaf( xfs_trans_t *tp, struct xfs_buf *bp, int first, int last) xfs_dir2_block_log_leaf() argument
592 xfs_dir2_block_log_tail( xfs_trans_t *tp, struct xfs_buf *bp) xfs_dir2_block_log_tail() argument
H A Dxfs_da_btree.c114 state->altpath.blk[i].bp = NULL; xfs_da_state_kill_altpath()
133 struct xfs_buf *bp) xfs_da3_node_verify()
135 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_da3_node_verify()
136 struct xfs_da_intnode *hdr = bp->b_addr; xfs_da3_node_verify()
145 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_verify()
152 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn) xfs_da3_node_verify()
182 struct xfs_buf *bp) xfs_da3_node_write_verify()
184 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_da3_node_write_verify()
185 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_da3_node_write_verify()
186 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_write_verify()
188 if (!xfs_da3_node_verify(bp)) { xfs_da3_node_write_verify()
189 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_da3_node_write_verify()
190 xfs_verifier_error(bp); xfs_da3_node_write_verify()
200 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF); xfs_da3_node_write_verify()
211 struct xfs_buf *bp) xfs_da3_node_read_verify()
213 struct xfs_da_blkinfo *info = bp->b_addr; xfs_da3_node_read_verify()
217 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { xfs_da3_node_read_verify()
218 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_da3_node_read_verify()
223 if (!xfs_da3_node_verify(bp)) { xfs_da3_node_read_verify()
224 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_da3_node_read_verify()
230 bp->b_ops = &xfs_attr3_leaf_buf_ops; xfs_da3_node_read_verify()
231 bp->b_ops->verify_read(bp); xfs_da3_node_read_verify()
235 bp->b_ops = &xfs_dir3_leafn_buf_ops; xfs_da3_node_read_verify()
236 bp->b_ops->verify_read(bp); xfs_da3_node_read_verify()
239 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_da3_node_read_verify()
244 xfs_verifier_error(bp); xfs_da3_node_read_verify()
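xfs_da3_node_read_verify() is the odd verifier out: a da-btree read may land on an attr-leaf or dir-leafn block instead, so after the CRC check it switches on the block magic, retargets bp->b_ops to the owning verifier, and re-dispatches. The branch logic, condensed into one sketch (the v4, non-CRC magic cases are omitted):

	static void xfs_da3_dispatch_read_verify(struct xfs_buf *bp)
	{
		struct xfs_da_blkinfo *info = bp->b_addr;

		switch (be16_to_cpu(info->magic)) {
		case XFS_ATTR3_LEAF_MAGIC:
			bp->b_ops = &xfs_attr3_leaf_buf_ops;
			bp->b_ops->verify_read(bp);	/* re-verify as attr leaf */
			return;
		case XFS_DIR3_LEAFN_MAGIC:
			bp->b_ops = &xfs_dir3_leafn_buf_ops;
			bp->b_ops->verify_read(bp);	/* re-verify as dir leafn */
			return;
		default:
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
			return;
		}
	}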
312 struct xfs_buf *bp; xfs_da3_node_create() local
319 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); xfs_da3_node_create()
322 bp->b_ops = &xfs_da3_node_buf_ops; xfs_da3_node_create()
323 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); xfs_da3_node_create()
324 node = bp->b_addr; xfs_da3_node_create()
327 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_da3_node_create()
331 hdr3->info.blkno = cpu_to_be64(bp->b_bn); xfs_da3_node_create()
340 xfs_trans_log_buf(tp, bp, xfs_da3_node_create()
343 *bpp = bp; xfs_da3_node_create()
359 struct xfs_buf *bp; xfs_da3_split() local
427 addblk->bp = NULL; xfs_da3_split()
455 addblk->bp = NULL; xfs_da3_split()
470 node = oldblk->bp->b_addr; xfs_da3_split()
473 bp = addblk->bp; xfs_da3_split()
476 bp = state->extrablk.bp; xfs_da3_split()
478 node = bp->b_addr; xfs_da3_split()
480 xfs_trans_log_buf(state->args->trans, bp, xfs_da3_split()
484 node = oldblk->bp->b_addr; xfs_da3_split()
487 bp = addblk->bp; xfs_da3_split()
490 bp = state->extrablk.bp; xfs_da3_split()
492 node = bp->b_addr; xfs_da3_split()
494 xfs_trans_log_buf(state->args->trans, bp, xfs_da3_split()
498 addblk->bp = NULL; xfs_da3_split()
518 struct xfs_buf *bp; xfs_da3_root_split() local
540 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork); xfs_da3_root_split()
543 node = bp->b_addr; xfs_da3_root_split()
544 oldroot = blk1->bp->b_addr; xfs_da3_root_split()
555 * we are about to copy oldroot to bp, so set up the type xfs_da3_root_split()
556 * of bp while we know exactly what it will be. xfs_da3_root_split()
558 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); xfs_da3_root_split()
573 * we are about to copy oldroot to bp, so set up the type xfs_da3_root_split()
574 * of bp while we know exactly what it will be. xfs_da3_root_split()
576 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); xfs_da3_root_split()
590 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn); xfs_da3_root_split()
592 xfs_trans_log_buf(tp, bp, 0, size - 1); xfs_da3_root_split()
594 bp->b_ops = blk1->bp->b_ops; xfs_da3_root_split()
595 xfs_trans_buf_copy_type(bp, blk1->bp); xfs_da3_root_split()
596 blk1->bp = bp; xfs_da3_root_split()
604 level + 1, &bp, args->whichfork); xfs_da3_root_split()
608 node = bp->b_addr; xfs_da3_root_split()
629 xfs_trans_log_buf(tp, bp, xfs_da3_root_split()
657 node = oldblk->bp->b_addr; xfs_da3_node_split()
678 &newblk->bp, state->args->whichfork); xfs_da3_node_split()
704 node = oldblk->bp->b_addr; xfs_da3_node_split()
758 node1 = blk1->bp->b_addr; xfs_da3_node_rebalance()
759 node2 = blk2->bp->b_addr; xfs_da3_node_rebalance()
824 xfs_trans_log_buf(tp, blk1->bp, xfs_da3_node_rebalance()
842 xfs_trans_log_buf(tp, blk1->bp, xfs_da3_node_rebalance()
846 xfs_trans_log_buf(tp, blk2->bp, xfs_da3_node_rebalance()
856 node1 = blk1->bp->b_addr; xfs_da3_node_rebalance()
857 node2 = blk2->bp->b_addr; xfs_da3_node_rebalance()
892 node = oldblk->bp->b_addr; xfs_da3_node_add()
912 xfs_trans_log_buf(state->args->trans, oldblk->bp, xfs_da3_node_add()
918 xfs_trans_log_buf(state->args->trans, oldblk->bp, xfs_da3_node_add()
1002 drop_blk->bp); xfs_da3_join()
1003 drop_blk->bp = NULL; xfs_da3_join()
1052 struct xfs_buf *bp; xfs_da3_root_join() local
1063 oldroot = root_blk->bp->b_addr; xfs_da3_root_join()
1081 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp, xfs_da3_root_join()
1085 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level); xfs_da3_root_join()
1094 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize); xfs_da3_root_join()
1095 root_blk->bp->b_ops = bp->b_ops; xfs_da3_root_join()
1096 xfs_trans_buf_copy_type(root_blk->bp, bp); xfs_da3_root_join()
1098 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr; xfs_da3_root_join()
1099 da3->blkno = cpu_to_be64(root_blk->bp->b_bn); xfs_da3_root_join()
1101 xfs_trans_log_buf(args->trans, root_blk->bp, 0, xfs_da3_root_join()
1103 error = xfs_da_shrink_inode(args, child, bp); xfs_da3_root_join()
1125 struct xfs_buf *bp; xfs_da3_node_toosmall() local
1142 info = blk->bp->b_addr; xfs_da3_node_toosmall()
1197 blkno, -1, &bp, state->args->whichfork); xfs_da3_node_toosmall()
1201 node = bp->b_addr; xfs_da3_node_toosmall()
1203 xfs_trans_brelse(state->args->trans, bp); xfs_da3_node_toosmall()
1241 struct xfs_buf *bp, xfs_da3_node_lasthash()
1248 node = bp->b_addr; xfs_da3_node_lasthash()
1281 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count); xfs_da3_fixhashpath()
1286 lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count); xfs_da3_fixhashpath()
1291 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count); xfs_da3_fixhashpath()
1299 node = blk->bp->b_addr; xfs_da3_fixhashpath()
1306 xfs_trans_log_buf(state->args->trans, blk->bp, xfs_da3_fixhashpath()
1331 node = drop_blk->bp->b_addr; xfs_da3_node_remove()
1345 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1350 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1354 xfs_trans_log_buf(state->args->trans, drop_blk->bp, xfs_da3_node_remove()
1386 drop_node = drop_blk->bp->b_addr; xfs_da3_node_unbalance()
1387 save_node = save_blk->bp->b_addr; xfs_da3_node_unbalance()
1407 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1413 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1426 xfs_trans_log_buf(tp, save_blk->bp, xfs_da3_node_unbalance()
1487 -1, &blk->bp, args->whichfork); xfs_da3_node_lookup_int()
1493 curr = blk->bp->b_addr; xfs_da3_node_lookup_int()
1499 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); xfs_da3_node_lookup_int()
1507 blk->bp, NULL); xfs_da3_node_lookup_int()
1517 node = blk->bp->b_addr; xfs_da3_node_lookup_int()
1576 retval = xfs_dir2_leafn_lookup_int(blk->bp, args, xfs_da3_node_lookup_int()
1579 retval = xfs_attr3_leaf_lookup_int(blk->bp, args); xfs_da3_node_lookup_int()
1654 struct xfs_buf *bp; xfs_da3_blk_link() local
1664 old_info = old_blk->bp->b_addr; xfs_da3_blk_link()
1665 new_info = new_blk->bp->b_addr; xfs_da3_blk_link()
1672 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1675 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1678 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp); xfs_da3_blk_link()
1695 -1, &bp, args->whichfork); xfs_da3_blk_link()
1698 ASSERT(bp != NULL); xfs_da3_blk_link()
1699 tmp_info = bp->b_addr; xfs_da3_blk_link()
1703 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da3_blk_link()
1716 -1, &bp, args->whichfork); xfs_da3_blk_link()
1719 ASSERT(bp != NULL); xfs_da3_blk_link()
1720 tmp_info = bp->b_addr; xfs_da3_blk_link()
1724 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da3_blk_link()
1729 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); xfs_da3_blk_link()
1730 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); xfs_da3_blk_link()
1747 struct xfs_buf *bp; xfs_da3_blk_unlink() local
1755 save_info = save_blk->bp->b_addr; xfs_da3_blk_unlink()
1756 drop_info = drop_blk->bp->b_addr; xfs_da3_blk_unlink()
1775 -1, &bp, args->whichfork); xfs_da3_blk_unlink()
1778 ASSERT(bp != NULL); xfs_da3_blk_unlink()
1779 tmp_info = bp->b_addr; xfs_da3_blk_unlink()
1783 xfs_trans_log_buf(args->trans, bp, 0, xfs_da3_blk_unlink()
1792 -1, &bp, args->whichfork); xfs_da3_blk_unlink()
1795 ASSERT(bp != NULL); xfs_da3_blk_unlink()
1796 tmp_info = bp->b_addr; xfs_da3_blk_unlink()
1800 xfs_trans_log_buf(args->trans, bp, 0, xfs_da3_blk_unlink()
1805 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); xfs_da3_blk_unlink()
1831 struct xfs_buf *bp; xfs_da3_path_shift() local
1850 node = blk->bp->b_addr; xfs_da3_path_shift()
1878 error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp, xfs_da3_path_shift()
1890 xfs_trans_brelse(args->trans, blk->bp); xfs_da3_path_shift()
1892 blk->bp = bp; xfs_da3_path_shift()
1894 info = blk->bp->b_addr; xfs_da3_path_shift()
1926 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); xfs_da3_path_shift()
1934 blk->bp, NULL); xfs_da3_path_shift()
2549 struct xfs_buf *bp; xfs_da_get_buf() local
2567 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, xfs_da_get_buf()
2569 error = bp ? bp->b_error : -EIO; xfs_da_get_buf()
2571 if (bp) xfs_da_get_buf()
2572 xfs_trans_brelse(trans, bp); xfs_da_get_buf()
2576 *bpp = bp; xfs_da_get_buf()
2598 struct xfs_buf *bp; xfs_da_read_buf() local
2618 mapp, nmap, 0, &bp, ops); xfs_da_read_buf()
2623 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF); xfs_da_read_buf()
2625 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF); xfs_da_read_buf()
2626 *bpp = bp; xfs_da_read_buf()
132 xfs_da3_node_verify( struct xfs_buf *bp) xfs_da3_node_verify() argument
181 xfs_da3_node_write_verify( struct xfs_buf *bp) xfs_da3_node_write_verify() argument
210 xfs_da3_node_read_verify( struct xfs_buf *bp) xfs_da3_node_read_verify() argument
1239 xfs_da3_node_lasthash( struct xfs_inode *dp, struct xfs_buf *bp, int *count) xfs_da3_node_lasthash() argument
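
A note on the xfs_da3_node_read_verify() hits above: the generic da-node verifier dispatches on the block's magic, and when it finds an attr-leaf or dir-leafn block where it expected a node, it retargets bp->b_ops at the matching verifier and re-runs verify_read. The stand-alone sketch below models only that shape; struct buf, the magic values, and the error numbers are simplified stand-ins, not the real XFS definitions.

#include <stdio.h>

struct buf;
struct buf_ops { void (*verify_read)(struct buf *); };

struct buf {
    unsigned magic;              /* stands in for the magic at bp->b_addr */
    int error;                   /* stands in for bp->b_error */
    const struct buf_ops *ops;   /* stands in for bp->b_ops */
};

static void attr_leaf_verify_read(struct buf *bp) { (void)bp; printf("attr-leaf checks\n"); }
static void dir_leafn_verify_read(struct buf *bp) { (void)bp; printf("dir-leafn checks\n"); }

static const struct buf_ops attr_leaf_ops = { attr_leaf_verify_read };
static const struct buf_ops dir_leafn_ops = { dir_leafn_verify_read };

enum { DA_NODE_MAGIC, ATTR_LEAF_MAGIC, DIR_LEAFN_MAGIC };

/* same shape as xfs_da3_node_read_verify(): verify what we expected,
 * or hand the buffer to the verifier matching what we actually found */
static void da_node_read_verify(struct buf *bp)
{
    switch (bp->magic) {
    case DA_NODE_MAGIC:
        break;                   /* node checks (level, hash order, ...) */
    case ATTR_LEAF_MAGIC:
        bp->ops = &attr_leaf_ops;
        bp->ops->verify_read(bp);
        return;
    case DIR_LEAFN_MAGIC:
        bp->ops = &dir_leafn_ops;
        bp->ops->verify_read(bp);
        return;
    default:
        bp->error = -117;        /* -EFSCORRUPTED */
        return;
    }
}

int main(void)
{
    struct buf bp = { .magic = DIR_LEAFN_MAGIC };
    da_node_read_verify(&bp);
    return bp.error ? 1 : 0;
}
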
H A Dxfs_alloc_btree.c106 struct xfs_buf *bp) xfs_allocbt_free_block()
113 bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_allocbt_free_block()
122 xfs_trans_binval(cur->bc_tp, bp); xfs_allocbt_free_block()
274 struct xfs_buf *bp) xfs_allocbt_verify()
276 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_allocbt_verify()
277 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_allocbt_verify()
278 struct xfs_perag *pag = bp->b_pag; xfs_allocbt_verify()
300 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_allocbt_verify()
318 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_allocbt_verify()
354 struct xfs_buf *bp) xfs_allocbt_read_verify()
356 if (!xfs_btree_sblock_verify_crc(bp)) xfs_allocbt_read_verify()
357 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_allocbt_read_verify()
358 else if (!xfs_allocbt_verify(bp)) xfs_allocbt_read_verify()
359 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_allocbt_read_verify()
361 if (bp->b_error) { xfs_allocbt_read_verify()
362 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_allocbt_read_verify()
363 xfs_verifier_error(bp); xfs_allocbt_read_verify()
369 struct xfs_buf *bp) xfs_allocbt_write_verify()
371 if (!xfs_allocbt_verify(bp)) { xfs_allocbt_write_verify()
372 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_allocbt_write_verify()
373 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_allocbt_write_verify()
374 xfs_verifier_error(bp); xfs_allocbt_write_verify()
377 xfs_btree_sblock_calc_crc(bp); xfs_allocbt_write_verify()
104 xfs_allocbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_allocbt_free_block() argument
273 xfs_allocbt_verify( struct xfs_buf *bp) xfs_allocbt_verify() argument
353 xfs_allocbt_read_verify( struct xfs_buf *bp) xfs_allocbt_read_verify() argument
368 xfs_allocbt_write_verify( struct xfs_buf *bp) xfs_allocbt_write_verify() argument
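
The xfs_allocbt_read_verify()/xfs_allocbt_write_verify() pair above illustrates the two directions of the verifier contract: reads check the CRC before the structural checks (a bad checksum yields -EFSBADCRC, bad structure -EFSCORRUPTED), while writes check the structure first and only then recompute the CRC over the outgoing buffer. A compact model of that ordering, with invented types and a toy checksum:

#include <stdbool.h>
#include <stdio.h>

#define EFSBADCRC     74     /* local stand-in errno values */
#define EFSCORRUPTED 117

struct buf { unsigned crc; unsigned magic; int error; };

static unsigned crc_of(const struct buf *bp)  { return bp->magic * 2654435761u; }
static bool crc_ok(const struct buf *bp)      { return bp->crc == crc_of(bp); }
static bool contents_ok(const struct buf *bp) { return bp->magic == 0xAB; }

/* read side: checksum first, then structure */
static void read_verify(struct buf *bp)
{
    if (!crc_ok(bp))
        bp->error = -EFSBADCRC;
    else if (!contents_ok(bp))
        bp->error = -EFSCORRUPTED;
}

/* write side: structure first, then (re)compute the checksum */
static void write_verify(struct buf *bp)
{
    if (!contents_ok(bp)) {
        bp->error = -EFSCORRUPTED;
        return;                  /* never checksum a corrupt block */
    }
    bp->crc = crc_of(bp);
}

int main(void)
{
    struct buf bp = { .magic = 0xAB };
    write_verify(&bp);           /* stamps the CRC */
    read_verify(&bp);            /* now passes both checks */
    printf("error=%d\n", bp.error);
    return 0;
}

The asymmetry matters: stamping a CRC onto a structurally corrupt block would make the corruption look valid on the next read.
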
H A Dxfs_attr_remote.c120 struct xfs_buf *bp) xfs_attr3_rmt_read_verify()
122 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_rmt_read_verify()
132 ptr = bp->b_addr; xfs_attr3_rmt_read_verify()
133 bno = bp->b_bn; xfs_attr3_rmt_read_verify()
134 len = BBTOB(bp->b_length); xfs_attr3_rmt_read_verify()
139 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_attr3_rmt_read_verify()
143 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_read_verify()
151 if (bp->b_error) xfs_attr3_rmt_read_verify()
152 xfs_verifier_error(bp); xfs_attr3_rmt_read_verify()
159 struct xfs_buf *bp) xfs_attr3_rmt_write_verify()
161 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_rmt_write_verify()
171 ptr = bp->b_addr; xfs_attr3_rmt_write_verify()
172 bno = bp->b_bn; xfs_attr3_rmt_write_verify()
173 len = BBTOB(bp->b_length); xfs_attr3_rmt_write_verify()
180 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_write_verify()
181 xfs_verifier_error(bp); xfs_attr3_rmt_write_verify()
190 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_rmt_write_verify()
191 xfs_verifier_error(bp); xfs_attr3_rmt_write_verify()
251 struct xfs_buf *bp, xfs_attr_rmtval_copyout()
257 char *src = bp->b_addr; xfs_attr_rmtval_copyout()
258 xfs_daddr_t bno = bp->b_bn; xfs_attr_rmtval_copyout()
259 int len = BBTOB(bp->b_length); xfs_attr_rmtval_copyout()
299 struct xfs_buf *bp, xfs_attr_rmtval_copyin()
305 char *dst = bp->b_addr; xfs_attr_rmtval_copyin()
306 xfs_daddr_t bno = bp->b_bn; xfs_attr_rmtval_copyin()
307 int len = BBTOB(bp->b_length); xfs_attr_rmtval_copyin()
355 struct xfs_buf *bp; xfs_attr_rmtval_get() local
389 dblkno, dblkcnt, 0, &bp, xfs_attr_rmtval_get()
394 error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino, xfs_attr_rmtval_get()
397 xfs_buf_relse(bp); xfs_attr_rmtval_get()
512 struct xfs_buf *bp; xfs_attr_rmtval_set() local
532 bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); xfs_attr_rmtval_set()
533 if (!bp) xfs_attr_rmtval_set()
535 bp->b_ops = &xfs_attr3_rmt_buf_ops; xfs_attr_rmtval_set()
537 xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, xfs_attr_rmtval_set()
540 error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ xfs_attr_rmtval_set()
541 xfs_buf_relse(bp); xfs_attr_rmtval_set()
577 struct xfs_buf *bp; xfs_attr_rmtval_remove() local
600 bp = xfs_incore(mp->m_ddev_targp, dblkno, dblkcnt, XBF_TRYLOCK); xfs_attr_rmtval_remove()
601 if (bp) { xfs_attr_rmtval_remove()
602 xfs_buf_stale(bp); xfs_attr_rmtval_remove()
603 xfs_buf_relse(bp); xfs_attr_rmtval_remove()
604 bp = NULL; xfs_attr_rmtval_remove()
119 xfs_attr3_rmt_read_verify( struct xfs_buf *bp) xfs_attr3_rmt_read_verify() argument
158 xfs_attr3_rmt_write_verify( struct xfs_buf *bp) xfs_attr3_rmt_write_verify() argument
249 xfs_attr_rmtval_copyout( struct xfs_mount *mp, struct xfs_buf *bp, xfs_ino_t ino, int *offset, int *valuelen, __uint8_t **dst) xfs_attr_rmtval_copyout() argument
297 xfs_attr_rmtval_copyin( struct xfs_mount *mp, struct xfs_buf *bp, xfs_ino_t ino, int *offset, int *valuelen, __uint8_t **src) xfs_attr_rmtval_copyin() argument
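
xfs_attr_rmtval_copyout()/copyin() above walk a buffer one filesystem block at a time, skipping a per-block remote-attribute header and copying only the payload while advancing *offset and *valuelen. A simplified model of the copyout direction; the block size, header size, and layout here are invented for illustration only:

#include <stdio.h>
#include <string.h>

#define BLKSZ 16   /* toy block size */
#define HDRSZ 4    /* toy per-block remote-attr header */

/* same shape as xfs_attr_rmtval_copyout(): per block, skip the header,
 * then copy min(payload, remaining value bytes) and advance the cursors */
static void rmt_copyout(const char *buf, int len, int *offset,
                        int *valuelen, char **dst)
{
    while (len > 0 && *valuelen > 0) {
        int payload = BLKSZ - HDRSZ;
        int n = *valuelen < payload ? *valuelen : payload;

        memcpy(*dst, buf + HDRSZ, n);
        *dst += n;
        *offset += n;
        *valuelen -= n;
        buf += BLKSZ;
        len -= BLKSZ;
    }
}

int main(void)
{
    /* two toy blocks, each starting with a 4-byte "HDR:" header */
    char blocks[2 * BLKSZ] = "HDR:hello, worldHDR:!";
    char out[16] = { 0 }, *dst = out;
    int offset = 0, valuelen = 13;

    rmt_copyout(blocks, sizeof(blocks), &offset, &valuelen, &dst);
    printf("%s\n", out);         /* -> "hello, world!" */
    return 0;
}
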
H A Dxfs_dir2_data.c44 struct xfs_buf *bp) /* data block's buffer */ __xfs_dir3_data_check()
67 mp = bp->b_target->bt_mount; __xfs_dir3_data_check()
76 hdr = bp->b_addr; __xfs_dir3_data_check()
216 struct xfs_buf *bp) xfs_dir3_data_verify()
218 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_verify()
219 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_verify()
226 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_data_verify()
234 if (__xfs_dir3_data_check(NULL, bp)) xfs_dir3_data_verify()
246 struct xfs_buf *bp) xfs_dir3_data_reada_verify()
248 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir3_data_reada_verify()
253 bp->b_ops = &xfs_dir3_block_buf_ops; xfs_dir3_data_reada_verify()
254 bp->b_ops->verify_read(bp); xfs_dir3_data_reada_verify()
258 bp->b_ops = &xfs_dir3_data_buf_ops; xfs_dir3_data_reada_verify()
259 bp->b_ops->verify_read(bp); xfs_dir3_data_reada_verify()
262 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_reada_verify()
263 xfs_verifier_error(bp); xfs_dir3_data_reada_verify()
270 struct xfs_buf *bp) xfs_dir3_data_read_verify()
272 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_read_verify()
275 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) xfs_dir3_data_read_verify()
276 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_data_read_verify()
277 else if (!xfs_dir3_data_verify(bp)) xfs_dir3_data_read_verify()
278 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_read_verify()
280 if (bp->b_error) xfs_dir3_data_read_verify()
281 xfs_verifier_error(bp); xfs_dir3_data_read_verify()
286 struct xfs_buf *bp) xfs_dir3_data_write_verify()
288 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_data_write_verify()
289 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_data_write_verify()
290 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_write_verify()
292 if (!xfs_dir3_data_verify(bp)) { xfs_dir3_data_write_verify()
293 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_data_write_verify()
294 xfs_verifier_error(bp); xfs_dir3_data_write_verify()
304 xfs_buf_update_cksum(bp, XFS_DIR3_DATA_CRC_OFF); xfs_dir3_data_write_verify()
578 struct xfs_buf *bp; /* block buffer */ xfs_dir3_data_init() local
596 -1, &bp, XFS_DATA_FORK); xfs_dir3_data_init()
599 bp->b_ops = &xfs_dir3_data_buf_ops; xfs_dir3_data_init()
600 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_DATA_BUF); xfs_dir3_data_init()
605 hdr = bp->b_addr; xfs_dir3_data_init()
607 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_data_init()
611 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_dir3_data_init()
638 xfs_dir2_data_log_header(args, bp); xfs_dir3_data_init()
639 xfs_dir2_data_log_unused(args, bp, dup); xfs_dir3_data_init()
640 *bpp = bp; xfs_dir3_data_init()
650 struct xfs_buf *bp, xfs_dir2_data_log_entry()
653 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir2_data_log_entry()
660 xfs_trans_log_buf(args->trans, bp, (uint)((char *)dep - (char *)hdr), xfs_dir2_data_log_entry()
671 struct xfs_buf *bp) xfs_dir2_data_log_header()
674 struct xfs_dir2_data_hdr *hdr = bp->b_addr; xfs_dir2_data_log_header()
682 xfs_trans_log_buf(args->trans, bp, 0, xfs_dir2_data_log_header()
692 struct xfs_buf *bp, xfs_dir2_data_log_unused()
695 xfs_dir2_data_hdr_t *hdr = bp->b_addr; xfs_dir2_data_log_unused()
705 xfs_trans_log_buf(args->trans, bp, (uint)((char *)dup - (char *)hdr), xfs_dir2_data_log_unused()
711 xfs_trans_log_buf(args->trans, bp, xfs_dir2_data_log_unused()
724 struct xfs_buf *bp, xfs_dir2_data_make_free()
739 hdr = bp->b_addr; xfs_dir2_data_make_free()
807 xfs_dir2_data_log_unused(args, bp, prevdup); xfs_dir2_data_make_free()
842 xfs_dir2_data_log_unused(args, bp, prevdup); xfs_dir2_data_make_free()
870 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_make_free()
897 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_make_free()
909 struct xfs_buf *bp, xfs_dir2_data_use_free()
926 hdr = bp->b_addr; xfs_dir2_data_use_free()
971 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
999 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
1027 xfs_dir2_data_log_unused(args, bp, newdup); xfs_dir2_data_use_free()
1033 xfs_dir2_data_log_unused(args, bp, newdup2); xfs_dir2_data_use_free()
42 __xfs_dir3_data_check( struct xfs_inode *dp, struct xfs_buf *bp) __xfs_dir3_data_check() argument
215 xfs_dir3_data_verify( struct xfs_buf *bp) xfs_dir3_data_verify() argument
245 xfs_dir3_data_reada_verify( struct xfs_buf *bp) xfs_dir3_data_reada_verify() argument
269 xfs_dir3_data_read_verify( struct xfs_buf *bp) xfs_dir3_data_read_verify() argument
285 xfs_dir3_data_write_verify( struct xfs_buf *bp) xfs_dir3_data_write_verify() argument
648 xfs_dir2_data_log_entry( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_entry_t *dep) xfs_dir2_data_log_entry() argument
669 xfs_dir2_data_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir2_data_log_header() argument
690 xfs_dir2_data_log_unused( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_unused_t *dup) xfs_dir2_data_log_unused() argument
722 xfs_dir2_data_make_free( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp) xfs_dir2_data_make_free() argument
907 xfs_dir2_data_use_free( struct xfs_da_args *args, struct xfs_buf *bp, xfs_dir2_data_unused_t *dup, xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp) xfs_dir2_data_use_free() argument
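
Every xfs_dir2_data_log_* helper above reduces to the same move: compute the dirty structure's first and last byte offsets relative to the block's start (bp->b_addr) and pass that inclusive range to xfs_trans_log_buf(), so only the touched bytes are logged. A stand-alone illustration of the offset arithmetic, with an invented entry layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* toy block: header followed by fixed-size entries */
struct hdr   { uint32_t magic; uint32_t count; };
struct entry { uint64_t ino; char name[8]; };
struct block { struct hdr h; struct entry e[4]; };

/* stand-in for xfs_trans_log_buf(tp, bp, first, last) */
static void log_buf_range(unsigned first, unsigned last)
{
    printf("log bytes [%u, %u]\n", first, last);
}

/* same shape as xfs_dir2_data_log_entry(): log one entry's bytes only */
static void log_entry(struct block *blk, struct entry *dep)
{
    unsigned first = (unsigned)((char *)dep - (char *)blk);
    unsigned last  = first + sizeof(*dep) - 1;   /* inclusive range */

    log_buf_range(first, last);
}

int main(void)
{
    struct block blk = { { 0x58443244, 4 } };

    log_entry(&blk, &blk.e[2]);  /* logs only the third entry */
    return 0;
}
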
H A Dxfs_dir2_priv.h43 #define xfs_dir3_data_check(dp,bp) __xfs_dir3_data_check(dp, bp);
45 #define xfs_dir3_data_check(dp,bp)
48 extern int __xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
68 struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_buf *bp);
75 struct xfs_buf *bp, int first, int last);
77 struct xfs_buf *bp);
98 struct xfs_buf *bp, int *count);
99 extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
122 extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
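
The xfs_dir2_priv.h lines above show the usual DEBUG toggle: with checks compiled in, xfs_dir3_data_check() expands to a call to __xfs_dir3_data_check(); otherwise it expands to nothing, so non-debug builds pay no cost (note the trailing semicolon in the original define, a quirk this sketch omits). A minimal version of the idiom:

#include <stdio.h>

#ifdef DEBUG
static int __data_check(void *dp, void *bp)
{
    printf("checking dir data block %p for inode %p\n", bp, dp);
    return 0;
}
#define data_check(dp, bp)  __data_check((dp), (bp))
#else
#define data_check(dp, bp)  ((void)0)   /* compiles away entirely */
#endif

int main(void)
{
    int dp = 0, bp = 0;

    data_check(&dp, &bp);   /* a call only when built with -DDEBUG */
    return 0;
}
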
H A Dxfs_inode_buf.c40 xfs_buf_t *bp) xfs_inobp_check()
49 dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize); xfs_inobp_check()
53 i, (long long)bp->b_bn); xfs_inobp_check()
76 struct xfs_buf *bp, xfs_inode_buf_verify()
79 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_inode_buf_verify()
86 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock; xfs_inode_buf_verify()
91 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); xfs_inode_buf_verify()
98 bp->b_flags &= ~XBF_DONE; xfs_inode_buf_verify()
99 xfs_buf_ioerror(bp, -EIO); xfs_inode_buf_verify()
103 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inode_buf_verify()
104 xfs_verifier_error(bp); xfs_inode_buf_verify()
108 (unsigned long long)bp->b_bn, i, xfs_inode_buf_verify()
113 xfs_inobp_check(mp, bp); xfs_inode_buf_verify()
119 struct xfs_buf *bp) xfs_inode_buf_read_verify()
121 xfs_inode_buf_verify(bp, false); xfs_inode_buf_read_verify()
126 struct xfs_buf *bp) xfs_inode_buf_readahead_verify()
128 xfs_inode_buf_verify(bp, true); xfs_inode_buf_readahead_verify()
133 struct xfs_buf *bp) xfs_inode_buf_write_verify()
135 xfs_inode_buf_verify(bp, false); xfs_inode_buf_write_verify()
170 struct xfs_buf *bp; xfs_imap_to_bp() local
175 (int)imap->im_len, buf_flags, &bp, xfs_imap_to_bp()
192 *bpp = bp; xfs_imap_to_bp()
193 *dipp = xfs_buf_offset(bp, imap->im_boffset); xfs_imap_to_bp()
353 xfs_buf_t *bp; xfs_iread() local
384 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags); xfs_iread()
463 xfs_buf_set_ref(bp, XFS_INO_REF); xfs_iread()
478 xfs_trans_brelse(tp, bp); xfs_iread()
38 xfs_inobp_check( xfs_mount_t *mp, xfs_buf_t *bp) xfs_inobp_check() argument
75 xfs_inode_buf_verify( struct xfs_buf *bp, bool readahead) xfs_inode_buf_verify() argument
118 xfs_inode_buf_read_verify( struct xfs_buf *bp) xfs_inode_buf_read_verify() argument
125 xfs_inode_buf_readahead_verify( struct xfs_buf *bp) xfs_inode_buf_readahead_verify() argument
132 xfs_inode_buf_write_verify( struct xfs_buf *bp) xfs_inode_buf_write_verify() argument
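
xfs_inode_buf_verify() above iterates every inode packed into the buffer (one slot per i << inodelog bytes) and checks its magic, with a twist: readahead failures stay quiet (the buffer is just marked not-done with -EIO), while demand reads report corruption loudly. A simplified model; the inode layout and sizes here are invented:

#include <stdint.h>
#include <stdio.h>

#define INODE_MAGIC 0x494e   /* invented stand-in for the dinode magic */
#define INODESZ     256
#define NINODES     4

struct dinode { uint16_t magic; char pad[INODESZ - 2]; };

/* same shape as xfs_inode_buf_verify(): check every inode in the buffer;
 * readahead keeps quiet, a demand read flags the corruption */
static int inode_buf_verify(char *buf, int readahead)
{
    int i;

    for (i = 0; i < NINODES; i++) {
        struct dinode *dip = (struct dinode *)(buf + i * INODESZ);

        if (dip->magic != INODE_MAGIC) {
            if (readahead)
                return -5;    /* -EIO; a later demand read re-verifies */
            fprintf(stderr, "bad inode magic in slot %d\n", i);
            return -117;      /* -EFSCORRUPTED */
        }
    }
    return 0;
}

int main(void)
{
    static char buf[NINODES * INODESZ];
    int i;

    for (i = 0; i < NINODES; i++)
        ((struct dinode *)(buf + i * INODESZ))->magic = INODE_MAGIC;
    printf("verify: %d\n", inode_buf_verify(buf, 0));
    return 0;
}
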
H A Dxfs_rtbitmap.c56 xfs_buf_t *bp; /* block buffer, result */ xfs_rtbuf_get() local
71 mp->m_bsize, 0, &bp, NULL); xfs_rtbuf_get()
74 *bpp = bp; xfs_rtbuf_get()
93 xfs_buf_t *bp; /* buf for the block */ xfs_rtfind_back() local
108 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtfind_back()
112 bufp = bp->b_addr; xfs_rtfind_back()
145 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
159 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
160 error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); xfs_rtfind_back()
164 bufp = bp->b_addr; xfs_rtfind_back()
191 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
205 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
206 error = xfs_rtbuf_get(mp, tp, --block, 0, &bp); xfs_rtfind_back()
210 bufp = bp->b_addr; xfs_rtfind_back()
238 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
248 xfs_trans_brelse(tp, bp); xfs_rtfind_back()
268 xfs_buf_t *bp; /* buf for the block */ xfs_rtfind_forw() local
283 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtfind_forw()
287 bufp = bp->b_addr; xfs_rtfind_forw()
319 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
333 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
334 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtfind_forw()
338 b = bufp = bp->b_addr; xfs_rtfind_forw()
364 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
378 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
379 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtfind_forw()
383 b = bufp = bp->b_addr; xfs_rtfind_forw()
408 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
418 xfs_trans_brelse(tp, bp); xfs_rtfind_forw()
443 xfs_buf_t *bp; /* buffer for the summary block */ xfs_rtmodify_summary_int() local
461 bp = *rbpp; xfs_rtmodify_summary_int()
471 error = xfs_rtbuf_get(mp, tp, sb, 1, &bp); xfs_rtmodify_summary_int()
478 *rbpp = bp; xfs_rtmodify_summary_int()
484 sp = XFS_SUMPTR(mp, bp, so); xfs_rtmodify_summary_int()
486 uint first = (uint)((char *)sp - (char *)bp->b_addr); xfs_rtmodify_summary_int()
489 xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1); xfs_rtmodify_summary_int()
525 xfs_buf_t *bp; /* buf for the block */ xfs_rtmodify_range() local
541 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtmodify_range()
545 bufp = bp->b_addr; xfs_rtmodify_range()
583 xfs_trans_log_buf(tp, bp, xfs_rtmodify_range()
586 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtmodify_range()
590 first = b = bufp = bp->b_addr; xfs_rtmodify_range()
623 xfs_trans_log_buf(tp, bp, xfs_rtmodify_range()
626 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtmodify_range()
630 first = b = bufp = bp->b_addr; xfs_rtmodify_range()
662 xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp), xfs_rtmodify_range()
760 xfs_buf_t *bp; /* buf for the block */ xfs_rtcheck_range() local
776 error = xfs_rtbuf_get(mp, tp, block, 0, &bp); xfs_rtcheck_range()
780 bufp = bp->b_addr; xfs_rtcheck_range()
811 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
826 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
827 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtcheck_range()
831 b = bufp = bp->b_addr; xfs_rtcheck_range()
857 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
872 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
873 error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp); xfs_rtcheck_range()
877 b = bufp = bp->b_addr; xfs_rtcheck_range()
902 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
913 xfs_trans_brelse(tp, bp); xfs_rtcheck_range()
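
The xfs_rtfind_back()/xfs_rtfind_forw() fragments above show the realtime bitmap walk: read one bitmap block with xfs_rtbuf_get(), scan its words, release it with xfs_trans_brelse(), then step to the previous or next block and repeat. A toy forward scan in the same shape; the geometry and the buffer helpers are stand-ins, not the real API:

#include <stdint.h>
#include <stdio.h>

#define WORDS_PER_BLOCK 2    /* toy realtime bitmap geometry */
#define NBLOCKS         3

static uint32_t bitmap[NBLOCKS][WORDS_PER_BLOCK] = {
    { 0xffffffffu, 0xffffffffu },
    { 0xffffffffu, 0xffbfffffu },   /* bit 22 of word 1 is clear */
    { 0xffffffffu, 0xffffffffu },
};

/* stand-ins for xfs_rtbuf_get()/xfs_trans_brelse(): the kernel reads and
 * releases one bitmap block per iteration of the outer loop */
static uint32_t *rtbuf_get(int block) { return bitmap[block]; }
static void rtbuf_release(int block)  { (void)block; }

/* same shape as xfs_rtfind_forw(): scan forward, one block at a time,
 * for the first clear bit; returns the absolute bit number or -1 */
static long rtfind_forw(void)
{
    int block, w, bit;

    for (block = 0; block < NBLOCKS; block++) {
        uint32_t *bufp = rtbuf_get(block);

        for (w = 0; w < WORDS_PER_BLOCK; w++)
            if (bufp[w] != 0xffffffffu)
                for (bit = 0; bit < 32; bit++)
                    if (!(bufp[w] & (1u << bit))) {
                        rtbuf_release(block);
                        return (long)(block * WORDS_PER_BLOCK + w) * 32 + bit;
                    }
        rtbuf_release(block);   /* drop this block before the next read */
    }
    return -1;
}

int main(void)
{
    printf("first clear bit: %ld\n", rtfind_forw());
    return 0;
}
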
H A Dxfs_dir2_node.c41 static int xfs_dir2_leafn_add(struct xfs_buf *bp, xfs_da_args_t *args,
46 static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
56 #define xfs_dir3_leaf_check(dp, bp) \
58 if (!xfs_dir3_leafn_check((dp), (bp))) \
65 struct xfs_buf *bp) xfs_dir3_leafn_check()
67 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leafn_check()
73 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leafn_check()
74 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leafn_check()
82 #define xfs_dir3_leaf_check(dp, bp)
87 struct xfs_buf *bp) xfs_dir3_free_verify()
89 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_verify()
90 struct xfs_dir2_free_hdr *hdr = bp->b_addr; xfs_dir3_free_verify()
93 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_free_verify()
99 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_free_verify()
115 struct xfs_buf *bp) xfs_dir3_free_read_verify()
117 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_read_verify()
120 !xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF)) xfs_dir3_free_read_verify()
121 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_dir3_free_read_verify()
122 else if (!xfs_dir3_free_verify(bp)) xfs_dir3_free_read_verify()
123 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_free_read_verify()
125 if (bp->b_error) xfs_dir3_free_read_verify()
126 xfs_verifier_error(bp); xfs_dir3_free_read_verify()
131 struct xfs_buf *bp) xfs_dir3_free_write_verify()
133 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_free_write_verify()
134 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_dir3_free_write_verify()
135 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; xfs_dir3_free_write_verify()
137 if (!xfs_dir3_free_verify(bp)) { xfs_dir3_free_write_verify()
138 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_dir3_free_write_verify()
139 xfs_verifier_error(bp); xfs_dir3_free_write_verify()
149 xfs_buf_update_cksum(bp, XFS_DIR3_FREE_CRC_OFF); xfs_dir3_free_write_verify()
207 struct xfs_buf *bp; xfs_dir3_free_get_buf() local
212 -1, &bp, XFS_DATA_FORK); xfs_dir3_free_get_buf()
216 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_FREE_BUF); xfs_dir3_free_get_buf()
217 bp->b_ops = &xfs_dir3_free_buf_ops; xfs_dir3_free_get_buf()
223 memset(bp->b_addr, 0, sizeof(struct xfs_dir3_free_hdr)); xfs_dir3_free_get_buf()
227 struct xfs_dir3_free_hdr *hdr3 = bp->b_addr; xfs_dir3_free_get_buf()
231 hdr3->hdr.blkno = cpu_to_be64(bp->b_bn); xfs_dir3_free_get_buf()
236 dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr); xfs_dir3_free_get_buf()
237 *bpp = bp; xfs_dir3_free_get_buf()
247 struct xfs_buf *bp, xfs_dir2_free_log_bests()
254 free = bp->b_addr; xfs_dir2_free_log_bests()
258 xfs_trans_log_buf(args->trans, bp, xfs_dir2_free_log_bests()
270 struct xfs_buf *bp) xfs_dir2_free_log_header()
275 free = bp->b_addr; xfs_dir2_free_log_header()
279 xfs_trans_log_buf(args->trans, bp, 0, xfs_dir2_free_log_header()
378 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_add()
396 leaf = bp->b_addr; xfs_dir2_leafn_add()
453 xfs_dir3_leaf_log_header(args, bp); xfs_dir2_leafn_add()
454 xfs_dir3_leaf_log_ents(args, bp, lfloglow, lfloghigh); xfs_dir2_leafn_add()
455 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_add()
463 struct xfs_buf *bp, xfs_dir2_free_hdr_check()
468 dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr); xfs_dir2_free_hdr_check()
476 #define xfs_dir2_free_hdr_check(dp, bp, db)
486 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lasthash()
489 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir2_leafn_lasthash()
513 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_for_addname()
539 leaf = bp->b_addr; xfs_dir2_leafn_lookup_for_addname()
543 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_lookup_for_addname()
549 index = xfs_dir2_leaf_search_hash(args, bp); xfs_dir2_leafn_lookup_for_addname()
555 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_addname()
642 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_addname()
668 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_for_entry()
691 leaf = bp->b_addr; xfs_dir2_leafn_lookup_for_entry()
695 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_lookup_for_entry()
701 index = xfs_dir2_leaf_search_hash(args, bp); xfs_dir2_leafn_lookup_for_entry()
706 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_entry()
746 curbp = state->extrablk.bp; xfs_dir2_leafn_lookup_for_entry()
774 xfs_trans_brelse(tp, state->extrablk.bp); xfs_dir2_leafn_lookup_for_entry()
780 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_entry()
796 state->extrablk.bp = curbp; xfs_dir2_leafn_lookup_for_entry()
804 if (state->extrablk.bp != curbp) xfs_dir2_leafn_lookup_for_entry()
821 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_lookup_int()
827 return xfs_dir2_leafn_lookup_for_addname(bp, args, indexp, xfs_dir2_leafn_lookup_int()
829 return xfs_dir2_leafn_lookup_for_entry(bp, args, indexp, state); xfs_dir2_leafn_lookup_int()
974 if ((swap = xfs_dir2_leafn_order(dp, blk1->bp, blk2->bp))) { xfs_dir2_leafn_rebalance()
981 leaf1 = blk1->bp->b_addr; xfs_dir2_leafn_rebalance()
982 leaf2 = blk2->bp->b_addr; xfs_dir2_leafn_rebalance()
1020 xfs_dir3_leafn_moveents(args, blk1->bp, &hdr1, ents1, xfs_dir2_leafn_rebalance()
1021 hdr1.count - count, blk2->bp, xfs_dir2_leafn_rebalance()
1024 xfs_dir3_leafn_moveents(args, blk2->bp, &hdr2, ents2, 0, xfs_dir2_leafn_rebalance()
1025 blk1->bp, &hdr1, ents1, xfs_dir2_leafn_rebalance()
1034 xfs_dir3_leaf_log_header(args, blk1->bp); xfs_dir2_leafn_rebalance()
1035 xfs_dir3_leaf_log_header(args, blk2->bp); xfs_dir2_leafn_rebalance()
1037 xfs_dir3_leaf_check(dp, blk1->bp); xfs_dir2_leafn_rebalance()
1038 xfs_dir3_leaf_check(dp, blk2->bp); xfs_dir2_leafn_rebalance()
1155 struct xfs_buf *bp, /* leaf buffer */ xfs_dir2_leafn_remove()
1180 leaf = bp->b_addr; xfs_dir2_leafn_remove()
1203 xfs_dir3_leaf_log_header(args, bp); xfs_dir2_leafn_remove()
1206 xfs_dir3_leaf_log_ents(args, bp, index, index); xfs_dir2_leafn_remove()
1212 dbp = dblk->bp; xfs_dir2_leafn_remove()
1276 dblk->bp = NULL; xfs_dir2_leafn_remove()
1297 xfs_dir3_leaf_check(dp, bp); xfs_dir2_leafn_remove()
1336 &newblk->bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir2_leafn_split()
1355 error = xfs_dir2_leafn_add(oldblk->bp, args, oldblk->index); xfs_dir2_leafn_split()
1357 error = xfs_dir2_leafn_add(newblk->bp, args, newblk->index); xfs_dir2_leafn_split()
1361 oldblk->hashval = xfs_dir2_leafn_lasthash(dp, oldblk->bp, NULL); xfs_dir2_leafn_split()
1362 newblk->hashval = xfs_dir2_leafn_lasthash(dp, newblk->bp, NULL); xfs_dir2_leafn_split()
1363 xfs_dir3_leaf_check(dp, oldblk->bp); xfs_dir2_leafn_split()
1364 xfs_dir3_leaf_check(dp, newblk->bp); xfs_dir2_leafn_split()
1384 struct xfs_buf *bp; /* leaf buffer */ xfs_dir2_leafn_toosmall() local
1402 leaf = blk->bp->b_addr; xfs_dir2_leafn_toosmall()
1405 xfs_dir3_leaf_check(dp, blk->bp); xfs_dir2_leafn_toosmall()
1444 for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { xfs_dir2_leafn_toosmall()
1454 blkno, -1, &bp); xfs_dir2_leafn_toosmall()
1465 leaf = bp->b_addr; xfs_dir2_leafn_toosmall()
1476 xfs_trans_brelse(state->args->trans, bp); xfs_dir2_leafn_toosmall()
1526 drop_leaf = drop_blk->bp->b_addr; xfs_dir2_leafn_unbalance()
1527 save_leaf = save_blk->bp->b_addr; xfs_dir2_leafn_unbalance()
1539 xfs_dir3_leaf_compact(args, &drophdr, drop_blk->bp); xfs_dir2_leafn_unbalance()
1541 xfs_dir3_leaf_compact(args, &savehdr, save_blk->bp); xfs_dir2_leafn_unbalance()
1547 if (xfs_dir2_leafn_order(dp, save_blk->bp, drop_blk->bp)) xfs_dir2_leafn_unbalance()
1548 xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0, xfs_dir2_leafn_unbalance()
1549 save_blk->bp, &savehdr, sents, 0, xfs_dir2_leafn_unbalance()
1552 xfs_dir3_leafn_moveents(args, drop_blk->bp, &drophdr, dents, 0, xfs_dir2_leafn_unbalance()
1553 save_blk->bp, &savehdr, sents, xfs_dir2_leafn_unbalance()
1560 xfs_dir3_leaf_log_header(args, save_blk->bp); xfs_dir2_leafn_unbalance()
1561 xfs_dir3_leaf_log_header(args, drop_blk->bp); xfs_dir2_leafn_unbalance()
1563 xfs_dir3_leaf_check(dp, save_blk->bp); xfs_dir2_leafn_unbalance()
1564 xfs_dir3_leaf_check(dp, drop_blk->bp); xfs_dir2_leafn_unbalance()
1611 rval = xfs_dir2_leafn_add(blk->bp, args, blk->index); xfs_dir2_node_addname()
1680 fbp = fblk->bp; xfs_dir2_node_addname_int()
1796 if (fblk && fblk->bp) xfs_dir2_node_addname_int()
1797 fblk->bp = NULL; xfs_dir2_node_addname_int()
1826 if (fblk && fblk->bp) xfs_dir2_node_addname_int()
1827 fblk->bp = NULL; xfs_dir2_node_addname_int()
2041 ((char *)state->extrablk.bp->b_addr + xfs_dir2_node_lookup()
2049 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_dir2_node_lookup()
2050 state->path.blk[i].bp = NULL; xfs_dir2_node_lookup()
2055 if (state->extravalid && state->extrablk.bp) { xfs_dir2_node_lookup()
2056 xfs_trans_brelse(args->trans, state->extrablk.bp); xfs_dir2_node_lookup()
2057 state->extrablk.bp = NULL; xfs_dir2_node_lookup()
2102 error = xfs_dir2_leafn_remove(args, blk->bp, blk->index, xfs_dir2_node_removename()
2178 leaf = blk->bp->b_addr; xfs_dir2_node_replace()
2185 hdr = state->extrablk.bp->b_addr; xfs_dir2_node_replace()
2198 xfs_dir2_data_log_entry(args, state->extrablk.bp, dep); xfs_dir2_node_replace()
2205 xfs_trans_brelse(args->trans, state->extrablk.bp); xfs_dir2_node_replace()
2206 state->extrablk.bp = NULL; xfs_dir2_node_replace()
2212 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_dir2_node_replace()
2213 state->path.blk[i].bp = NULL; xfs_dir2_node_replace()
2229 struct xfs_buf *bp; /* freespace buffer */ xfs_dir2_node_trim_free() local
2241 error = xfs_dir2_free_try_read(tp, dp, fo, &bp); xfs_dir2_node_trim_free()
2248 if (!bp) xfs_dir2_node_trim_free()
2250 free = bp->b_addr; xfs_dir2_node_trim_free()
2257 xfs_trans_brelse(tp, bp); xfs_dir2_node_trim_free()
2265 xfs_dir2_da_to_db(args->geo, (xfs_dablk_t)fo), bp); xfs_dir2_node_trim_free()
2273 xfs_trans_brelse(tp, bp); xfs_dir2_node_trim_free()
63 xfs_dir3_leafn_check( struct xfs_inode *dp, struct xfs_buf *bp) xfs_dir3_leafn_check() argument
86 xfs_dir3_free_verify( struct xfs_buf *bp) xfs_dir3_free_verify() argument
114 xfs_dir3_free_read_verify( struct xfs_buf *bp) xfs_dir3_free_read_verify() argument
130 xfs_dir3_free_write_verify( struct xfs_buf *bp) xfs_dir3_free_write_verify() argument
245 xfs_dir2_free_log_bests( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir2_free_log_bests() argument
268 xfs_dir2_free_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir2_free_log_header() argument
377 xfs_dir2_leafn_add( struct xfs_buf *bp, xfs_da_args_t *args, int index) xfs_dir2_leafn_add() argument
461 xfs_dir2_free_hdr_check( struct xfs_inode *dp, struct xfs_buf *bp, xfs_dir2_db_t db) xfs_dir2_free_hdr_check() argument
484 xfs_dir2_leafn_lasthash( struct xfs_inode *dp, struct xfs_buf *bp, int *count) xfs_dir2_leafn_lasthash() argument
512 xfs_dir2_leafn_lookup_for_addname( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_for_addname() argument
667 xfs_dir2_leafn_lookup_for_entry( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_for_entry() argument
820 xfs_dir2_leafn_lookup_int( struct xfs_buf *bp, xfs_da_args_t *args, int *indexp, xfs_da_state_t *state) xfs_dir2_leafn_lookup_int() argument
1153 xfs_dir2_leafn_remove( xfs_da_args_t *args, struct xfs_buf *bp, int index, xfs_da_state_blk_t *dblk, int *rval) xfs_dir2_leafn_remove() argument
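
The leafn lookup fragments above (xfs_dir2_leafn_lookup_for_entry() and friends) locate entries by hash and then compare actual names, since several names can collide on one hash value. A self-contained sketch of that general shape, assuming nothing beyond what the hits show: binary-search to the first entry with the wanted hash, then scan forward through the duplicates. The entry layout is invented:

#include <stdio.h>
#include <string.h>

struct leaf_entry { unsigned hashval; const char *name; };

/* hash-ordered leaf; note three names share hash 20 */
static struct leaf_entry ents[] = {
    { 10, "a" }, { 20, "bar" }, { 20, "baz" }, { 20, "rab" }, { 30, "z" },
};
#define NENTS ((int)(sizeof(ents) / sizeof(ents[0])))

static int leafn_lookup(unsigned hash, const char *name)
{
    int lo = 0, hi = NENTS - 1;

    /* find the leftmost entry with hashval >= hash */
    while (lo < hi) {
        int mid = (lo + hi) / 2;

        if (ents[mid].hashval < hash)
            lo = mid + 1;
        else
            hi = mid;
    }
    /* walk the run of equal hashes, comparing the real names */
    for (; lo < NENTS && ents[lo].hashval == hash; lo++)
        if (strcmp(ents[lo].name, name) == 0)
            return lo;           /* found */
    return -1;                   /* not present */
}

int main(void)
{
    printf("baz at index %d\n", leafn_lookup(20, "baz"));
    return 0;
}
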
H A Dxfs_ialloc_btree.c126 struct xfs_buf *bp) xfs_inobt_free_block()
131 fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)); xfs_inobt_free_block()
136 xfs_trans_binval(cur->bc_tp, bp); xfs_inobt_free_block()
220 struct xfs_buf *bp) xfs_inobt_verify()
222 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_inobt_verify()
223 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_inobt_verify()
224 struct xfs_perag *pag = bp->b_pag; xfs_inobt_verify()
244 if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn)) xfs_inobt_verify()
279 struct xfs_buf *bp) xfs_inobt_read_verify()
281 if (!xfs_btree_sblock_verify_crc(bp)) xfs_inobt_read_verify()
282 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_inobt_read_verify()
283 else if (!xfs_inobt_verify(bp)) xfs_inobt_read_verify()
284 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inobt_read_verify()
286 if (bp->b_error) { xfs_inobt_read_verify()
287 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_inobt_read_verify()
288 xfs_verifier_error(bp); xfs_inobt_read_verify()
294 struct xfs_buf *bp) xfs_inobt_write_verify()
296 if (!xfs_inobt_verify(bp)) { xfs_inobt_write_verify()
297 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_inobt_write_verify()
298 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_inobt_write_verify()
299 xfs_verifier_error(bp); xfs_inobt_write_verify()
302 xfs_btree_sblock_calc_crc(bp); xfs_inobt_write_verify()
124 xfs_inobt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_inobt_free_block() argument
219 xfs_inobt_verify( struct xfs_buf *bp) xfs_inobt_verify() argument
278 xfs_inobt_read_verify( struct xfs_buf *bp) xfs_inobt_read_verify() argument
293 xfs_inobt_write_verify( struct xfs_buf *bp) xfs_inobt_write_verify() argument
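
xfs_inobt_free_block() above (like its allocbt counterpart earlier) is three steps: translate the buffer's disk address back into a filesystem block number, hand that block back to the allocator, and invalidate the buffer in the transaction so the freed block is never written out. A stand-alone sketch of that flow, with stand-in helpers:

#include <stdio.h>

#define SECTORS_PER_BLOCK 8   /* toy: 8 x 512-byte sectors per fs block */

struct buf { long daddr; int invalidated; };

static long daddr_to_fsb(long daddr)     { return daddr / SECTORS_PER_BLOCK; }
static int  free_extent(long fsbno)      { printf("free fsb %ld\n", fsbno); return 0; }
static void trans_binval(struct buf *bp) { bp->invalidated = 1; }

/* same shape as xfs_inobt_free_block(): translate, free, invalidate */
static int btree_free_block(struct buf *bp)
{
    long fsbno = daddr_to_fsb(bp->daddr);
    int error = free_extent(fsbno);

    if (error)
        return error;
    trans_binval(bp);   /* a freed block must never hit the disk */
    return 0;
}

int main(void)
{
    struct buf bp = { .daddr = 1024 };

    return btree_free_block(&bp);
}
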
H A Dxfs_attr_leaf.h54 int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
62 int xfs_attr3_leaf_to_shortform(struct xfs_buf *bp,
76 int xfs_attr3_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
81 int xfs_attr3_leaf_list_int(struct xfs_buf *bp,
96 xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
H A Dxfs_attr_leaf.c252 struct xfs_buf *bp) xfs_attr3_leaf_verify()
254 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_verify()
255 struct xfs_attr_leafblock *leaf = bp->b_addr; xfs_attr3_leaf_verify()
261 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; xfs_attr3_leaf_verify()
268 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn) xfs_attr3_leaf_verify()
287 struct xfs_buf *bp) xfs_attr3_leaf_write_verify()
289 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_write_verify()
290 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_attr3_leaf_write_verify()
291 struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr; xfs_attr3_leaf_write_verify()
293 if (!xfs_attr3_leaf_verify(bp)) { xfs_attr3_leaf_write_verify()
294 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_leaf_write_verify()
295 xfs_verifier_error(bp); xfs_attr3_leaf_write_verify()
305 xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF); xfs_attr3_leaf_write_verify()
316 struct xfs_buf *bp) xfs_attr3_leaf_read_verify()
318 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr3_leaf_read_verify()
321 !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) xfs_attr3_leaf_read_verify()
322 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_attr3_leaf_read_verify()
323 else if (!xfs_attr3_leaf_verify(bp)) xfs_attr3_leaf_read_verify()
324 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_attr3_leaf_read_verify()
326 if (bp->b_error) xfs_attr3_leaf_read_verify()
327 xfs_verifier_error(bp); xfs_attr3_leaf_read_verify()
748 struct xfs_buf *bp; xfs_attr_shortform_to_leaf() local
765 bp = NULL; xfs_attr_shortform_to_leaf()
780 error = xfs_attr3_leaf_create(args, blkno, &bp); xfs_attr_shortform_to_leaf()
782 error = xfs_da_shrink_inode(args, 0, bp); xfs_attr_shortform_to_leaf()
783 bp = NULL; xfs_attr_shortform_to_leaf()
810 error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ xfs_attr_shortform_to_leaf()
812 error = xfs_attr3_leaf_add(bp, &nargs); xfs_attr_shortform_to_leaf()
831 struct xfs_buf *bp, xfs_attr_shortform_allfit()
840 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr_shortform_allfit()
842 leaf = bp->b_addr; xfs_attr_shortform_allfit()
873 struct xfs_buf *bp, xfs_attr3_leaf_to_shortform()
893 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); xfs_attr3_leaf_to_shortform()
900 memset(bp->b_addr, 0, args->geo->blksize); xfs_attr3_leaf_to_shortform()
905 error = xfs_da_shrink_inode(args, 0, bp); xfs_attr3_leaf_to_shortform()
1039 struct xfs_buf *bp; xfs_attr3_leaf_create() local
1044 error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp, xfs_attr3_leaf_create()
1048 bp->b_ops = &xfs_attr3_leaf_buf_ops; xfs_attr3_leaf_create()
1049 xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF); xfs_attr3_leaf_create()
1050 leaf = bp->b_addr; xfs_attr3_leaf_create()
1057 struct xfs_da3_blkinfo *hdr3 = bp->b_addr; xfs_attr3_leaf_create()
1061 hdr3->blkno = cpu_to_be64(bp->b_bn); xfs_attr3_leaf_create()
1073 xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_create()
1075 *bpp = bp; xfs_attr3_leaf_create()
1100 error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); xfs_attr3_leaf_split()
1124 error = xfs_attr3_leaf_add(oldblk->bp, state->args); xfs_attr3_leaf_split()
1127 error = xfs_attr3_leaf_add(newblk->bp, state->args); xfs_attr3_leaf_split()
1133 oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); xfs_attr3_leaf_split()
1134 newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); xfs_attr3_leaf_split()
1143 struct xfs_buf *bp, xfs_attr3_leaf_add()
1156 leaf = bp->b_addr; xfs_attr3_leaf_add()
1178 tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i); xfs_attr3_leaf_add()
1196 xfs_attr3_leaf_compact(args, &ichdr, bp); xfs_attr3_leaf_add()
1207 tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0); xfs_attr3_leaf_add()
1211 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add()
1222 struct xfs_buf *bp, xfs_attr3_leaf_add_work()
1237 leaf = bp->b_addr; xfs_attr3_leaf_add_work()
1249 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work()
1279 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work()
1312 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_add_work()
1344 struct xfs_buf *bp) xfs_attr3_leaf_compact()
1355 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); xfs_attr3_leaf_compact()
1356 memset(bp->b_addr, 0, args->geo->blksize); xfs_attr3_leaf_compact()
1358 leaf_dst = bp->b_addr; xfs_attr3_leaf_compact()
1365 memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src)); xfs_attr3_leaf_compact()
1390 xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_compact()
1471 leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_rebalance()
1472 leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_rebalance()
1487 if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) { xfs_attr3_leaf_rebalance()
1500 leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_rebalance()
1501 leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_rebalance()
1537 xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp); xfs_attr3_leaf_rebalance()
1566 xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp); xfs_attr3_leaf_rebalance()
1577 xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_rebalance()
1578 xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1); xfs_attr3_leaf_rebalance()
1654 struct xfs_attr_leafblock *leaf1 = blk1->bp->b_addr; xfs_attr3_leaf_figure_balance()
1655 struct xfs_attr_leafblock *leaf2 = blk2->bp->b_addr; xfs_attr3_leaf_figure_balance()
1752 struct xfs_buf *bp; xfs_attr3_leaf_toosmall() local
1768 leaf = blk->bp->b_addr; xfs_attr3_leaf_toosmall()
1821 blkno, -1, &bp); xfs_attr3_leaf_toosmall()
1825 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr); xfs_attr3_leaf_toosmall()
1834 xfs_trans_brelse(state->args->trans, bp); xfs_attr3_leaf_toosmall()
1873 struct xfs_buf *bp, xfs_attr3_leaf_remove()
1889 leaf = bp->b_addr; xfs_attr3_leaf_remove()
1973 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove()
1980 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove()
2008 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_remove()
2031 struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr; xfs_attr3_leaf_unbalance()
2032 struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr; xfs_attr3_leaf_unbalance()
2039 drop_leaf = drop_blk->bp->b_addr; xfs_attr3_leaf_unbalance()
2040 save_leaf = save_blk->bp->b_addr; xfs_attr3_leaf_unbalance()
2060 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, xfs_attr3_leaf_unbalance()
2061 drop_blk->bp, &drophdr)) { xfs_attr3_leaf_unbalance()
2098 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr, xfs_attr3_leaf_unbalance()
2099 drop_blk->bp, &drophdr)) { xfs_attr3_leaf_unbalance()
2124 xfs_trans_log_buf(state->args->trans, save_blk->bp, 0, xfs_attr3_leaf_unbalance()
2153 struct xfs_buf *bp, xfs_attr3_leaf_lookup_int()
2168 leaf = bp->b_addr; xfs_attr3_leaf_lookup_int()
2263 struct xfs_buf *bp, xfs_attr3_leaf_getvalue()
2273 leaf = bp->b_addr; xfs_attr3_leaf_getvalue()
2470 struct xfs_buf *bp, xfs_attr_leaf_lasthash()
2475 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_attr_leaf_lasthash()
2477 xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr); xfs_attr_leaf_lasthash()
2478 entries = xfs_attr3_leaf_entryp(bp->b_addr); xfs_attr_leaf_lasthash()
2549 struct xfs_buf *bp; xfs_attr3_leaf_clearflag() local
2562 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr3_leaf_clearflag()
2566 leaf = bp->b_addr; xfs_attr3_leaf_clearflag()
2590 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_clearflag()
2598 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_clearflag()
2618 struct xfs_buf *bp; xfs_attr3_leaf_setflag() local
2629 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr3_leaf_setflag()
2633 leaf = bp->b_addr; xfs_attr3_leaf_setflag()
2643 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_setflag()
2649 xfs_trans_log_buf(args->trans, bp, xfs_attr3_leaf_setflag()
251 xfs_attr3_leaf_verify( struct xfs_buf *bp) xfs_attr3_leaf_verify() argument
286 xfs_attr3_leaf_write_verify( struct xfs_buf *bp) xfs_attr3_leaf_write_verify() argument
315 xfs_attr3_leaf_read_verify( struct xfs_buf *bp) xfs_attr3_leaf_read_verify() argument
830 xfs_attr_shortform_allfit( struct xfs_buf *bp, struct xfs_inode *dp) xfs_attr_shortform_allfit() argument
872 xfs_attr3_leaf_to_shortform( struct xfs_buf *bp, struct xfs_da_args *args, int forkoff) xfs_attr3_leaf_to_shortform() argument
1142 xfs_attr3_leaf_add( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_add() argument
1221 xfs_attr3_leaf_add_work( struct xfs_buf *bp, struct xfs_attr3_icleaf_hdr *ichdr, struct xfs_da_args *args, int mapindex) xfs_attr3_leaf_add_work() argument
1341 xfs_attr3_leaf_compact( struct xfs_da_args *args, struct xfs_attr3_icleaf_hdr *ichdr_dst, struct xfs_buf *bp) xfs_attr3_leaf_compact() argument
1872 xfs_attr3_leaf_remove( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_remove() argument
2152 xfs_attr3_leaf_lookup_int( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_lookup_int() argument
2262 xfs_attr3_leaf_getvalue( struct xfs_buf *bp, struct xfs_da_args *args) xfs_attr3_leaf_getvalue() argument
2469 xfs_attr_leaf_lasthash( struct xfs_buf *bp, int *count) xfs_attr_leaf_lasthash() argument
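
xfs_attr3_leaf_compact() above shows the scratch-buffer dance: memcpy the whole leaf block aside, memset the original to zero, then rebuild it with the live entries packed together. The same shape boiled down to single bytes (a real leaf repacks variable-size name/value entries, not bytes):

#include <stdio.h>
#include <string.h>

#define BLK 64

/* toy leaf: fixed-size slots, 0 = free.  Copy the block to a scratch
 * buffer, zero the block, then re-pack the live slots contiguously -
 * the memcpy/memset shape of xfs_attr3_leaf_compact(). */
static void leaf_compact(char block[BLK])
{
    char tmp[BLK];
    int src, dst = 0;

    memcpy(tmp, block, BLK);
    memset(block, 0, BLK);
    for (src = 0; src < BLK; src++)
        if (tmp[src])
            block[dst++] = tmp[src];
}

int main(void)
{
    char block[BLK] = { 'a', 0, 0, 'b', 0, 'c' };

    leaf_compact(block);
    printf("%c%c%c\n", block[0], block[1], block[2]);   /* -> abc */
    return 0;
}

The kernel pays this copy only when a leaf has enough total free space for an insert but no single contiguous run large enough.
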
H A Dxfs_dir2_leaf.c44 struct xfs_buf *bp, int first, int last);
46 struct xfs_buf *bp);
53 #define xfs_dir3_leaf_check(dp, bp) \
55 if (!xfs_dir3_leaf1_check((dp), (bp))) \
62 struct xfs_buf *bp) xfs_dir3_leaf1_check()
64 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf1_check()
70 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf1_check()
71 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf1_check()
79 #define xfs_dir3_leaf_check(dp, bp)
147 struct xfs_buf *bp, xfs_dir3_leaf_verify()
150 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_dir3_leaf_verify()
151 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_verify()
156 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf_verify()
166 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf_verify()
180 struct xfs_buf *bp, __read_verify()
183 struct xfs_mount *mp = bp->b_target->bt_mount; __read_verify()
186 !xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF)) __read_verify()
187 xfs_buf_ioerror(bp, -EFSBADCRC); __read_verify()
188 else if (!xfs_dir3_leaf_verify(bp, magic)) __read_verify()
189 xfs_buf_ioerror(bp, -EFSCORRUPTED); __read_verify()
191 if (bp->b_error) __read_verify()
192 xfs_verifier_error(bp); __read_verify()
197 struct xfs_buf *bp, __write_verify()
200 struct xfs_mount *mp = bp->b_target->bt_mount; __write_verify()
201 struct xfs_buf_log_item *bip = bp->b_fspriv; __write_verify()
202 struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; __write_verify()
204 if (!xfs_dir3_leaf_verify(bp, magic)) { __write_verify()
205 xfs_buf_ioerror(bp, -EFSCORRUPTED); __write_verify()
206 xfs_verifier_error(bp); __write_verify()
216 xfs_buf_update_cksum(bp, XFS_DIR3_LEAF_CRC_OFF); __write_verify()
221 struct xfs_buf *bp) xfs_dir3_leaf1_read_verify()
223 __read_verify(bp, XFS_DIR2_LEAF1_MAGIC); xfs_dir3_leaf1_read_verify()
228 struct xfs_buf *bp) xfs_dir3_leaf1_write_verify()
230 __write_verify(bp, XFS_DIR2_LEAF1_MAGIC); xfs_dir3_leaf1_write_verify()
235 struct xfs_buf *bp) xfs_dir3_leafn_read_verify()
237 __read_verify(bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir3_leafn_read_verify()
242 struct xfs_buf *bp) xfs_dir3_leafn_write_verify()
244 __write_verify(bp, XFS_DIR2_LEAFN_MAGIC); xfs_dir3_leafn_write_verify()
300 struct xfs_buf *bp, xfs_dir3_leaf_init()
304 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_init()
309 struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr; xfs_dir3_leaf_init()
316 leaf3->info.blkno = cpu_to_be64(bp->b_bn); xfs_dir3_leaf_init()
333 bp->b_ops = &xfs_dir3_leaf1_buf_ops; xfs_dir3_leaf_init()
334 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAF1_BUF); xfs_dir3_leaf_init()
336 bp->b_ops = &xfs_dir3_leafn_buf_ops; xfs_dir3_leaf_init()
337 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF); xfs_dir3_leaf_init()
351 struct xfs_buf *bp; xfs_dir3_leaf_get_buf() local
359 -1, &bp, XFS_DATA_FORK); xfs_dir3_leaf_get_buf()
363 xfs_dir3_leaf_init(mp, tp, bp, dp->i_ino, magic); xfs_dir3_leaf_get_buf()
364 xfs_dir3_leaf_log_header(args, bp); xfs_dir3_leaf_get_buf()
366 xfs_dir3_leaf_log_tail(args, bp); xfs_dir3_leaf_get_buf()
367 *bpp = bp; xfs_dir3_leaf_get_buf()
916 struct xfs_buf *bp) /* leaf buffer */ xfs_dir3_leaf_compact()
925 leaf = bp->b_addr; xfs_dir3_leaf_compact()
954 xfs_dir3_leaf_log_header(args, bp); xfs_dir3_leaf_compact()
956 xfs_dir3_leaf_log_ents(args, bp, loglow, to - 1); xfs_dir3_leaf_compact()
1059 struct xfs_buf *bp, /* leaf buffer */ xfs_dir3_leaf_log_bests()
1065 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_bests()
1074 xfs_trans_log_buf(args->trans, bp, xfs_dir3_leaf_log_bests()
1085 struct xfs_buf *bp, xfs_dir3_leaf_log_ents()
1091 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_ents()
1102 xfs_trans_log_buf(args->trans, bp, xfs_dir3_leaf_log_ents()
1113 struct xfs_buf *bp) xfs_dir3_leaf_log_header()
1115 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_header()
1122 xfs_trans_log_buf(args->trans, bp, xfs_dir3_leaf_log_header()
1133 struct xfs_buf *bp) xfs_dir3_leaf_log_tail()
1135 struct xfs_dir2_leaf *leaf = bp->b_addr; xfs_dir3_leaf_log_tail()
1144 xfs_trans_log_buf(args->trans, bp, (uint)((char *)ltp - (char *)leaf), xfs_dir3_leaf_log_tail()
1742 lbp = state->path.blk[0].bp; xfs_dir2_node_to_leaf()
1822 state->path.blk[0].bp = NULL; xfs_dir2_node_to_leaf()
60 xfs_dir3_leaf1_check( struct xfs_inode *dp, struct xfs_buf *bp) xfs_dir3_leaf1_check() argument
146 xfs_dir3_leaf_verify( struct xfs_buf *bp, __uint16_t magic) xfs_dir3_leaf_verify() argument
179 __read_verify( struct xfs_buf *bp, __uint16_t magic) __read_verify() argument
196 __write_verify( struct xfs_buf *bp, __uint16_t magic) __write_verify() argument
220 xfs_dir3_leaf1_read_verify( struct xfs_buf *bp) xfs_dir3_leaf1_read_verify() argument
227 xfs_dir3_leaf1_write_verify( struct xfs_buf *bp) xfs_dir3_leaf1_write_verify() argument
234 xfs_dir3_leafn_read_verify( struct xfs_buf *bp) xfs_dir3_leafn_read_verify() argument
241 xfs_dir3_leafn_write_verify( struct xfs_buf *bp) xfs_dir3_leafn_write_verify() argument
297 xfs_dir3_leaf_init( struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_buf *bp, xfs_ino_t owner, __uint16_t type) xfs_dir3_leaf_init() argument
913 xfs_dir3_leaf_compact( xfs_da_args_t *args, struct xfs_dir3_icleaf_hdr *leafhdr, struct xfs_buf *bp) xfs_dir3_leaf_compact() argument
1057 xfs_dir3_leaf_log_bests( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir3_leaf_log_bests() argument
1083 xfs_dir3_leaf_log_ents( struct xfs_da_args *args, struct xfs_buf *bp, int first, int last) xfs_dir3_leaf_log_ents() argument
1111 xfs_dir3_leaf_log_header( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir3_leaf_log_header() argument
1131 xfs_dir3_leaf_log_tail( struct xfs_da_args *args, struct xfs_buf *bp) xfs_dir3_leaf_log_tail() argument
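
xfs_dir2_leaf.c shares one verifier body between two block types: __read_verify()/__write_verify() take the expected magic as a parameter, and the xfs_dir3_leaf1_* / xfs_dir3_leafn_* entry points are one-line wrappers. A minimal reproduction of that layering; the struct and magic values here are illustrative only:

#include <stdio.h>

#define LEAF1_MAGIC 0xd2f1   /* illustrative values */
#define LEAFN_MAGIC 0xd2ff

struct buf { unsigned magic; int error; };

/* one parameterized checker... */
static void __leaf_read_verify(struct buf *bp, unsigned magic)
{
    if (bp->magic != magic)
        bp->error = -117;    /* -EFSCORRUPTED */
}

/* ...wrapped once per block type, the xfs_dir2_leaf.c shape */
static void leaf1_read_verify(struct buf *bp) { __leaf_read_verify(bp, LEAF1_MAGIC); }
static void leafn_read_verify(struct buf *bp) { __leaf_read_verify(bp, LEAFN_MAGIC); }

int main(void)
{
    struct buf bp = { .magic = LEAF1_MAGIC };

    leaf1_read_verify(&bp);
    printf("as leaf1: %d\n", bp.error);
    bp.error = 0;
    leafn_read_verify(&bp);      /* wrong magic for a leafn block */
    printf("as leafn: %d\n", bp.error);
    return 0;
}
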
H A Dxfs_attr.c570 struct xfs_buf *bp; xfs_attr_leaf_addname() local
580 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_addname()
588 retval = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_addname()
590 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_addname()
594 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_addname()
622 retval = xfs_attr3_leaf_add(bp, args); xfs_attr_leaf_addname()
719 -1, &bp); xfs_attr_leaf_addname()
723 xfs_attr3_leaf_remove(bp, args); xfs_attr_leaf_addname()
728 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_leaf_addname()
730 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_leaf_addname()
731 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_leaf_addname()
777 struct xfs_buf *bp; xfs_attr_leaf_removename() local
787 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_removename()
791 error = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_removename()
793 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_removename()
797 xfs_attr3_leaf_remove(bp, args); xfs_attr_leaf_removename()
802 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_leaf_removename()
804 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_leaf_removename()
805 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_leaf_removename()
836 struct xfs_buf *bp; xfs_attr_leaf_get() local
842 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); xfs_attr_leaf_get()
846 error = xfs_attr3_leaf_lookup_int(bp, args); xfs_attr_leaf_get()
848 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_get()
851 error = xfs_attr3_leaf_getvalue(bp, args); xfs_attr_leaf_get()
852 xfs_trans_brelse(args->trans, bp); xfs_attr_leaf_get()
929 retval = xfs_attr3_leaf_add(blk->bp, state->args); xfs_attr_node_addname()
1080 error = xfs_attr3_leaf_remove(blk->bp, args); xfs_attr_node_addname()
1148 struct xfs_buf *bp; xfs_attr_node_removename() local
1177 ASSERT(blk->bp != NULL); xfs_attr_node_removename()
1214 retval = xfs_attr3_leaf_remove(blk->bp, args); xfs_attr_node_removename()
1257 ASSERT(state->path.blk[0].bp); xfs_attr_node_removename()
1258 state->path.blk[0].bp = NULL; xfs_attr_node_removename()
1260 error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp); xfs_attr_node_removename()
1264 if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_attr_node_removename()
1266 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); xfs_attr_node_removename()
1267 /* bp is gone due to xfs_da_shrink_inode */ xfs_attr_node_removename()
1288 xfs_trans_brelse(args->trans, bp); xfs_attr_node_removename()
1319 if (blk->bp) { xfs_attr_fillstate()
1320 blk->disk_blkno = XFS_BUF_ADDR(blk->bp); xfs_attr_fillstate()
1321 blk->bp = NULL; xfs_attr_fillstate()
1334 if (blk->bp) { xfs_attr_fillstate()
1335 blk->disk_blkno = XFS_BUF_ADDR(blk->bp); xfs_attr_fillstate()
1336 blk->bp = NULL; xfs_attr_fillstate()
1371 &blk->bp, XFS_ATTR_FORK); xfs_attr_refillstate()
1375 blk->bp = NULL; xfs_attr_refillstate()
1390 &blk->bp, XFS_ATTR_FORK); xfs_attr_refillstate()
1394 blk->bp = NULL; xfs_attr_refillstate()
1430 ASSERT(blk->bp != NULL); xfs_attr_node_get()
1436 retval = xfs_attr3_leaf_getvalue(blk->bp, args); xfs_attr_node_get()
1447 xfs_trans_brelse(args->trans, state->path.blk[i].bp); xfs_attr_node_get()
1448 state->path.blk[i].bp = NULL; xfs_attr_node_get()
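
xfs_attr_fillstate()/xfs_attr_refillstate() above handle the rule that buffers cannot be held across a transaction roll: fillstate swaps each held buffer for its disk address (blk->disk_blkno = XFS_BUF_ADDR(blk->bp); blk->bp = NULL), and refillstate reads the buffers back by those addresses afterwards. A userspace sketch of the same bookkeeping, with malloc/free standing in for buffer get/release:

#include <stdio.h>
#include <stdlib.h>

struct buf { long daddr; };

struct state_blk {
    struct buf *bp;       /* held buffer, or NULL */
    long disk_blkno;      /* remembered address while unheld */
};

static struct buf *read_buf(long daddr)
{
    struct buf *bp = malloc(sizeof(*bp));

    if (bp)
        bp->daddr = daddr;
    return bp;
}

/* fillstate shape: drop the buffers, keep only their addresses */
static void fillstate(struct state_blk *blk, int n)
{
    int i;

    for (i = 0; i < n; i++, blk++)
        if (blk->bp) {
            blk->disk_blkno = blk->bp->daddr;
            free(blk->bp);            /* the kernel releases; we free */
            blk->bp = NULL;
        }
}

/* refillstate shape: re-read each block by its remembered address */
static int refillstate(struct state_blk *blk, int n)
{
    int i;

    for (i = 0; i < n; i++, blk++)
        if (blk->disk_blkno) {
            blk->bp = read_buf(blk->disk_blkno);
            if (!blk->bp)
                return -5;            /* -EIO */
        }
    return 0;
}

int main(void)
{
    struct state_blk path[2] = { { read_buf(8), 0 }, { read_buf(16), 0 } };

    fillstate(path, 2);               /* ...transaction rolls here... */
    if (refillstate(path, 2) == 0)
        printf("refilled daddrs %ld, %ld\n",
               path[0].bp->daddr, path[1].bp->daddr);
    free(path[0].bp);
    free(path[1].bp);
    return 0;
}
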
H A Dxfs_btree.c60 struct xfs_buf *bp) /* buffer for block, if any */ xfs_btree_check_lblock()
72 bp ? bp->b_bn : XFS_BUF_DADDR_NULL); xfs_btree_check_lblock()
92 if (bp) xfs_btree_check_lblock()
93 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_btree_check_lblock()
105 struct xfs_buf *bp) /* buffer containing block */ xfs_btree_check_sblock()
123 bp ? bp->b_bn : XFS_BUF_DADDR_NULL); xfs_btree_check_sblock()
141 if (bp) xfs_btree_check_sblock()
142 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_btree_check_sblock()
157 struct xfs_buf *bp) /* buffer containing block, if any */ xfs_btree_check_block()
160 return xfs_btree_check_lblock(cur, block, level, bp); xfs_btree_check_block()
162 return xfs_btree_check_sblock(cur, block, level, bp); xfs_btree_check_block()
231 struct xfs_buf *bp) xfs_btree_lblock_calc_crc()
233 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_lblock_calc_crc()
234 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_btree_lblock_calc_crc()
236 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_lblock_calc_crc()
240 xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); xfs_btree_lblock_calc_crc()
245 struct xfs_buf *bp) xfs_btree_lblock_verify_crc()
247 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_lblock_verify_crc()
248 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_btree_lblock_verify_crc()
253 return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); xfs_btree_lblock_verify_crc()
269 struct xfs_buf *bp) xfs_btree_sblock_calc_crc()
271 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_sblock_calc_crc()
272 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_btree_sblock_calc_crc()
274 if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) xfs_btree_sblock_calc_crc()
278 xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); xfs_btree_sblock_calc_crc()
283 struct xfs_buf *bp) xfs_btree_sblock_verify_crc()
285 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_sblock_verify_crc()
286 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_btree_sblock_verify_crc()
291 return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); xfs_btree_sblock_verify_crc()
344 xfs_buf_t *bp; /* btree block's buffer pointer */ xfs_btree_dup_cursor() local
370 bp = cur->bc_bufs[i]; xfs_btree_dup_cursor()
371 if (bp) { xfs_btree_dup_cursor()
373 XFS_BUF_ADDR(bp), mp->m_bsize, xfs_btree_dup_cursor()
374 0, &bp, xfs_btree_dup_cursor()
382 new->bc_bufs[i] = bp; xfs_btree_dup_cursor()
606 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_islastblock() local
608 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_islastblock()
609 xfs_btree_check_block(cur, block, level, bp); xfs_btree_islastblock()
626 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_firstrec() local
631 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_firstrec()
632 xfs_btree_check_block(cur, block, level, bp); xfs_btree_firstrec()
655 xfs_buf_t *bp; /* buffer containing block */ xfs_btree_lastrec() local
660 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_lastrec()
661 xfs_btree_check_block(cur, block, level, bp); xfs_btree_lastrec()
724 struct xfs_buf *bp; /* return value */ xfs_btree_read_bufl() local
731 mp->m_bsize, lock, &bp, ops); xfs_btree_read_bufl()
734 if (bp) xfs_btree_read_bufl()
735 xfs_buf_set_ref(bp, refval); xfs_btree_read_bufl()
736 *bpp = bp; xfs_btree_read_bufl()
898 * Set the buffer for level "lev" in the cursor to bp, releasing
905 xfs_buf_t *bp) /* new buffer to set */ xfs_btree_setbuf()
911 cur->bc_bufs[lev] = bp; xfs_btree_setbuf()
914 b = XFS_BUF_TO_BLOCK(bp); xfs_btree_setbuf()
1040 struct xfs_buf *bp, xfs_btree_init_block()
1047 xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn, xfs_btree_init_block()
1054 struct xfs_buf *bp, xfs_btree_init_block_cur()
1071 xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn, xfs_btree_init_block_cur()
1103 struct xfs_buf *bp, xfs_btree_buf_to_ptr()
1108 XFS_BUF_ADDR(bp))); xfs_btree_buf_to_ptr()
1111 XFS_BUF_ADDR(bp))); xfs_btree_buf_to_ptr()
1118 struct xfs_buf *bp) xfs_btree_set_refs()
1123 xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF); xfs_btree_set_refs()
1127 xfs_buf_set_ref(bp, XFS_INO_BTREE_REF); xfs_btree_set_refs()
1130 xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF); xfs_btree_set_refs()
1299 struct xfs_buf *bp, xfs_btree_log_keys()
1304 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_keys()
1306 if (bp) { xfs_btree_log_keys()
1307 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_keys()
1308 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_keys()
1325 struct xfs_buf *bp, xfs_btree_log_recs()
1330 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_recs()
1332 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_recs()
1333 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_recs()
1346 struct xfs_buf *bp, /* buffer containing btree block */ xfs_btree_log_ptrs()
1351 XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); xfs_btree_log_ptrs()
1353 if (bp) { xfs_btree_log_ptrs()
1354 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_btree_log_ptrs()
1357 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_ptrs()
1358 xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_log_ptrs()
1375 struct xfs_buf *bp, /* buffer containing btree block */ xfs_btree_log_block()
1409 XFS_BTREE_TRACE_ARGBI(cur, bp, fields); xfs_btree_log_block()
1411 if (bp) { xfs_btree_log_block()
1432 xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_btree_log_block()
1433 xfs_trans_log_buf(cur->bc_tp, bp, first, last); xfs_btree_log_block()
1454 struct xfs_buf *bp; xfs_btree_increment() local
1467 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_increment()
1470 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_increment()
1491 block = xfs_btree_get_block(cur, lev, &bp); xfs_btree_increment()
1494 error = xfs_btree_check_block(cur, block, lev, bp); xfs_btree_increment()
1523 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_increment()
1528 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_increment()
1532 xfs_btree_setbuf(cur, lev, bp); xfs_btree_increment()
1561 xfs_buf_t *bp; xfs_btree_decrement() local
1579 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_decrement()
1582 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_decrement()
1622 for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { xfs_btree_decrement()
1627 error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp); xfs_btree_decrement()
1630 xfs_btree_setbuf(cur, lev, bp); xfs_btree_decrement()
1655 struct xfs_buf *bp; /* buffer pointer for btree block */ xfs_btree_lookup_get_block() local
1671 bp = cur->bc_bufs[level]; xfs_btree_lookup_get_block()
1672 if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) { xfs_btree_lookup_get_block()
1673 *blkp = XFS_BUF_TO_BLOCK(bp); xfs_btree_lookup_get_block()
1677 error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); xfs_btree_lookup_get_block()
1681 xfs_btree_setbuf(cur, level, bp); xfs_btree_lookup_get_block()
1877 struct xfs_buf *bp; xfs_btree_updkey() local
1896 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_updkey()
1898 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_updkey()
1907 xfs_btree_log_keys(cur, bp, ptr, ptr); xfs_btree_updkey()
1925 struct xfs_buf *bp; xfs_btree_update() local
1934 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_update()
1937 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_update()
1947 xfs_btree_log_recs(cur, bp, ptr, ptr); xfs_btree_update()
2719 struct xfs_buf *bp; /* buffer containing block */ xfs_btree_new_root() local
2759 block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); xfs_btree_new_root()
2762 error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); xfs_btree_new_root()
2770 lbp = bp; xfs_btree_new_root()
2776 bp = rbp; xfs_btree_new_root()
2780 rbp = bp; xfs_btree_new_root()
2787 bp = lbp; xfs_btree_new_root()
2918 struct xfs_buf *bp; /* buffer for block */ xfs_btree_insrec() local
2965 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec()
2969 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec()
3001 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_insrec()
3005 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_insrec()
3046 xfs_btree_log_ptrs(cur, bp, ptr, numrecs); xfs_btree_insrec()
3047 xfs_btree_log_keys(cur, bp, ptr, numrecs); xfs_btree_insrec()
3065 xfs_btree_log_recs(cur, bp, ptr, numrecs); xfs_btree_insrec()
3075 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_insrec()
3305 struct xfs_buf *bp, xfs_btree_kill_root()
3320 error = cur->bc_ops->free_block(cur, bp); xfs_btree_kill_root()
3370 struct xfs_buf *bp; /* buffer for block */ xfs_btree_delrec() local
3403 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_delrec()
3407 error = xfs_btree_check_block(cur, block, level, bp); xfs_btree_delrec()
3442 xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3443 xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3458 xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); xfs_btree_delrec()
3476 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); xfs_btree_delrec()
3520 error = xfs_btree_kill_root(cur, bp, level, pp); xfs_btree_delrec()
3735 rbp = bp; xfs_btree_delrec()
3752 lbp = bp; xfs_btree_delrec()
3842 if (bp != lbp) { xfs_btree_delrec()
3936 struct xfs_buf *bp; /* buffer pointer */ xfs_btree_get_rec() local
3943 block = xfs_btree_get_block(cur, 0, &bp); xfs_btree_get_rec()
3946 error = xfs_btree_check_block(cur, block, 0, bp); xfs_btree_get_rec()
3999 struct xfs_buf *bp; xfs_btree_block_change_owner() local
4006 block = xfs_btree_get_block(cur, level, &bp); xfs_btree_block_change_owner()
4019 if (bp) { xfs_btree_block_change_owner()
4021 xfs_trans_ordered_buf(cur->bc_tp, bp); xfs_btree_block_change_owner()
4022 xfs_btree_log_block(cur, bp, XFS_BB_OWNER); xfs_btree_block_change_owner()
4024 xfs_buf_delwri_queue(bp, buffer_list); xfs_btree_block_change_owner()
56 xfs_btree_check_lblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_lblock() argument
101 xfs_btree_check_sblock( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_sblock() argument
153 xfs_btree_check_block( struct xfs_btree_cur *cur, struct xfs_btree_block *block, int level, struct xfs_buf *bp) xfs_btree_check_block() argument
230 xfs_btree_lblock_calc_crc( struct xfs_buf *bp) xfs_btree_lblock_calc_crc() argument
244 xfs_btree_lblock_verify_crc( struct xfs_buf *bp) xfs_btree_lblock_verify_crc() argument
268 xfs_btree_sblock_calc_crc( struct xfs_buf *bp) xfs_btree_sblock_calc_crc() argument
282 xfs_btree_sblock_verify_crc( struct xfs_buf *bp) xfs_btree_sblock_verify_crc() argument
902 xfs_btree_setbuf( xfs_btree_cur_t *cur, int lev, xfs_buf_t *bp) xfs_btree_setbuf() argument
1038 xfs_btree_init_block( struct xfs_mount *mp, struct xfs_buf *bp, __u32 magic, __u16 level, __u16 numrecs, __u64 owner, unsigned int flags) xfs_btree_init_block() argument
1052 xfs_btree_init_block_cur( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, int numrecs) xfs_btree_init_block_cur() argument
1101 xfs_btree_buf_to_ptr( struct xfs_btree_cur *cur, struct xfs_buf *bp, union xfs_btree_ptr *ptr) xfs_btree_buf_to_ptr() argument
1116 xfs_btree_set_refs( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_btree_set_refs() argument
1297 xfs_btree_log_keys( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_keys() argument
1323 xfs_btree_log_recs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_recs() argument
1344 xfs_btree_log_ptrs( struct xfs_btree_cur *cur, struct xfs_buf *bp, int first, int last) xfs_btree_log_ptrs() argument
1373 xfs_btree_log_block( struct xfs_btree_cur *cur, struct xfs_buf *bp, int fields) xfs_btree_log_block() argument
3303 xfs_btree_kill_root( struct xfs_btree_cur *cur, struct xfs_buf *bp, int level, union xfs_btree_ptr *newroot) xfs_btree_kill_root() argument
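The xfs_btree_log_{keys,recs,ptrs} helpers above all reduce to the same idiom: translate an index range into a byte range inside the buffer, tag the buffer's log-item type, and hand only that dirty region to the transaction. A condensed sketch of the buffer-attached path, following the 4.4 sources:

    /* Body of xfs_btree_log_keys() when a buffer is attached;
     * xfs_btree_key_offset() converts a key index into a byte
     * offset within the btree block. */
    xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
    xfs_trans_log_buf(cur->bc_tp, bp,
                      xfs_btree_key_offset(cur, first),
                      xfs_btree_key_offset(cur, last + 1) - 1);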
xfs_bmap_btree.c: 522 struct xfs_buf *bp) xfs_bmbt_free_block()
527 xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp)); xfs_bmbt_free_block()
534 xfs_trans_binval(tp, bp); xfs_bmbt_free_block()
641 struct xfs_buf *bp) xfs_bmbt_verify()
643 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_bmbt_verify()
644 struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); xfs_bmbt_verify()
653 if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn) xfs_bmbt_verify()
696 struct xfs_buf *bp) xfs_bmbt_read_verify()
698 if (!xfs_btree_lblock_verify_crc(bp)) xfs_bmbt_read_verify()
699 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_bmbt_read_verify()
700 else if (!xfs_bmbt_verify(bp)) xfs_bmbt_read_verify()
701 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_bmbt_read_verify()
703 if (bp->b_error) { xfs_bmbt_read_verify()
704 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_bmbt_read_verify()
705 xfs_verifier_error(bp); xfs_bmbt_read_verify()
711 struct xfs_buf *bp) xfs_bmbt_write_verify()
713 if (!xfs_bmbt_verify(bp)) { xfs_bmbt_write_verify()
714 trace_xfs_btree_corrupt(bp, _RET_IP_); xfs_bmbt_write_verify()
715 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_bmbt_write_verify()
716 xfs_verifier_error(bp); xfs_bmbt_write_verify()
719 xfs_btree_lblock_calc_crc(bp); xfs_bmbt_write_verify()
520 xfs_bmbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) xfs_bmbt_free_block() argument
640 xfs_bmbt_verify( struct xfs_buf *bp) xfs_bmbt_verify() argument
695 xfs_bmbt_read_verify( struct xfs_buf *bp) xfs_bmbt_read_verify() argument
710 xfs_bmbt_write_verify( struct xfs_buf *bp) xfs_bmbt_write_verify() argument
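Note the ordering in the pair above: the read verifier checks the CRC before the structural checks, while the write verifier runs the structural checks and then recomputes the CRC (after the LSN is stamped). Both callbacks are wired into a buffer-ops table; a sketch of the 4.4-era pairing (exact struct fields can differ between kernel versions, so treat this as an assumption):

    const struct xfs_buf_ops xfs_bmbt_buf_ops = {
        .verify_read  = xfs_bmbt_read_verify,  /* after read I/O completes  */
        .verify_write = xfs_bmbt_write_verify, /* before the buffer is written */
    };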
xfs_sb.c: 568 struct xfs_buf *bp, xfs_sb_verify()
571 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_verify()
578 __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); xfs_sb_verify()
584 return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR, xfs_sb_verify()
602 struct xfs_buf *bp) xfs_sb_read_verify()
604 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_read_verify()
605 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); xfs_sb_read_verify()
617 if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) { xfs_sb_read_verify()
619 if (bp->b_bn == XFS_SB_DADDR || xfs_sb_read_verify()
626 error = xfs_sb_verify(bp, true); xfs_sb_read_verify()
630 xfs_buf_ioerror(bp, error); xfs_sb_read_verify()
632 xfs_verifier_error(bp); xfs_sb_read_verify()
644 struct xfs_buf *bp) xfs_sb_quiet_read_verify()
646 struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp); xfs_sb_quiet_read_verify()
650 xfs_sb_read_verify(bp); xfs_sb_quiet_read_verify()
654 xfs_buf_ioerror(bp, -EWRONGFS); xfs_sb_quiet_read_verify()
659 struct xfs_buf *bp) xfs_sb_write_verify()
661 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_sb_write_verify()
662 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_sb_write_verify()
665 error = xfs_sb_verify(bp, false); xfs_sb_write_verify()
667 xfs_buf_ioerror(bp, error); xfs_sb_write_verify()
668 xfs_verifier_error(bp); xfs_sb_write_verify()
676 XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_sb_write_verify()
678 xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF); xfs_sb_write_verify()
811 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); xfs_log_sb() local
817 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); xfs_log_sb()
818 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); xfs_log_sb()
819 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); xfs_log_sb()
567 xfs_sb_verify( struct xfs_buf *bp, bool check_version) xfs_sb_verify() argument
601 xfs_sb_read_verify( struct xfs_buf *bp) xfs_sb_read_verify() argument
643 xfs_sb_quiet_read_verify( struct xfs_buf *bp) xfs_sb_quiet_read_verify() argument
658 xfs_sb_write_verify( struct xfs_buf *bp) xfs_sb_write_verify() argument
xfs_dir2.h: 158 struct xfs_buf *bp);
163 struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
165 struct xfs_buf *bp);
167 struct xfs_buf *bp, struct xfs_dir2_data_unused *dup);
169 struct xfs_buf *bp, xfs_dir2_data_aoff_t offset,
172 struct xfs_buf *bp, struct xfs_dir2_data_unused *dup,
xfs_alloc.c: 461 struct xfs_buf *bp) xfs_agfl_verify()
463 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_verify()
464 struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp); xfs_agfl_verify()
477 if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno) xfs_agfl_verify()
487 be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)); xfs_agfl_verify()
492 struct xfs_buf *bp) xfs_agfl_read_verify()
494 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_read_verify()
505 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) xfs_agfl_read_verify()
506 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agfl_read_verify()
507 else if (!xfs_agfl_verify(bp)) xfs_agfl_read_verify()
508 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agfl_read_verify()
510 if (bp->b_error) xfs_agfl_read_verify()
511 xfs_verifier_error(bp); xfs_agfl_read_verify()
516 struct xfs_buf *bp) xfs_agfl_write_verify()
518 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agfl_write_verify()
519 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agfl_write_verify()
525 if (!xfs_agfl_verify(bp)) { xfs_agfl_write_verify()
526 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agfl_write_verify()
527 xfs_verifier_error(bp); xfs_agfl_write_verify()
532 XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agfl_write_verify()
534 xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF); xfs_agfl_write_verify()
553 xfs_buf_t *bp; /* return value */ xfs_alloc_read_agfl() local
560 XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); xfs_alloc_read_agfl()
563 xfs_buf_set_ref(bp, XFS_AGFL_REF); xfs_alloc_read_agfl()
564 *bpp = bp; xfs_alloc_read_agfl()
1528 xfs_buf_t *bp; xfs_alloc_ag_vextent_small() local
1530 bp = xfs_btree_get_bufs(args->mp, args->tp, xfs_alloc_ag_vextent_small()
1532 xfs_trans_binval(args->tp, bp); xfs_alloc_ag_vextent_small()
2008 struct xfs_buf *bp; xfs_alloc_fix_freelist() local
2016 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); xfs_alloc_fix_freelist()
2017 xfs_trans_binval(tp, bp); xfs_alloc_fix_freelist()
2148 xfs_buf_t *bp, /* buffer for a.g. freelist header */ xfs_alloc_log_agf()
2170 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_); xfs_alloc_log_agf()
2172 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF); xfs_alloc_log_agf()
2175 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); xfs_alloc_log_agf()
2188 xfs_buf_t *bp; xfs_alloc_pagf_init() local
2191 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp))) xfs_alloc_pagf_init()
2193 if (bp) xfs_alloc_pagf_init()
2194 xfs_trans_brelse(tp, bp); xfs_alloc_pagf_init()
2261 struct xfs_buf *bp) xfs_agf_verify()
2263 struct xfs_agf *agf = XFS_BUF_TO_AGF(bp); xfs_agf_verify()
2269 be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn))) xfs_agf_verify()
2291 if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno) xfs_agf_verify()
2304 struct xfs_buf *bp) xfs_agf_read_verify()
2306 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agf_read_verify()
2309 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) xfs_agf_read_verify()
2310 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agf_read_verify()
2311 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp, xfs_agf_read_verify()
2314 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agf_read_verify()
2316 if (bp->b_error) xfs_agf_read_verify()
2317 xfs_verifier_error(bp); xfs_agf_read_verify()
2322 struct xfs_buf *bp) xfs_agf_write_verify()
2324 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agf_write_verify()
2325 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agf_write_verify()
2327 if (!xfs_agf_verify(mp, bp)) { xfs_agf_write_verify()
2328 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agf_write_verify()
2329 xfs_verifier_error(bp); xfs_agf_write_verify()
2337 XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agf_write_verify()
2339 xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF); xfs_agf_write_verify()
460 xfs_agfl_verify( struct xfs_buf *bp) xfs_agfl_verify() argument
491 xfs_agfl_read_verify( struct xfs_buf *bp) xfs_agfl_read_verify() argument
515 xfs_agfl_write_verify( struct xfs_buf *bp) xfs_agfl_write_verify() argument
2146 xfs_alloc_log_agf( xfs_trans_t *tp, xfs_buf_t *bp, int fields) xfs_alloc_log_agf() argument
2259 xfs_agf_verify( struct xfs_mount *mp, struct xfs_buf *bp) xfs_agf_verify() argument
2303 xfs_agf_read_verify( struct xfs_buf *bp) xfs_agf_read_verify() argument
2321 xfs_agf_write_verify( struct xfs_buf *bp) xfs_agf_write_verify() argument
xfs_inode_buf.h: 47 #define xfs_inobp_check(mp, bp)
xfs_sb.h: 30 extern void xfs_sb_calc_crc(struct xfs_buf *bp);
xfs_ialloc.c: 2429 xfs_buf_t *bp, /* allocation group header buffer */ xfs_ialloc_log_agi()
2454 agi = XFS_BUF_TO_AGI(bp); xfs_ialloc_log_agi()
2458 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF); xfs_ialloc_log_agi()
2468 xfs_trans_log_buf(tp, bp, first, last); xfs_ialloc_log_agi()
2479 xfs_trans_log_buf(tp, bp, first, last); xfs_ialloc_log_agi()
2499 struct xfs_buf *bp) xfs_agi_verify()
2501 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_verify()
2502 struct xfs_agi *agi = XFS_BUF_TO_AGI(bp); xfs_agi_verify()
2508 be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn))) xfs_agi_verify()
2528 if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno) xfs_agi_verify()
2537 struct xfs_buf *bp) xfs_agi_read_verify()
2539 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_read_verify()
2542 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) xfs_agi_read_verify()
2543 xfs_buf_ioerror(bp, -EFSBADCRC); xfs_agi_read_verify()
2544 else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, xfs_agi_read_verify()
2547 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agi_read_verify()
2549 if (bp->b_error) xfs_agi_read_verify()
2550 xfs_verifier_error(bp); xfs_agi_read_verify()
2555 struct xfs_buf *bp) xfs_agi_write_verify()
2557 struct xfs_mount *mp = bp->b_target->bt_mount; xfs_agi_write_verify()
2558 struct xfs_buf_log_item *bip = bp->b_fspriv; xfs_agi_write_verify()
2560 if (!xfs_agi_verify(bp)) { xfs_agi_write_verify()
2561 xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_agi_write_verify()
2562 xfs_verifier_error(bp); xfs_agi_write_verify()
2570 XFS_BUF_TO_AGI(bp)->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_agi_write_verify()
2571 xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF); xfs_agi_write_verify()
2649 xfs_buf_t *bp = NULL; xfs_ialloc_pagi_init() local
2652 error = xfs_ialloc_read_agi(mp, tp, agno, &bp); xfs_ialloc_pagi_init()
2655 if (bp) xfs_ialloc_pagi_init()
2656 xfs_trans_brelse(tp, bp); xfs_ialloc_pagi_init()
2427 xfs_ialloc_log_agi( xfs_trans_t *tp, xfs_buf_t *bp, int fields) xfs_ialloc_log_agi() argument
2498 xfs_agi_verify( struct xfs_buf *bp) xfs_agi_verify() argument
2536 xfs_agi_read_verify( struct xfs_buf *bp) xfs_agi_read_verify() argument
2554 xfs_agi_write_verify( struct xfs_buf *bp) xfs_agi_write_verify() argument
/linux-4.4.14/drivers/power/
apm_power.c: 47 struct find_bat_param *bp = (struct find_bat_param *)data; __find_main_battery() local
49 bp->bat = dev_get_drvdata(dev); __find_main_battery()
51 if (bp->bat->desc->use_for_apm) { __find_main_battery()
53 bp->main = bp->bat; __find_main_battery()
57 if (!PSY_PROP(bp->bat, CHARGE_FULL_DESIGN, &bp->full) || __find_main_battery()
58 !PSY_PROP(bp->bat, CHARGE_FULL, &bp->full)) { __find_main_battery()
59 if (bp->full.intval > bp->max_charge) { __find_main_battery()
60 bp->max_charge_bat = bp->bat; __find_main_battery()
61 bp->max_charge = bp->full.intval; __find_main_battery()
63 } else if (!PSY_PROP(bp->bat, ENERGY_FULL_DESIGN, &bp->full) || __find_main_battery()
64 !PSY_PROP(bp->bat, ENERGY_FULL, &bp->full)) { __find_main_battery()
65 if (bp->full.intval > bp->max_energy) { __find_main_battery()
66 bp->max_energy_bat = bp->bat; __find_main_battery()
67 bp->max_energy = bp->full.intval; __find_main_battery()
75 struct find_bat_param bp; find_main_battery() local
78 memset(&bp, 0, sizeof(struct find_bat_param)); find_main_battery()
80 bp.main = main_battery; find_main_battery()
82 error = class_for_each_device(power_supply_class, NULL, &bp, find_main_battery()
85 main_battery = bp.main; find_main_battery()
89 if ((bp.max_energy_bat && bp.max_charge_bat) && find_main_battery()
90 (bp.max_energy_bat != bp.max_charge_bat)) { find_main_battery()
92 if (!PSY_PROP(bp.max_charge_bat, VOLTAGE_MAX_DESIGN, find_main_battery()
93 &bp.full)) { find_main_battery()
94 if (bp.max_energy > bp.max_charge * bp.full.intval) find_main_battery()
95 main_battery = bp.max_energy_bat; find_main_battery()
97 main_battery = bp.max_charge_bat; find_main_battery()
98 } else if (!PSY_PROP(bp.max_energy_bat, VOLTAGE_MAX_DESIGN, find_main_battery()
99 &bp.full)) { find_main_battery()
100 if (bp.max_charge > bp.max_energy / bp.full.intval) find_main_battery()
101 main_battery = bp.max_charge_bat; find_main_battery()
103 main_battery = bp.max_energy_bat; find_main_battery()
106 main_battery = bp.max_energy_bat; find_main_battery()
108 } else if (bp.max_charge_bat) { find_main_battery()
109 main_battery = bp.max_charge_bat; find_main_battery()
110 } else if (bp.max_energy_bat) { find_main_battery()
111 main_battery = bp.max_energy_bat; find_main_battery()
114 main_battery = bp.bat; find_main_battery()
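The arithmetic above reconciles mixed reporting units: CHARGE_FULL* properties are in µAh and ENERGY_FULL* in µWh, so find_main_battery() scales the charge-based candidate by VOLTAGE_MAX_DESIGN (µV) before comparing the two. A toy check of the same test shape, with made-up numbers in whole units for readability:

    int max_charge = 5200;   /* mAh: a 5.2 Ah pack              */
    int max_energy = 57000;  /* mWh: a 57 Wh pack               */
    int volt       = 11;     /* V: design voltage of the former */

    /* Same shape as the kernel test: 57000 > 5200 * 11 (57200)
     * is false, so the charge-reporting battery is preferred. */
    const char *winner = (max_energy > max_charge * volt)
                         ? "energy battery" : "charge battery";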
/linux-4.4.14/arch/tile/lib/
cpumask.c: 25 int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) bitmap_parselist_crop() argument
31 if (!isdigit(*bp)) bitmap_parselist_crop()
33 a = simple_strtoul(bp, (char **)&bp, 10); bitmap_parselist_crop()
35 if (*bp == '-') { bitmap_parselist_crop()
36 bp++; bitmap_parselist_crop()
37 if (!isdigit(*bp)) bitmap_parselist_crop()
39 b = simple_strtoul(bp, (char **)&bp, 10); bitmap_parselist_crop()
49 if (*bp == ',') bitmap_parselist_crop()
50 bp++; bitmap_parselist_crop()
51 } while (*bp != '\0' && *bp != '\n'); bitmap_parselist_crop()
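bitmap_parselist_crop() is the tile-local variant of bitmap_parselist(): the same "a[-b][,c...]" grammar, but, as the name suggests, ranges extending past nmaskbits are cropped rather than rejected. A usage sketch under that assumption:

    DECLARE_BITMAP(mask, 64);

    bitmap_zero(mask, 64);
    if (bitmap_parselist_crop("0-2,5,100", mask, 64) == 0) {
        /* bits 0, 1, 2 and 5 are now set; entry 100 is silently
         * cropped because it is >= nmaskbits */
    }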
/linux-4.4.14/include/linux/
hw_breakpoint.h: 31 static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) hw_breakpoint_addr() argument
33 return bp->attr.bp_addr; hw_breakpoint_addr()
36 static inline int hw_breakpoint_type(struct perf_event *bp) hw_breakpoint_type() argument
38 return bp->attr.bp_type; hw_breakpoint_type()
41 static inline unsigned long hw_breakpoint_len(struct perf_event *bp) hw_breakpoint_len() argument
43 return bp->attr.bp_len; hw_breakpoint_len()
54 modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
70 extern int register_perf_hw_breakpoint(struct perf_event *bp);
71 extern int __register_perf_hw_breakpoint(struct perf_event *bp);
72 extern void unregister_hw_breakpoint(struct perf_event *bp);
75 extern int dbg_reserve_bp_slot(struct perf_event *bp);
76 extern int dbg_release_bp_slot(struct perf_event *bp);
77 extern int reserve_bp_slot(struct perf_event *bp);
78 extern void release_bp_slot(struct perf_event *bp);
82 static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) counter_arch_bp() argument
84 return &bp->hw.info; counter_arch_bp()
97 modify_user_hw_breakpoint(struct perf_event *bp, modify_user_hw_breakpoint() argument
109 register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } register_perf_hw_breakpoint() argument
111 __register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } unregister_hw_breakpoint() argument
112 static inline void unregister_hw_breakpoint(struct perf_event *bp) { } unregister_hw_breakpoint() argument
116 reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; } release_bp_slot() argument
117 static inline void release_bp_slot(struct perf_event *bp) { } release_bp_slot() argument
121 static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp) counter_arch_bp() argument
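Everything in this header treats a breakpoint as a perf_event whose attr carries bp_addr/bp_type/bp_len. A minimal kernel-side sketch of registering a write watchpoint, modeled on samples/hw_breakpoint/data_breakpoint.c (the watched symbol is an arbitrary example):

    #include <linux/hw_breakpoint.h>
    #include <linux/kallsyms.h>
    #include <linux/perf_event.h>

    static void wp_handler(struct perf_event *bp,
                           struct perf_sample_data *data,
                           struct pt_regs *regs)
    {
        pr_info("watched address written, len=%lu\n",
                hw_breakpoint_len(bp));
    }

    static struct perf_event * __percpu *wp;

    static int __init wp_init(void)
    {
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);      /* fills in breakpoint defaults */
        attr.bp_addr = kallsyms_lookup_name("jiffies");
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
        return IS_ERR((void __force *)wp) ?
               PTR_ERR((void __force *)wp) : 0;
    }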
hdlcdrv.h: 116 unsigned char *bp; member in struct:hdlcdrv_state::hdlcdrv_hdlcrx
139 unsigned char *bp; member in struct:hdlcdrv_state::hdlcdrv_hdlctx
/linux-4.4.14/arch/x86/include/asm/
stacktrace.h: 19 unsigned long bp,
27 unsigned long *stack, unsigned long bp,
33 unsigned long *stack, unsigned long bp,
47 unsigned long *stack, unsigned long bp,
52 #define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
55 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
62 unsigned long bp; stack_frame() local
65 return regs->bp; stack_frame()
68 /* Grab bp right from our regs */ stack_frame()
69 get_bp(bp); stack_frame()
70 return bp; stack_frame()
73 /* bp is the last reg pushed by switch_to */ stack_frame()
86 unsigned long *stack, unsigned long bp, char *log_lvl);
90 unsigned long *sp, unsigned long bp, char *log_lvl);
frame.h: 10 __ASM_SIZE(push,) %__ASM_REG(bp)
11 __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
14 __ASM_SIZE(pop,) %__ASM_REG(bp)
hw_breakpoint.h: 54 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
55 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
60 int arch_install_hw_breakpoint(struct perf_event *bp);
61 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
62 void hw_breakpoint_pmu_read(struct perf_event *bp);
63 void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
66 arch_fill_perf_breakpoint(struct perf_event *bp);
kdebug.h: 28 unsigned long *sp, unsigned long bp);
a.out-core.h: 47 dump->regs.bp = regs->bp; aout_dump_thread()
asm.h: 41 #define _ASM_BP __ASM_REG(bp)
syscall.h: 138 *args++ = regs->bp; syscall_get_arguments()
199 regs->bp = *args++; syscall_set_arguments()
elf.h: 111 _r->si = 0; _r->di = 0; _r->bp = 0; \
127 pr_reg[5] = regs->bp; \
176 regs->si = regs->di = regs->bp = 0; elf_common_init()
212 (pr_reg)[4] = (regs)->bp; \
/linux-4.4.14/arch/powerpc/sysdev/
grackle.c: 30 static inline void grackle_set_stg(struct pci_controller* bp, int enable) grackle_set_stg() argument
34 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_stg()
35 val = in_le32(bp->cfg_data); grackle_set_stg()
38 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_stg()
39 out_le32(bp->cfg_data, val); grackle_set_stg()
40 (void)in_le32(bp->cfg_data); grackle_set_stg()
43 static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) grackle_set_loop_snoop() argument
47 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_loop_snoop()
48 val = in_le32(bp->cfg_data); grackle_set_loop_snoop()
51 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); grackle_set_loop_snoop()
52 out_le32(bp->cfg_data, val); grackle_set_loop_snoop()
53 (void)in_le32(bp->cfg_data); grackle_set_loop_snoop()
cpm2.c: 121 u32 __iomem *bp; __cpm2_setbrg() local
127 bp = cpm2_map_size(im_brgc1, 16); __cpm2_setbrg()
129 bp = cpm2_map_size(im_brgc5, 16); __cpm2_setbrg()
132 bp += brg; __cpm2_setbrg()
138 out_be32(bp, val); __cpm2_setbrg()
139 cpm2_unmap(bp); __cpm2_setbrg()
/linux-4.4.14/drivers/net/ethernet/apple/
bmac.c: 228 struct bmac_data *bp = netdev_priv(dev); bmac_enable_and_reset_chip() local
229 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_enable_and_reset_chip()
230 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_enable_and_reset_chip()
237 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); bmac_enable_and_reset_chip()
312 struct bmac_data *bp = netdev_priv(dev); bmac_init_registers() local
329 if (!bp->is_bmac_plus) { bmac_init_registers()
371 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; bmac_init_registers()
372 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmac_init_registers()
373 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmac_init_registers()
374 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmac_init_registers()
375 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ bmac_init_registers()
405 struct bmac_data *bp = netdev_priv(dev); bmac_start_chip() local
406 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_start_chip()
425 struct bmac_data *bp = netdev_priv(dev); bmac_init_phy() local
435 if (bp->is_bmac_plus) { bmac_init_phy()
459 struct bmac_data *bp = netdev_priv(dev); bmac_suspend() local
466 spin_lock_irqsave(&bp->lock, flags); bmac_suspend()
467 if (bp->timeout_active) { bmac_suspend()
468 del_timer(&bp->tx_timeout); bmac_suspend()
469 bp->timeout_active = 0; bmac_suspend()
472 disable_irq(bp->tx_dma_intr); bmac_suspend()
473 disable_irq(bp->rx_dma_intr); bmac_suspend()
474 bp->sleeping = 1; bmac_suspend()
475 spin_unlock_irqrestore(&bp->lock, flags); bmac_suspend()
476 if (bp->opened) { bmac_suspend()
477 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_suspend()
478 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_suspend()
490 if (bp->rx_bufs[i] != NULL) { bmac_suspend()
491 dev_kfree_skb(bp->rx_bufs[i]); bmac_suspend()
492 bp->rx_bufs[i] = NULL; bmac_suspend()
496 if (bp->tx_bufs[i] != NULL) { bmac_suspend()
497 dev_kfree_skb(bp->tx_bufs[i]); bmac_suspend()
498 bp->tx_bufs[i] = NULL; bmac_suspend()
502 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_suspend()
509 struct bmac_data *bp = netdev_priv(dev); bmac_resume() local
512 if (bp->opened) bmac_resume()
516 enable_irq(bp->tx_dma_intr); bmac_resume()
517 enable_irq(bp->rx_dma_intr); bmac_resume()
526 struct bmac_data *bp = netdev_priv(dev); bmac_set_address() local
533 spin_lock_irqsave(&bp->lock, flags); bmac_set_address()
544 spin_unlock_irqrestore(&bp->lock, flags); bmac_set_address()
551 struct bmac_data *bp = netdev_priv(dev); bmac_set_timeout() local
554 spin_lock_irqsave(&bp->lock, flags); bmac_set_timeout()
555 if (bp->timeout_active) bmac_set_timeout()
556 del_timer(&bp->tx_timeout); bmac_set_timeout()
557 bp->tx_timeout.expires = jiffies + TX_TIMEOUT; bmac_set_timeout()
558 bp->tx_timeout.function = bmac_tx_timeout; bmac_set_timeout()
559 bp->tx_timeout.data = (unsigned long) dev; bmac_set_timeout()
560 add_timer(&bp->tx_timeout); bmac_set_timeout()
561 bp->timeout_active = 1; bmac_set_timeout()
562 spin_unlock_irqrestore(&bp->lock, flags); bmac_set_timeout()
589 bmac_init_tx_ring(struct bmac_data *bp) bmac_init_tx_ring() argument
591 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_init_tx_ring()
593 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); bmac_init_tx_ring()
595 bp->tx_empty = 0; bmac_init_tx_ring()
596 bp->tx_fill = 0; bmac_init_tx_ring()
597 bp->tx_fullup = 0; bmac_init_tx_ring()
600 dbdma_setcmd(&bp->tx_cmds[N_TX_RING], bmac_init_tx_ring()
601 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); bmac_init_tx_ring()
606 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); bmac_init_tx_ring()
612 struct bmac_data *bp = netdev_priv(dev); bmac_init_rx_ring() local
613 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_init_rx_ring()
618 memset((char *)bp->rx_cmds, 0, bmac_init_rx_ring()
621 if ((skb = bp->rx_bufs[i]) == NULL) { bmac_init_rx_ring()
622 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); bmac_init_rx_ring()
626 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); bmac_init_rx_ring()
629 bp->rx_empty = 0; bmac_init_rx_ring()
630 bp->rx_fill = i; bmac_init_rx_ring()
633 dbdma_setcmd(&bp->rx_cmds[N_RX_RING], bmac_init_rx_ring()
634 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); bmac_init_rx_ring()
638 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); bmac_init_rx_ring()
646 struct bmac_data *bp = netdev_priv(dev); bmac_transmit_packet() local
647 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_transmit_packet()
652 /* bp->tx_empty, bp->tx_fill)); */ bmac_transmit_packet()
653 i = bp->tx_fill + 1; bmac_transmit_packet()
656 if (i == bp->tx_empty) { bmac_transmit_packet()
658 bp->tx_fullup = 1; bmac_transmit_packet()
663 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); bmac_transmit_packet()
665 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); bmac_transmit_packet()
667 bp->tx_bufs[bp->tx_fill] = skb; bmac_transmit_packet()
668 bp->tx_fill = i; bmac_transmit_packet()
682 struct bmac_data *bp = netdev_priv(dev); bmac_rxdma_intr() local
683 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_rxdma_intr()
691 spin_lock_irqsave(&bp->lock, flags); bmac_rxdma_intr()
698 i = bp->rx_empty; bmac_rxdma_intr()
701 cp = &bp->rx_cmds[i]; bmac_rxdma_intr()
712 skb = bp->rx_bufs[i]; bmac_rxdma_intr()
713 bp->rx_bufs[i] = NULL; bmac_rxdma_intr()
725 if ((skb = bp->rx_bufs[i]) == NULL) { bmac_rxdma_intr()
726 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); bmac_rxdma_intr()
728 skb_reserve(bp->rx_bufs[i], 2); bmac_rxdma_intr()
730 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); bmac_rxdma_intr()
738 bp->rx_fill = last; bmac_rxdma_intr()
739 bp->rx_empty = i; bmac_rxdma_intr()
743 spin_unlock_irqrestore(&bp->lock, flags); bmac_rxdma_intr()
756 struct bmac_data *bp = netdev_priv(dev); bmac_txdma_intr() local
761 spin_lock_irqsave(&bp->lock, flags); bmac_txdma_intr()
767 /* del_timer(&bp->tx_timeout); */ bmac_txdma_intr()
768 /* bp->timeout_active = 0; */ bmac_txdma_intr()
771 cp = &bp->tx_cmds[bp->tx_empty]; bmac_txdma_intr()
780 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) bmac_txdma_intr()
784 if (bp->tx_bufs[bp->tx_empty]) { bmac_txdma_intr()
786 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); bmac_txdma_intr()
788 bp->tx_bufs[bp->tx_empty] = NULL; bmac_txdma_intr()
789 bp->tx_fullup = 0; bmac_txdma_intr()
791 if (++bp->tx_empty >= N_TX_RING) bmac_txdma_intr()
792 bp->tx_empty = 0; bmac_txdma_intr()
793 if (bp->tx_empty == bp->tx_fill) bmac_txdma_intr()
797 spin_unlock_irqrestore(&bp->lock, flags); bmac_txdma_intr()
867 bmac_addhash(struct bmac_data *bp, unsigned char *addr) bmac_addhash() argument
875 if (bp->hash_use_count[crc]++) return; /* This bit is already set */ bmac_addhash()
878 bp->hash_use_count[crc/16] |= mask; bmac_addhash()
882 bmac_removehash(struct bmac_data *bp, unsigned char *addr) bmac_removehash() argument
890 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ bmac_removehash()
891 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ bmac_removehash()
894 bp->hash_table_mask[crc/16] &= mask; bmac_removehash()
934 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) bmac_update_hash_table_mask() argument
936 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmac_update_hash_table_mask()
937 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmac_update_hash_table_mask()
938 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmac_update_hash_table_mask()
939 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ bmac_update_hash_table_mask()
945 struct bmac_data *bp, unsigned char *addr)
948 bmac_addhash(bp, addr);
950 bmac_update_hash_table_mask(dev, bp);
957 struct bmac_data *bp, unsigned char *addr)
959 bmac_removehash(bp, addr);
961 bmac_update_hash_table_mask(dev, bp);
975 struct bmac_data *bp = netdev_priv(dev); bmac_set_multicast() local
980 if (bp->sleeping) bmac_set_multicast()
986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; bmac_set_multicast()
987 bmac_update_hash_table_mask(dev, bp); bmac_set_multicast()
997 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; bmac_set_multicast()
998 for (i=0; i<64; i++) bp->hash_use_count[i] = 0; bmac_set_multicast()
1004 bmac_addhash(bp, ha->addr); bmac_set_multicast()
1005 bmac_update_hash_table_mask(dev, bp); bmac_set_multicast()
1201 struct bmac_data *bp = netdev_priv(dev); bmac_reset_and_enable() local
1206 spin_lock_irqsave(&bp->lock, flags); bmac_reset_and_enable()
1208 bmac_init_tx_ring(bp); bmac_reset_and_enable()
1213 bp->sleeping = 0; bmac_reset_and_enable()
1227 spin_unlock_irqrestore(&bp->lock, flags); bmac_reset_and_enable()
1247 struct bmac_data *bp; bmac_probe() local
1273 bp = netdev_priv(dev); bmac_probe()
1277 bp->mdev = mdev; bmac_probe()
1278 spin_lock_init(&bp->lock); bmac_probe()
1310 bp->is_bmac_plus = is_bmac_plus; bmac_probe()
1311 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); bmac_probe()
1312 if (!bp->tx_dma) bmac_probe()
1314 bp->tx_dma_intr = macio_irq(mdev, 1); bmac_probe()
1315 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); bmac_probe()
1316 if (!bp->rx_dma) bmac_probe()
1318 bp->rx_dma_intr = macio_irq(mdev, 2); bmac_probe()
1320 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); bmac_probe()
1321 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; bmac_probe()
1323 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); bmac_probe()
1324 skb_queue_head_init(bp->queue); bmac_probe()
1326 init_timer(&bp->tx_timeout); bmac_probe()
1333 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); bmac_probe()
1335 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); bmac_probe()
1338 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); bmac_probe()
1340 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); bmac_probe()
1348 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_probe()
1363 free_irq(bp->rx_dma_intr, dev); bmac_probe()
1365 free_irq(bp->tx_dma_intr, dev); bmac_probe()
1369 iounmap(bp->rx_dma); bmac_probe()
1371 iounmap(bp->tx_dma); bmac_probe()
1377 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_probe()
1385 struct bmac_data *bp = netdev_priv(dev); bmac_open() local
1388 bp->opened = 1; bmac_open()
1396 struct bmac_data *bp = netdev_priv(dev); bmac_close() local
1397 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_close()
1398 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_close()
1402 bp->sleeping = 1; bmac_close()
1420 if (bp->rx_bufs[i] != NULL) { bmac_close()
1421 dev_kfree_skb(bp->rx_bufs[i]); bmac_close()
1422 bp->rx_bufs[i] = NULL; bmac_close()
1427 if (bp->tx_bufs[i] != NULL) { bmac_close()
1428 dev_kfree_skb(bp->tx_bufs[i]); bmac_close()
1429 bp->tx_bufs[i] = NULL; bmac_close()
1434 bp->opened = 0; bmac_close()
1436 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); bmac_close()
1444 struct bmac_data *bp = netdev_priv(dev); bmac_start() local
1449 if (bp->sleeping) bmac_start()
1452 spin_lock_irqsave(&bp->lock, flags); bmac_start()
1454 i = bp->tx_fill + 1; bmac_start()
1457 if (i == bp->tx_empty) bmac_start()
1459 skb = skb_dequeue(bp->queue); bmac_start()
1464 spin_unlock_irqrestore(&bp->lock, flags); bmac_start()
1470 struct bmac_data *bp = netdev_priv(dev); bmac_output() local
1471 skb_queue_tail(bp->queue, skb); bmac_output()
1479 struct bmac_data *bp = netdev_priv(dev); bmac_tx_timeout() local
1480 volatile struct dbdma_regs __iomem *td = bp->tx_dma; bmac_tx_timeout()
1481 volatile struct dbdma_regs __iomem *rd = bp->rx_dma; bmac_tx_timeout()
1488 spin_lock_irqsave(&bp->lock, flags); bmac_tx_timeout()
1489 bp->timeout_active = 0; bmac_tx_timeout()
1492 /* bmac_handle_misc_intrs(bp, 0); */ bmac_tx_timeout()
1494 cp = &bp->tx_cmds[bp->tx_empty]; bmac_tx_timeout()
1496 /* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ bmac_tx_timeout()
1517 bp->tx_empty, bp->tx_fill, bp->tx_fullup)); bmac_tx_timeout()
1518 i = bp->tx_empty; bmac_tx_timeout()
1520 if (i != bp->tx_fill) { bmac_tx_timeout()
1521 dev_kfree_skb(bp->tx_bufs[i]); bmac_tx_timeout()
1522 bp->tx_bufs[i] = NULL; bmac_tx_timeout()
1524 bp->tx_empty = i; bmac_tx_timeout()
1526 bp->tx_fullup = 0; bmac_tx_timeout()
1528 if (i != bp->tx_fill) { bmac_tx_timeout()
1529 cp = &bp->tx_cmds[i]; bmac_tx_timeout()
1544 spin_unlock_irqrestore(&bp->lock, flags); bmac_tx_timeout()
1605 struct bmac_data *bp = netdev_priv(dev); bmac_remove() local
1610 free_irq(bp->tx_dma_intr, dev); bmac_remove()
1611 free_irq(bp->rx_dma_intr, dev); bmac_remove()
1614 iounmap(bp->tx_dma); bmac_remove()
1615 iounmap(bp->rx_dma); bmac_remove()
/linux-4.4.14/drivers/net/fddi/
defxx.c: 266 static void dfx_bus_config_check(DFX_board_t *bp);
271 static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
276 static void dfx_int_pr_halt_id(DFX_board_t *bp);
277 static void dfx_int_type_0_process(DFX_board_t *bp);
284 static int dfx_ctl_update_cam(DFX_board_t *bp);
285 static int dfx_ctl_update_filters(DFX_board_t *bp);
287 static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
288 static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data);
289 static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
290 static int dfx_hw_adap_state_rd(DFX_board_t *bp);
291 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
293 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
294 static void dfx_rcv_queue_process(DFX_board_t *bp);
296 static void dfx_rcv_flush(DFX_board_t *bp);
298 static inline void dfx_rcv_flush(DFX_board_t *bp) {} dfx_rcv_flush() argument
303 static int dfx_xmt_done(DFX_board_t *bp); dfx_rcv_flush()
304 static void dfx_xmt_flush(DFX_board_t *bp); dfx_rcv_flush()
326 * bp - pointer to board information dfx_rcv_flush()
352 * bp->base is a valid base I/O address for this adapter. dfx_rcv_flush()
363 static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data) dfx_writel() argument
365 writel(data, bp->base.mem + offset); dfx_writel()
369 static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) dfx_outl() argument
371 outl(data, bp->base.port + offset); dfx_outl()
374 static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) dfx_port_write_long() argument
376 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_write_long()
381 dfx_writel(bp, offset, data); dfx_port_write_long()
383 dfx_outl(bp, offset, data); dfx_port_write_long()
387 static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data) dfx_readl() argument
390 *data = readl(bp->base.mem + offset); dfx_readl()
393 static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) dfx_inl() argument
395 *data = inl(bp->base.port + offset); dfx_inl()
398 static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) dfx_port_read_long() argument
400 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_read_long()
405 dfx_readl(bp, offset, data); dfx_port_read_long()
407 dfx_inl(bp, offset, data); dfx_port_read_long()
535 DFX_board_t *bp; /* board pointer */ dfx_register() local
547 dev = alloc_fddidev(sizeof(*bp)); dfx_register()
566 bp = netdev_priv(dev); dfx_register()
567 bp->bus_dev = bdev; dfx_register()
617 bp->base.mem = ioremap_nocache(bar_start[0], bar_len[0]); dfx_register()
618 if (!bp->base.mem) { dfx_register()
624 bp->base.port = bar_start[0]; dfx_register()
650 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_register()
654 if (bp->kmalloced) dfx_register()
656 bp->kmalloced, bp->kmalloced_dma); dfx_register()
660 iounmap(bp->base.mem); dfx_register()
708 * bp->base has already been set with the proper
719 DFX_board_t *bp = netdev_priv(dev); dfx_bus_init() local
720 struct device *bdev = bp->bus_dev; dfx_bus_init()
730 bp->dev = dev; dfx_bus_init()
845 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val); dfx_bus_init()
870 * bp->base has already been set with the proper
879 DFX_board_t *bp = netdev_priv(dev); dfx_bus_uninit() local
880 struct device *bdev = bp->bus_dev; dfx_bus_uninit()
905 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0); dfx_bus_uninit()
923 * bp - pointer to board information
940 static void dfx_bus_config_check(DFX_board_t *bp) dfx_bus_config_check() argument
942 struct device __maybe_unused *bdev = bp->bus_dev; dfx_bus_config_check()
964 status = dfx_hw_port_ctrl_req(bp, dfx_bus_config_check()
979 switch (bp->burst_size) dfx_bus_config_check()
983 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8; dfx_bus_config_check()
992 bp->full_duplex_enb = PI_SNMP_K_FALSE; dfx_bus_config_check()
1039 DFX_board_t *bp = netdev_priv(dev); dfx_driver_init() local
1040 struct device *bdev = bp->bus_dev; dfx_driver_init()
1067 bp->full_duplex_enb = PI_SNMP_K_FALSE; dfx_driver_init()
1068 bp->req_ttrt = 8 * 12500; /* 8ms in 80 nanosec units */ dfx_driver_init()
1069 bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF; dfx_driver_init()
1070 bp->rcv_bufs_to_post = RCV_BUFS_DEF; dfx_driver_init()
1081 dfx_bus_config_check(bp); dfx_driver_init()
1085 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_driver_init()
1089 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); dfx_driver_init()
1093 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0, dfx_driver_init()
1100 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32)); dfx_driver_init()
1102 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, dfx_driver_init()
1109 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16)); dfx_driver_init()
1118 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); dfx_driver_init()
1138 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_driver_init()
1142 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, dfx_driver_init()
1143 &bp->kmalloced_dma, dfx_driver_init()
1148 top_p = bp->kmalloced_dma; /* get physical address of buffer */ dfx_driver_init()
1167 bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v; dfx_driver_init()
1168 bp->descr_block_phys = curr_p; dfx_driver_init()
1174 bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v; dfx_driver_init()
1175 bp->cmd_req_phys = curr_p; dfx_driver_init()
1181 bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v; dfx_driver_init()
1182 bp->cmd_rsp_phys = curr_p; dfx_driver_init()
1188 bp->rcv_block_virt = curr_v; dfx_driver_init()
1189 bp->rcv_block_phys = curr_p; dfx_driver_init()
1192 curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); dfx_driver_init()
1193 curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX); dfx_driver_init()
1198 bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v; dfx_driver_init()
1199 bp->cons_block_phys = curr_p; dfx_driver_init()
1204 print_name, bp->descr_block_virt, &bp->descr_block_phys); dfx_driver_init()
1206 print_name, bp->cmd_req_virt, &bp->cmd_req_phys); dfx_driver_init()
1208 print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys); dfx_driver_init()
1210 print_name, bp->rcv_block_virt, &bp->rcv_block_phys); dfx_driver_init()
1212 print_name, bp->cons_block_virt, &bp->cons_block_phys); dfx_driver_init()
1230 * bp - pointer to board information
1243 * bp->reset_type should be set to a valid reset type value before
1251 static int dfx_adap_init(DFX_board_t *bp, int get_buffers) dfx_adap_init() argument
1257 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_adap_init()
1261 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS) dfx_adap_init()
1263 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name); dfx_adap_init()
1272 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0); dfx_adap_init()
1281 bp->cmd_req_reg.lword = 0; dfx_adap_init()
1282 bp->cmd_rsp_reg.lword = 0; dfx_adap_init()
1283 bp->rcv_xmt_reg.lword = 0; dfx_adap_init()
1287 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); dfx_adap_init()
1291 if (dfx_hw_port_ctrl_req(bp, dfx_adap_init()
1294 bp->burst_size, dfx_adap_init()
1297 printk("%s: Could not set adapter burst size!\n", bp->dev->name); dfx_adap_init()
1308 if (dfx_hw_port_ctrl_req(bp, dfx_adap_init()
1310 bp->cons_block_phys, dfx_adap_init()
1314 printk("%s: Could not set consumer block address!\n", bp->dev->name); dfx_adap_init()
1328 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT, dfx_adap_init()
1329 (u32)(bp->descr_block_phys | dfx_adap_init()
1333 bp->dev->name); dfx_adap_init()
1339 bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET; dfx_adap_init()
1340 bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME; dfx_adap_init()
1341 bp->cmd_req_virt->char_set.item[0].value = 3; /* 3 seconds */ dfx_adap_init()
1342 bp->cmd_req_virt->char_set.item[0].item_index = 0; dfx_adap_init()
1343 bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL; dfx_adap_init()
1344 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init()
1346 printk("%s: DMA command request failed!\n", bp->dev->name); dfx_adap_init()
1352 bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET; dfx_adap_init()
1353 bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS; dfx_adap_init()
1354 bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb; dfx_adap_init()
1355 bp->cmd_req_virt->snmp_set.item[0].item_index = 0; dfx_adap_init()
1356 bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ; dfx_adap_init()
1357 bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt; dfx_adap_init()
1358 bp->cmd_req_virt->snmp_set.item[1].item_index = 0; dfx_adap_init()
1359 bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL; dfx_adap_init()
1360 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init()
1362 printk("%s: DMA command request failed!\n", bp->dev->name); dfx_adap_init()
1368 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) dfx_adap_init()
1370 printk("%s: Adapter CAM update failed!\n", bp->dev->name); dfx_adap_init()
1376 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) dfx_adap_init()
1378 printk("%s: Adapter filters update failed!\n", bp->dev->name); dfx_adap_init()
1388 dfx_rcv_flush(bp); dfx_adap_init()
1392 if (dfx_rcv_init(bp, get_buffers)) dfx_adap_init()
1394 printk("%s: Receive buffer allocation failed\n", bp->dev->name); dfx_adap_init()
1396 dfx_rcv_flush(bp); dfx_adap_init()
1402 bp->cmd_req_virt->cmd_type = PI_CMD_K_START; dfx_adap_init()
1403 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_adap_init()
1405 printk("%s: Start command failed\n", bp->dev->name); dfx_adap_init()
1407 dfx_rcv_flush(bp); dfx_adap_init()
1413 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS); dfx_adap_init()
1450 DFX_board_t *bp = netdev_priv(dev); dfx_open() local
1475 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); dfx_open()
1479 memset(bp->uc_table, 0, sizeof(bp->uc_table)); dfx_open()
1480 memset(bp->mc_table, 0, sizeof(bp->mc_table)); dfx_open()
1481 bp->uc_count = 0; dfx_open()
1482 bp->mc_count = 0; dfx_open()
1486 bp->ind_group_prom = PI_FSTATE_K_BLOCK; dfx_open()
1487 bp->group_prom = PI_FSTATE_K_BLOCK; dfx_open()
1489 spin_lock_init(&bp->lock); dfx_open()
1493 bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST; /* skip self-test */ dfx_open()
1494 if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS) dfx_open()
1541 DFX_board_t *bp = netdev_priv(dev); dfx_close() local
1547 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_close()
1551 (void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST); dfx_close()
1562 dfx_xmt_flush(bp); dfx_close()
1575 bp->cmd_req_reg.lword = 0; dfx_close()
1576 bp->cmd_rsp_reg.lword = 0; dfx_close()
1577 bp->rcv_xmt_reg.lword = 0; dfx_close()
1581 memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK)); dfx_close()
1585 dfx_rcv_flush(bp); dfx_close()
1611 * bp - pointer to board information
1626 static void dfx_int_pr_halt_id(DFX_board_t *bp) dfx_int_pr_halt_id() argument
1633 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_int_pr_halt_id()
1641 printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name); dfx_int_pr_halt_id()
1645 printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name); dfx_int_pr_halt_id()
1649 printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name); dfx_int_pr_halt_id()
1653 printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name); dfx_int_pr_halt_id()
1657 printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name); dfx_int_pr_halt_id()
1661 printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name); dfx_int_pr_halt_id()
1665 printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name); dfx_int_pr_halt_id()
1669 printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name); dfx_int_pr_halt_id()
1673 printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name); dfx_int_pr_halt_id()
1677 printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id); dfx_int_pr_halt_id()
1695 * bp - pointer to board information
1730 static void dfx_int_type_0_process(DFX_board_t *bp) dfx_int_type_0_process() argument
1742 dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status); dfx_int_type_0_process()
1743 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status); dfx_int_type_0_process()
1754 printk("%s: Non-Existent Memory Access Error\n", bp->dev->name); dfx_int_type_0_process()
1759 printk("%s: Packet Memory Parity Error\n", bp->dev->name); dfx_int_type_0_process()
1764 printk("%s: Host Bus Parity Error\n", bp->dev->name); dfx_int_type_0_process()
1768 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process()
1769 bp->reset_type = 0; /* rerun on-board diagnostics */ dfx_int_type_0_process()
1770 printk("%s: Resetting adapter...\n", bp->dev->name); dfx_int_type_0_process()
1771 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) dfx_int_type_0_process()
1773 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name); dfx_int_type_0_process()
1774 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_int_type_0_process()
1777 printk("%s: Adapter reset successful!\n", bp->dev->name); dfx_int_type_0_process()
1787 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process()
1788 dfx_xmt_flush(bp); /* flush any outstanding packets */ dfx_int_type_0_process()
1789 (void) dfx_hw_port_ctrl_req(bp, dfx_int_type_0_process()
1802 state = dfx_hw_adap_state_rd(bp); /* get adapter state */ dfx_int_type_0_process()
1811 printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name); dfx_int_type_0_process()
1812 dfx_int_pr_halt_id(bp); /* display halt id as string */ dfx_int_type_0_process()
1816 bp->link_available = PI_K_FALSE; /* link is no longer available */ dfx_int_type_0_process()
1817 bp->reset_type = 0; /* rerun on-board diagnostics */ dfx_int_type_0_process()
1818 printk("%s: Resetting adapter...\n", bp->dev->name); dfx_int_type_0_process()
1819 if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS) dfx_int_type_0_process()
1821 printk("%s: Adapter reset failed! Disabling adapter interrupts.\n", bp->dev->name); dfx_int_type_0_process()
1822 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS); dfx_int_type_0_process()
1825 printk("%s: Adapter reset successful!\n", bp->dev->name); dfx_int_type_0_process()
1829 bp->link_available = PI_K_TRUE; /* set link available flag */ dfx_int_type_0_process()
1847 * bp - pointer to board information
1877 DFX_board_t *bp = netdev_priv(dev); dfx_int_common() local
1882 if(dfx_xmt_done(bp)) /* free consumed xmt packets */ dfx_int_common()
1887 dfx_rcv_queue_process(bp); /* service received LLC frames */ dfx_int_common()
1896 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); dfx_int_common()
1900 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_int_common()
1905 dfx_int_type_0_process(bp); /* process Type 0 interrupts */ dfx_int_common()
1948 DFX_board_t *bp = netdev_priv(dev); dfx_interrupt() local
1949 struct device *bdev = bp->bus_dev; dfx_interrupt()
1959 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); dfx_interrupt()
1963 spin_lock(&bp->lock); dfx_interrupt()
1966 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, dfx_interrupt()
1973 dfx_port_write_long(bp, PFI_K_REG_STATUS, dfx_interrupt()
1975 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, dfx_interrupt()
1979 spin_unlock(&bp->lock); dfx_interrupt()
1989 spin_lock(&bp->lock); dfx_interrupt()
2003 spin_unlock(&bp->lock); dfx_interrupt()
2008 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); dfx_interrupt()
2018 spin_lock(&bp->lock); dfx_interrupt()
2023 spin_unlock(&bp->lock); dfx_interrupt()
2075 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_get_stats() local
2077 /* Fill the bp->stats structure with driver-maintained counters */ dfx_ctl_get_stats()
2079 bp->stats.gen.rx_packets = bp->rcv_total_frames; dfx_ctl_get_stats()
2080 bp->stats.gen.tx_packets = bp->xmt_total_frames; dfx_ctl_get_stats()
2081 bp->stats.gen.rx_bytes = bp->rcv_total_bytes; dfx_ctl_get_stats()
2082 bp->stats.gen.tx_bytes = bp->xmt_total_bytes; dfx_ctl_get_stats()
2083 bp->stats.gen.rx_errors = bp->rcv_crc_errors + dfx_ctl_get_stats()
2084 bp->rcv_frame_status_errors + dfx_ctl_get_stats()
2085 bp->rcv_length_errors; dfx_ctl_get_stats()
2086 bp->stats.gen.tx_errors = bp->xmt_length_errors; dfx_ctl_get_stats()
2087 bp->stats.gen.rx_dropped = bp->rcv_discards; dfx_ctl_get_stats()
2088 bp->stats.gen.tx_dropped = bp->xmt_discards; dfx_ctl_get_stats()
2089 bp->stats.gen.multicast = bp->rcv_multicast_frames; dfx_ctl_get_stats()
2090 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */ dfx_ctl_get_stats()
2094 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; dfx_ctl_get_stats()
2095 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_get_stats()
2096 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats()
2098 /* Fill the bp->stats structure with the SMT MIB object values */ dfx_ctl_get_stats()
2100 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); dfx_ctl_get_stats()
2101 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; dfx_ctl_get_stats()
2102 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; dfx_ctl_get_stats()
2103 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; dfx_ctl_get_stats()
2104 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); dfx_ctl_get_stats()
2105 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; dfx_ctl_get_stats()
2106 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; dfx_ctl_get_stats()
2107 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; dfx_ctl_get_stats()
2108 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; dfx_ctl_get_stats()
2109 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; dfx_ctl_get_stats()
2110 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; dfx_ctl_get_stats()
2111 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; dfx_ctl_get_stats()
2112 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; dfx_ctl_get_stats()
2113 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; dfx_ctl_get_stats()
2114 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; dfx_ctl_get_stats()
2115 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; dfx_ctl_get_stats()
2116 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; dfx_ctl_get_stats()
2117 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; dfx_ctl_get_stats()
2118 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; dfx_ctl_get_stats()
2119 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; dfx_ctl_get_stats()
2120 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; dfx_ctl_get_stats()
2121 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; dfx_ctl_get_stats()
2122 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; dfx_ctl_get_stats()
2123 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; dfx_ctl_get_stats()
2124 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; dfx_ctl_get_stats()
2125 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; dfx_ctl_get_stats()
2126 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; dfx_ctl_get_stats()
2127 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; dfx_ctl_get_stats()
2128 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; dfx_ctl_get_stats()
2129 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats()
2130 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats()
2131 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats()
2132 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); dfx_ctl_get_stats()
2133 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; dfx_ctl_get_stats()
2134 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; dfx_ctl_get_stats()
2135 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; dfx_ctl_get_stats()
2136 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); dfx_ctl_get_stats()
2137 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; dfx_ctl_get_stats()
2138 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; dfx_ctl_get_stats()
2139 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; dfx_ctl_get_stats()
2140 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; dfx_ctl_get_stats()
2141 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; dfx_ctl_get_stats()
2142 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; dfx_ctl_get_stats()
2143 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; dfx_ctl_get_stats()
2144 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; dfx_ctl_get_stats()
2145 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; dfx_ctl_get_stats()
2146 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; dfx_ctl_get_stats()
2147 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; dfx_ctl_get_stats()
2148 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; dfx_ctl_get_stats()
2149 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; dfx_ctl_get_stats()
2150 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; dfx_ctl_get_stats()
2151 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; dfx_ctl_get_stats()
2152 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; dfx_ctl_get_stats()
2153 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); dfx_ctl_get_stats()
2154 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; dfx_ctl_get_stats()
2155 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; dfx_ctl_get_stats()
2156 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; dfx_ctl_get_stats()
2157 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; dfx_ctl_get_stats()
2158 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; dfx_ctl_get_stats()
2159 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; dfx_ctl_get_stats()
2160 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; dfx_ctl_get_stats()
2161 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; dfx_ctl_get_stats()
2162 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; dfx_ctl_get_stats()
2163 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; dfx_ctl_get_stats()
2164 memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); dfx_ctl_get_stats()
2165 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); dfx_ctl_get_stats()
2166 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; dfx_ctl_get_stats()
2167 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; dfx_ctl_get_stats()
2168 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; dfx_ctl_get_stats()
2169 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; dfx_ctl_get_stats()
2170 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; dfx_ctl_get_stats()
2171 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; dfx_ctl_get_stats()
2172 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; dfx_ctl_get_stats()
2173 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; dfx_ctl_get_stats()
2174 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; dfx_ctl_get_stats()
2175 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; dfx_ctl_get_stats()
2176 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; dfx_ctl_get_stats()
2177 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; dfx_ctl_get_stats()
2178 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; dfx_ctl_get_stats()
2179 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; dfx_ctl_get_stats()
2180 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; dfx_ctl_get_stats()
2181 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; dfx_ctl_get_stats()
2182 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; dfx_ctl_get_stats()
2183 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; dfx_ctl_get_stats()
2184 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; dfx_ctl_get_stats()
2185 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; dfx_ctl_get_stats()
2186 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; dfx_ctl_get_stats()
2187 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; dfx_ctl_get_stats()
2188 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; dfx_ctl_get_stats()
2189 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; dfx_ctl_get_stats()
2190 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; dfx_ctl_get_stats()
2191 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; dfx_ctl_get_stats()
2195 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; dfx_ctl_get_stats()
2196 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_get_stats()
2197 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats()
2199 /* Fill the bp->stats structure with the FDDI counter values */ dfx_ctl_get_stats()
2201 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; dfx_ctl_get_stats()
2202 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; dfx_ctl_get_stats()
2203 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; dfx_ctl_get_stats()
2204 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; dfx_ctl_get_stats()
2205 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; dfx_ctl_get_stats()
2206 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; dfx_ctl_get_stats()
2207 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; dfx_ctl_get_stats()
2208 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; dfx_ctl_get_stats()
2209 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; dfx_ctl_get_stats()
2210 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; dfx_ctl_get_stats()
2211 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; dfx_ctl_get_stats()
2213 return (struct net_device_stats *)&bp->stats; dfx_ctl_get_stats()
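
dfx_ctl_get_stats() fills the driver-maintained counters first, then issues two DMA commands (SMT MIB get, FDDI counters get), returning early with whatever is already valid if either command fails. The overall shape, with all types invented for the sketch:

#include <stdio.h>

struct stats { unsigned long rx_packets, mac_frame_cts; };
struct rsp   { unsigned long frame_cnt; };

static int dma_cmd_req(struct rsp *r) { r->frame_cnt = 42; return 0; }

static struct stats *get_stats(struct stats *s, unsigned long rcv_total)
{
	struct rsp r;

	s->rx_packets = rcv_total;	/* driver-maintained, always valid */
	if (dma_cmd_req(&r) != 0)
		return s;		/* early return with partial stats */
	s->mac_frame_cts = r.frame_cnt;	/* adapter-maintained              */
	return s;
}

int main(void)
{
	struct stats s = {0};
	printf("frames: %lu\n", get_stats(&s, 7)->mac_frame_cts);
	return 0;
}
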
2262 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_set_multicast_list() local
2269 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */ dfx_ctl_set_multicast_list()
2275 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */ dfx_ctl_set_multicast_list()
2296 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)) dfx_ctl_set_multicast_list()
2298 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ dfx_ctl_set_multicast_list()
2299 bp->mc_count = 0; /* Don't add mc addrs to CAM */ dfx_ctl_set_multicast_list()
2303 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */ dfx_ctl_set_multicast_list()
2304 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */ dfx_ctl_set_multicast_list()
2311 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], netdev_for_each_mc_addr()
2314 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) netdev_for_each_mc_addr()
2320 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
2326 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
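
The netdev_mc_count() check above programs exact multicast CAM entries only when they fit beside the unicast entries, otherwise it falls back to group-promiscuous filtering. A compilable model of that decision; CAM_SIZE is an assumed stand-in for PI_CMD_ADDR_FILTER_K_SIZE:

#include <stdio.h>

#define CAM_SIZE 62
enum fstate { BLOCK, PASS };

struct board { int uc_count, mc_count; enum fstate group_prom; };

static void plan_mc(struct board *bp, int mc_requested)
{
	if (mc_requested > CAM_SIZE - bp->uc_count) {
		bp->group_prom = PASS;	/* pass all group addresses    */
		bp->mc_count = 0;	/* don't add mc addrs to CAM   */
	} else {
		bp->group_prom = BLOCK;	/* exact filtering via the CAM */
		bp->mc_count = mc_requested;
	}
}

int main(void)
{
	struct board bp = { .uc_count = 1 };
	plan_mc(&bp, 100);
	printf("group_prom=%d mc_count=%d\n", bp.group_prom, bp.mc_count);
	return 0;
}
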
2376 DFX_board_t *bp = netdev_priv(dev); dfx_ctl_set_mac_address() local
2381 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */ dfx_ctl_set_mac_address()
2382 bp->uc_count = 1; dfx_ctl_set_mac_address()
2396 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE) dfx_ctl_set_mac_address()
2398 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ dfx_ctl_set_mac_address()
2399 bp->mc_count = 0; /* Don't add mc addrs to CAM */ dfx_ctl_set_mac_address()
2403 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) dfx_ctl_set_mac_address()
2415 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) dfx_ctl_set_mac_address()
2440 * bp - pointer to board information
2460 static int dfx_ctl_update_cam(DFX_board_t *bp) dfx_ctl_update_cam() argument
2478 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */ dfx_ctl_update_cam()
2479 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET; dfx_ctl_update_cam()
2480 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0]; dfx_ctl_update_cam()
2484 for (i=0; i < (int)bp->uc_count; i++) dfx_ctl_update_cam()
2488 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); dfx_ctl_update_cam()
2495 for (i=0; i < (int)bp->mc_count; i++) dfx_ctl_update_cam()
2497 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE) dfx_ctl_update_cam()
2499 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); dfx_ctl_update_cam()
2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_update_cam()
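
dfx_ctl_update_cam() lays out unicast entries first and then as many multicast entries as still fit in the fixed-size filter table. A stand-alone sketch of that layout; ALEN and CAM_SIZE stand in for FDDI_K_ALEN and PI_CMD_ADDR_FILTER_K_SIZE:

#include <stdio.h>
#include <string.h>

#define ALEN 6
#define CAM_SIZE 4

static int build_cam(unsigned char *cam,
		     const unsigned char *uc, int uc_count,
		     const unsigned char *mc, int mc_count)
{
	int i, n = 0;

	for (i = 0; i < uc_count; i++)
		memcpy(&cam[n++ * ALEN], &uc[i * ALEN], ALEN);
	for (i = 0; i < mc_count; i++)
		if (i + uc_count < CAM_SIZE)	/* same bound the driver checks */
			memcpy(&cam[n++ * ALEN], &mc[i * ALEN], ALEN);
	return n;				/* entries actually programmed  */
}

int main(void)
{
	unsigned char cam[CAM_SIZE * ALEN], uc[ALEN] = {0}, mc[5 * ALEN] = {0};
	printf("programmed %d entries\n", build_cam(cam, uc, 1, mc, 5));
	return 0;
}
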
2525 * bp - pointer to board information
2543 static int dfx_ctl_update_filters(DFX_board_t *bp) dfx_ctl_update_filters() argument
2549 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET; dfx_ctl_update_filters()
2553 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST; dfx_ctl_update_filters()
2554 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS; dfx_ctl_update_filters()
2558 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM; dfx_ctl_update_filters()
2559 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom; dfx_ctl_update_filters()
2563 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM; dfx_ctl_update_filters()
2564 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom; dfx_ctl_update_filters()
2568 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL; dfx_ctl_update_filters()
2572 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) dfx_ctl_update_filters()
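
dfx_ctl_update_filters() builds a variable-length list of { item_code, value } pairs closed by an end-of-list code before handing the request to dfx_hw_dma_cmd_req(). The shape of that request, with invented codes standing in for the PI_ITEM_K_* constants:

#include <stdio.h>

enum { ITEM_EOL = 0, ITEM_BROADCAST, ITEM_IND_GROUP_PROM, ITEM_GROUP_PROM };

struct item { int item_code; int value; };

static int build_filters(struct item *it, int ind_group_prom, int group_prom)
{
	int i = 0;

	it[i].item_code = ITEM_BROADCAST;      it[i++].value = 1;
	it[i].item_code = ITEM_IND_GROUP_PROM; it[i++].value = ind_group_prom;
	it[i].item_code = ITEM_GROUP_PROM;     it[i++].value = group_prom;
	it[i].item_code = ITEM_EOL;            /* terminator: no value needed */
	return i;
}

int main(void)
{
	struct item it[8];
	printf("%d filter items set\n", build_filters(it, 0, 1));
	return 0;
}
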
2590 * bp - pointer to board information
2618 static int dfx_hw_dma_cmd_req(DFX_board_t *bp) dfx_hw_dma_cmd_req() argument
2625 status = dfx_hw_adap_state_rd(bp); dfx_hw_dma_cmd_req()
2634 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP | dfx_hw_dma_cmd_req()
2636 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys; dfx_hw_dma_cmd_req()
2640 bp->cmd_rsp_reg.index.prod += 1; dfx_hw_dma_cmd_req()
2641 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req()
2642 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); dfx_hw_dma_cmd_req()
2646 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP | dfx_hw_dma_cmd_req()
2648 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys; dfx_hw_dma_cmd_req()
2652 bp->cmd_req_reg.index.prod += 1; dfx_hw_dma_cmd_req()
2653 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req()
2654 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); dfx_hw_dma_cmd_req()
2663 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req)) dfx_hw_dma_cmd_req()
2672 bp->cmd_req_reg.index.comp += 1; dfx_hw_dma_cmd_req()
2673 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req()
2674 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); dfx_hw_dma_cmd_req()
2683 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp)) dfx_hw_dma_cmd_req()
2692 bp->cmd_rsp_reg.index.comp += 1; dfx_hw_dma_cmd_req()
2693 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1; dfx_hw_dma_cmd_req()
2694 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); dfx_hw_dma_cmd_req()
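
Both command rings above advance their producer and completion indices with an AND mask, which only wraps correctly because the ring sizes (PI_CMD_REQ_K_NUM_ENTRIES, PI_CMD_RSP_K_NUM_ENTRIES) are powers of two. A minimal check of that invariant:

#include <assert.h>
#include <stdint.h>

#define NUM_ENTRIES 16	/* must be a power of two for the mask to wrap */

int main(void)
{
	uint8_t prod = 0;

	for (int i = 0; i < 100; i++)
		prod = (uint8_t)((prod + 1) & (NUM_ENTRIES - 1));
	assert(prod == 100 % NUM_ENTRIES);	/* masked wrap == modulo */
	return 0;
}
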
2711 * bp - pointer to board information
2733 DFX_board_t *bp, dfx_hw_port_ctrl_req()
2750 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a); dfx_hw_port_ctrl_req()
2751 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b); dfx_hw_port_ctrl_req()
2752 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd); dfx_hw_port_ctrl_req()
2763 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd); dfx_hw_port_ctrl_req()
2778 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data); dfx_hw_port_ctrl_req()
2795 * bp - pointer to board information
2817 DFX_board_t *bp, dfx_hw_adap_reset()
2824 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */ dfx_hw_adap_reset()
2825 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET); dfx_hw_adap_reset()
2833 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0); dfx_hw_adap_reset()
2849 * bp - pointer to board information
2864 static int dfx_hw_adap_state_rd(DFX_board_t *bp) dfx_hw_adap_state_rd() argument
2868 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); dfx_hw_adap_state_rd()
2885 * bp - pointer to board information
2904 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) dfx_hw_dma_uninit() argument
2910 dfx_hw_adap_reset(bp, type); dfx_hw_dma_uninit()
2916 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL) dfx_hw_dma_uninit()
2953 * bp - pointer to board information
2976 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers) dfx_rcv_init() argument
3000 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_init()
3001 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_init()
3006 newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, dfx_rcv_init()
3016 dma_addr = dma_map_single(bp->bus_dev, dfx_rcv_init()
3020 if (dma_mapping_error(bp->bus_dev, dma_addr)) { dfx_rcv_init()
3024 bp->descr_block_virt->rcv_data[i + j].long_0 = dfx_rcv_init()
3029 bp->descr_block_virt->rcv_data[i + j].long_1 = dfx_rcv_init()
3036 bp->p_rcv_buff_va[i+j] = (char *) newskb; dfx_rcv_init()
3039 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_init()
3040 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_init()
3042 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | dfx_rcv_init()
3044 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); dfx_rcv_init()
3045 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); dfx_rcv_init()
3052 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post; dfx_rcv_init()
3053 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); dfx_rcv_init()
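
The nested loops in dfx_rcv_init() stride by rcv_bufs_to_post so that index i+j visits every descriptor exactly once; in the legacy (non-skb) branch each buffer region i is shared by descriptors i, i+stride, and so on. A print-out model of that fill order, with small assumed sizes:

#include <stdio.h>

#define NUM_ENTRIES 8	/* stand-in for PI_RCV_DATA_K_NUM_ENTRIES */

int main(void)
{
	int bufs_to_post = 4;	/* assumed; the driver sizes this at probe time */

	for (int i = 0; i < bufs_to_post; i++)
		for (int j = 0; i + j < NUM_ENTRIES; j += bufs_to_post)
			printf("descriptor %d <- buffer region %d\n", i + j, i);
	return 0;
}
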
3070 * bp - pointer to board information
3091 DFX_board_t *bp dfx_rcv_queue_process()
3102 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); dfx_rcv_queue_process()
3103 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons) dfx_rcv_queue_process()
3109 entry = bp->rcv_xmt_reg.index.rcv_comp; dfx_rcv_queue_process()
3111 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); dfx_rcv_queue_process()
3113 p_buff = bp->p_rcv_buff_va[entry]; dfx_rcv_queue_process()
3115 dma_addr = bp->descr_block_virt->rcv_data[entry].long_1; dfx_rcv_queue_process()
3116 dma_sync_single_for_cpu(bp->bus_dev, dfx_rcv_queue_process()
3125 bp->rcv_crc_errors++; dfx_rcv_queue_process()
3127 bp->rcv_frame_status_errors++; dfx_rcv_queue_process()
3138 bp->rcv_length_errors++; dfx_rcv_queue_process()
3146 newskb = netdev_alloc_skb(bp->dev, dfx_rcv_queue_process()
3151 bp->bus_dev, dfx_rcv_queue_process()
3156 bp->bus_dev, dfx_rcv_queue_process()
3165 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; dfx_rcv_queue_process()
3166 dma_unmap_single(bp->bus_dev, dfx_rcv_queue_process()
3171 bp->p_rcv_buff_va[entry] = (char *)newskb; dfx_rcv_queue_process()
3172 bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr; dfx_rcv_queue_process()
3179 skb = netdev_alloc_skb(bp->dev, dfx_rcv_queue_process()
3183 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name); dfx_rcv_queue_process()
3184 bp->rcv_discards++; dfx_rcv_queue_process()
3191 bp->bus_dev, dfx_rcv_queue_process()
3204 skb->protocol = fddi_type_trans(skb, bp->dev); dfx_rcv_queue_process()
3205 bp->rcv_total_bytes += skb->len; dfx_rcv_queue_process()
3209 bp->rcv_total_frames++; dfx_rcv_queue_process()
3211 bp->rcv_multicast_frames++; dfx_rcv_queue_process()
3224 bp->rcv_xmt_reg.index.rcv_prod += 1; dfx_rcv_queue_process()
3225 bp->rcv_xmt_reg.index.rcv_comp += 1; dfx_rcv_queue_process()
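
dfx_rcv_queue_process() chases a consumer index that the adapter advances in the shared consumer block, bumping its own completion index until the two meet. A skeleton of that polling loop, assuming an 8-entry ring for the wrap mask:

#include <stdio.h>

struct cons_block { volatile unsigned char rcv_cons; };

static void rcv_process(struct cons_block *cb, unsigned char *rcv_comp)
{
	while (*rcv_comp != cb->rcv_cons) {
		printf("frame in descriptor %u\n", *rcv_comp);
		*rcv_comp = (unsigned char)((*rcv_comp + 1) & 7);
	}
}

int main(void)
{
	struct cons_block cb = { .rcv_cons = 3 };	/* adapter produced 3 frames */
	unsigned char comp = 0;

	rcv_process(&cb, &comp);
	return 0;
}
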
3294 DFX_board_t *bp = netdev_priv(dev); dfx_xmt_queue_pkt() local
3316 bp->xmt_length_errors++; /* bump error counter */ dfx_xmt_queue_pkt()
3333 if (bp->link_available == PI_K_FALSE) dfx_xmt_queue_pkt()
3335 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */ dfx_xmt_queue_pkt()
3336 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */ dfx_xmt_queue_pkt()
3339 bp->xmt_discards++; /* bump error counter */ dfx_xmt_queue_pkt()
3353 dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len, dfx_xmt_queue_pkt()
3355 if (dma_mapping_error(bp->bus_dev, dma_addr)) { dfx_xmt_queue_pkt()
3360 spin_lock_irqsave(&bp->lock, flags); dfx_xmt_queue_pkt()
3364 prod = bp->rcv_xmt_reg.index.xmt_prod; dfx_xmt_queue_pkt()
3365 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]); dfx_xmt_queue_pkt()
3378 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */ dfx_xmt_queue_pkt()
3421 if (prod == bp->rcv_xmt_reg.index.xmt_comp) dfx_xmt_queue_pkt()
3424 spin_unlock_irqrestore(&bp->lock, flags); dfx_xmt_queue_pkt()
3448 bp->rcv_xmt_reg.index.xmt_prod = prod; dfx_xmt_queue_pkt()
3449 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); dfx_xmt_queue_pkt()
3450 spin_unlock_irqrestore(&bp->lock, flags); dfx_xmt_queue_pkt()
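
dfx_xmt_queue_pkt() bumps a local copy of the producer and treats a collision with the completion index as a full ring, unwinding without publishing the new producer; the register is only written on success. A sketch of just that test, with the ring size assumed to be 8:

#include <stdio.h>

#define MASK 7	/* 8-entry ring assumed */

static int queue_pkt(unsigned char *xmt_prod, unsigned char xmt_comp)
{
	unsigned char prod = (unsigned char)((*xmt_prod + 1) & MASK);

	if (prod == xmt_comp)
		return -1;	/* full: do not publish the new producer */
	*xmt_prod = prod;	/* success: publish, then ring doorbell  */
	return 0;
}

int main(void)
{
	unsigned char prod = 6, comp = 7;
	printf("%s\n", queue_pkt(&prod, comp) ? "ring full" : "queued");
	return 0;
}
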
3468 * bp - pointer to board information
3488 static int dfx_xmt_done(DFX_board_t *bp) dfx_xmt_done() argument
3497 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); dfx_xmt_done()
3498 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons) dfx_xmt_done()
3502 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]); dfx_xmt_done()
3506 bp->xmt_total_frames++; dfx_xmt_done()
3507 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len; dfx_xmt_done()
3510 comp = bp->rcv_xmt_reg.index.xmt_comp; dfx_xmt_done()
3511 dma_unmap_single(bp->bus_dev, dfx_xmt_done()
3512 bp->descr_block_virt->xmt_data[comp].long_1, dfx_xmt_done()
3528 bp->rcv_xmt_reg.index.xmt_comp += 1; dfx_xmt_done()
3547 * bp - pointer to board information
3562 static void dfx_rcv_flush( DFX_board_t *bp ) dfx_rcv_flush()
3566 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) dfx_rcv_flush()
3567 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) dfx_rcv_flush()
3570 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j]; dfx_rcv_flush()
3572 dma_unmap_single(bp->bus_dev, dfx_rcv_flush()
3573 bp->descr_block_virt->rcv_data[i+j].long_1, dfx_rcv_flush()
3578 bp->p_rcv_buff_va[i+j] = NULL; dfx_rcv_flush()
3597 * bp - pointer to board information
3620 static void dfx_xmt_flush( DFX_board_t *bp ) dfx_xmt_flush()
3628 while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod) dfx_xmt_flush()
3632 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]); dfx_xmt_flush()
3635 comp = bp->rcv_xmt_reg.index.xmt_comp; dfx_xmt_flush()
3636 dma_unmap_single(bp->bus_dev, dfx_xmt_flush()
3637 bp->descr_block_virt->xmt_data[comp].long_1, dfx_xmt_flush()
3644 bp->xmt_discards++; dfx_xmt_flush()
3657 bp->rcv_xmt_reg.index.xmt_comp += 1; dfx_xmt_flush()
3662 prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX); dfx_xmt_flush()
3663 prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX); dfx_xmt_flush()
3664 bp->cons_block_virt->xmt_rcv_data = prod_cons; dfx_xmt_flush()
3696 DFX_board_t *bp = netdev_priv(dev); dfx_unregister() local
3709 (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + dfx_unregister()
3713 if (bp->kmalloced) dfx_unregister()
3715 bp->kmalloced, bp->kmalloced_dma); dfx_unregister()
3725 iounmap(bp->base.mem); dfx_unregister()
2732 dfx_hw_port_ctrl_req( DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data ) dfx_hw_port_ctrl_req() argument
2816 dfx_hw_adap_reset( DFX_board_t *bp, PI_UINT32 type ) dfx_hw_adap_reset() argument
/linux-4.4.14/drivers/net/fddi/skfp/
skfddi.c 384 skfddi_priv *bp = &smc->os; skfp_driver_init() local
390 bp->base_addr = dev->base_addr; skfp_driver_init()
395 spin_lock_init(&bp->DriverLock); skfp_driver_init()
398 bp->LocalRxBuffer = pci_alloc_consistent(&bp->pdev, MAX_FRAME_SIZE, &bp->LocalRxBufferDMA); skfp_driver_init()
399 if (!bp->LocalRxBuffer) { skfp_driver_init()
406 bp->SharedMemSize = mac_drv_check_space(); skfp_driver_init()
407 pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize); skfp_driver_init()
408 if (bp->SharedMemSize > 0) { skfp_driver_init()
409 bp->SharedMemSize += 16; // for descriptor alignment skfp_driver_init()
411 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev, skfp_driver_init()
412 bp->SharedMemSize, skfp_driver_init()
413 &bp->SharedMemDMA); skfp_driver_init()
414 if (!bp->SharedMemAddr) { skfp_driver_init()
417 bp->SharedMemSize); skfp_driver_init()
420 bp->SharedMemHeap = 0; // Nothing used yet. skfp_driver_init()
423 bp->SharedMemAddr = NULL; skfp_driver_init()
424 bp->SharedMemHeap = 0; skfp_driver_init()
427 memset(bp->SharedMemAddr, 0, bp->SharedMemSize); skfp_driver_init()
445 if (bp->SharedMemAddr) { skfp_driver_init()
446 pci_free_consistent(&bp->pdev, skfp_driver_init()
447 bp->SharedMemSize, skfp_driver_init()
448 bp->SharedMemAddr, skfp_driver_init()
449 bp->SharedMemDMA); skfp_driver_init()
450 bp->SharedMemAddr = NULL; skfp_driver_init()
452 if (bp->LocalRxBuffer) { skfp_driver_init()
453 pci_free_consistent(&bp->pdev, MAX_FRAME_SIZE, skfp_driver_init()
454 bp->LocalRxBuffer, bp->LocalRxBufferDMA); skfp_driver_init()
455 bp->LocalRxBuffer = NULL; skfp_driver_init()
552 skfddi_priv *bp = &smc->os; skfp_close() local
564 skb_queue_purge(&bp->SendSkbQueue); skfp_close()
565 bp->QueueSkb = MAX_TX_QUEUE_LEN; skfp_close()
610 skfddi_priv *bp; skfp_interrupt() local
613 bp = &smc->os; skfp_interrupt()
626 spin_lock(&bp->DriverLock); skfp_interrupt()
635 spin_unlock(&bp->DriverLock); skfp_interrupt()
675 struct s_smc *bp = netdev_priv(dev); skfp_ctl_get_stats() local
677 /* Fill the bp->stats structure with driver-maintained counters */ skfp_ctl_get_stats()
679 bp->os.MacStat.port_bs_flag[0] = 0x1234; skfp_ctl_get_stats()
680 bp->os.MacStat.port_bs_flag[1] = 0x5678; skfp_ctl_get_stats()
685 /* Fill the bp->stats structure with the SMT MIB object values */ skfp_ctl_get_stats()
687 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); skfp_ctl_get_stats()
688 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; skfp_ctl_get_stats()
689 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; skfp_ctl_get_stats()
690 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; skfp_ctl_get_stats()
691 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); skfp_ctl_get_stats()
692 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; skfp_ctl_get_stats()
693 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; skfp_ctl_get_stats()
694 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; skfp_ctl_get_stats()
695 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; skfp_ctl_get_stats()
696 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; skfp_ctl_get_stats()
697 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; skfp_ctl_get_stats()
698 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; skfp_ctl_get_stats()
699 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; skfp_ctl_get_stats()
700 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; skfp_ctl_get_stats()
701 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; skfp_ctl_get_stats()
702 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; skfp_ctl_get_stats()
703 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; skfp_ctl_get_stats()
704 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; skfp_ctl_get_stats()
705 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; skfp_ctl_get_stats()
706 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; skfp_ctl_get_stats()
707 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; skfp_ctl_get_stats()
708 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; skfp_ctl_get_stats()
709 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; skfp_ctl_get_stats()
710 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; skfp_ctl_get_stats()
711 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; skfp_ctl_get_stats()
712 bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; skfp_ctl_get_stats()
713 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; skfp_ctl_get_stats()
714 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; skfp_ctl_get_stats()
715 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; skfp_ctl_get_stats()
716 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats()
717 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats()
718 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats()
719 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); skfp_ctl_get_stats()
720 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; skfp_ctl_get_stats()
721 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; skfp_ctl_get_stats()
722 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; skfp_ctl_get_stats()
723 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); skfp_ctl_get_stats()
724 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; skfp_ctl_get_stats()
725 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; skfp_ctl_get_stats()
726 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; skfp_ctl_get_stats()
727 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; skfp_ctl_get_stats()
728 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; skfp_ctl_get_stats()
729 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; skfp_ctl_get_stats()
730 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; skfp_ctl_get_stats()
731 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; skfp_ctl_get_stats()
732 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; skfp_ctl_get_stats()
733 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; skfp_ctl_get_stats()
734 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; skfp_ctl_get_stats()
735 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; skfp_ctl_get_stats()
736 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; skfp_ctl_get_stats()
737 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; skfp_ctl_get_stats()
738 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; skfp_ctl_get_stats()
739 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; skfp_ctl_get_stats()
740 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); skfp_ctl_get_stats()
741 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; skfp_ctl_get_stats()
742 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; skfp_ctl_get_stats()
743 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; skfp_ctl_get_stats()
744 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; skfp_ctl_get_stats()
745 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; skfp_ctl_get_stats()
746 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; skfp_ctl_get_stats()
747 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; skfp_ctl_get_stats()
748 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; skfp_ctl_get_stats()
749 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; skfp_ctl_get_stats()
750 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; skfp_ctl_get_stats()
751 memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); skfp_ctl_get_stats()
752 memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); skfp_ctl_get_stats()
753 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; skfp_ctl_get_stats()
754 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; skfp_ctl_get_stats()
755 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; skfp_ctl_get_stats()
756 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; skfp_ctl_get_stats()
757 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; skfp_ctl_get_stats()
758 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; skfp_ctl_get_stats()
759 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; skfp_ctl_get_stats()
760 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; skfp_ctl_get_stats()
761 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; skfp_ctl_get_stats()
762 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; skfp_ctl_get_stats()
763 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; skfp_ctl_get_stats()
764 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; skfp_ctl_get_stats()
765 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; skfp_ctl_get_stats()
766 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; skfp_ctl_get_stats()
767 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; skfp_ctl_get_stats()
768 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; skfp_ctl_get_stats()
769 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; skfp_ctl_get_stats()
770 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; skfp_ctl_get_stats()
771 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; skfp_ctl_get_stats()
772 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; skfp_ctl_get_stats()
773 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; skfp_ctl_get_stats()
774 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; skfp_ctl_get_stats()
775 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; skfp_ctl_get_stats()
776 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; skfp_ctl_get_stats()
777 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; skfp_ctl_get_stats()
778 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; skfp_ctl_get_stats()
781 /* Fill the bp->stats structure with the FDDI counter values */ skfp_ctl_get_stats()
783 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; skfp_ctl_get_stats()
784 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; skfp_ctl_get_stats()
785 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; skfp_ctl_get_stats()
786 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; skfp_ctl_get_stats()
787 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; skfp_ctl_get_stats()
788 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; skfp_ctl_get_stats()
789 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; skfp_ctl_get_stats()
790 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; skfp_ctl_get_stats()
791 bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; skfp_ctl_get_stats()
792 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; skfp_ctl_get_stats()
793 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; skfp_ctl_get_stats()
796 return (struct net_device_stats *)&bp->os.MacStat; skfp_ctl_get_stats()
840 skfddi_priv *bp = &smc->os; skfp_ctl_set_multicast_list() local
843 spin_lock_irqsave(&bp->DriverLock, Flags); skfp_ctl_set_multicast_list()
845 spin_unlock_irqrestore(&bp->DriverLock, Flags); skfp_ctl_set_multicast_list()
925 skfddi_priv *bp = &smc->os; skfp_ctl_set_mac_address() local
930 spin_lock_irqsave(&bp->DriverLock, Flags); skfp_ctl_set_mac_address()
932 spin_unlock_irqrestore(&bp->DriverLock, Flags); skfp_ctl_set_mac_address()
1045 skfddi_priv *bp = &smc->os; skfp_send_pkt() local
1059 bp->MacStat.gen.tx_errors++; /* bump error counter */ skfp_send_pkt()
1065 if (bp->QueueSkb == 0) { // return with tbusy set: queue full skfp_send_pkt()
1070 bp->QueueSkb--; skfp_send_pkt()
1071 skb_queue_tail(&bp->SendSkbQueue, skb); skfp_send_pkt()
1073 if (bp->QueueSkb == 0) { skfp_send_pkt()
1106 skfddi_priv *bp = &smc->os; send_queued_packets() local
1119 skb = skb_dequeue(&bp->SendSkbQueue); send_queued_packets()
1126 spin_lock_irqsave(&bp->DriverLock, Flags); send_queued_packets()
1155 pr_debug("%s: out of TXDs.\n", bp->dev->name); send_queued_packets()
1158 bp->dev->name); send_queued_packets()
1163 skb_queue_head(&bp->SendSkbQueue, skb); send_queued_packets()
1164 spin_unlock_irqrestore(&bp->DriverLock, Flags); send_queued_packets()
1169 bp->QueueSkb++; // one packet less in local queue send_queued_packets()
1176 dma_address = pci_map_single(&bp->pdev, skb->data, send_queued_packets()
1186 pci_unmap_single(&bp->pdev, dma_address, send_queued_packets()
1190 spin_unlock_irqrestore(&bp->DriverLock, Flags); send_queued_packets()
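
skfp's transmit path is two-stage: skfp_send_pkt() consumes a free slot (QueueSkb counts them) and enqueues the skb, while send_queued_packets() drains the queue as long as hardware descriptors last, requeueing at the head when they run out. A plain-array model of that bookkeeping:

#include <stdio.h>

#define MAX_TX_QUEUE_LEN 4

static int ring[MAX_TX_QUEUE_LEN], head, len;	/* stands in for SendSkbQueue */
static int QueueSkb = MAX_TX_QUEUE_LEN;		/* free slots, as in the driver */

static int send_pkt(int pkt)
{
	if (QueueSkb == 0)
		return -1;	/* queue full: caller backs off */
	QueueSkb--;
	ring[(head + len++) % MAX_TX_QUEUE_LEN] = pkt;
	return 0;
}

static void send_queued(int txds_free)
{
	while (len > 0 && txds_free-- > 0) {
		printf("xmit %d\n", ring[head]);
		head = (head + 1) % MAX_TX_QUEUE_LEN;
		len--;
		QueueSkb++;	/* one packet less in the local queue */
	}
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		if (send_pkt(i))
			printf("blocked at %d\n", i);
	send_queued(8);
	return 0;
}
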
1278 skfddi_priv *bp = &smc->os; llc_restart_tx() local
1283 spin_unlock(&bp->DriverLock); llc_restart_tx()
1285 spin_lock(&bp->DriverLock); llc_restart_tx()
1286 netif_start_queue(bp->dev);// system may send again if it was blocked llc_restart_tx()
1462 skfddi_priv *bp = &smc->os; dma_complete() local
1467 int MaxFrameSize = bp->MaxFrameSize; dma_complete()
1469 pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr, dma_complete()
1576 skfddi_priv *bp = &smc->os; mac_drv_rx_complete() local
1658 skb->protocol = fddi_type_trans(skb, bp->dev); mac_drv_rx_complete()
1851 skfddi_priv *bp = &smc->os; mac_drv_clear_rxd() local
1852 int MaxFrameSize = bp->MaxFrameSize; mac_drv_clear_rxd()
1854 pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr, mac_drv_clear_rxd()
/linux-4.4.14/arch/x86/um/asm/
processor_64.h 38 #define current_bp() ({ unsigned long bp; __asm__("movq %%rbp, %0" : "=r" (bp) : ); bp; })
processor_32.h 59 #define current_bp() ({ unsigned long bp; __asm__("movl %%ebp, %0" : "=r" (bp) : ); bp; })
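
The two current_bp() macros read the frame pointer register with inline asm. With GCC or Clang the same value is available portably through __builtin_frame_address(); note the result is only meaningful when the compiler actually maintains a frame pointer (e.g. -O0 or -fno-omit-frame-pointer):

#include <stdio.h>

static unsigned long current_bp(void)
{
	/* frame address of the current function; matches %rbp/%ebp only
	 * when a frame pointer is kept */
	return (unsigned long)__builtin_frame_address(0);
}

int main(void)
{
	printf("frame pointer: %#lx\n", current_bp());
	return 0;
}
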
/linux-4.4.14/arch/powerpc/kernel/
hw_breakpoint.c 63 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument
65 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint()
68 *slot = bp; arch_install_hw_breakpoint()
74 if (current->thread.last_hit_ubp != bp) arch_install_hw_breakpoint()
89 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument
93 if (*slot != bp) { arch_uninstall_hw_breakpoint()
106 void arch_unregister_hw_breakpoint(struct perf_event *bp) arch_unregister_hw_breakpoint() argument
113 if (bp->ctx && bp->ctx->task) arch_unregister_hw_breakpoint()
114 bp->ctx->task->thread.last_hit_ubp = NULL; arch_unregister_hw_breakpoint()
120 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument
122 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace()
142 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument
145 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings()
147 if (!bp) arch_validate_hwbkpt_settings()
151 if (bp->attr.bp_type & HW_BREAKPOINT_R) arch_validate_hwbkpt_settings()
153 if (bp->attr.bp_type & HW_BREAKPOINT_W) arch_validate_hwbkpt_settings()
158 if (!(bp->attr.exclude_user)) arch_validate_hwbkpt_settings()
160 if (!(bp->attr.exclude_kernel)) arch_validate_hwbkpt_settings()
162 if (!(bp->attr.exclude_hv)) arch_validate_hwbkpt_settings()
164 info->address = bp->attr.bp_addr; arch_validate_hwbkpt_settings()
165 info->len = bp->attr.bp_len; arch_validate_hwbkpt_settings()
177 if ((bp->attr.bp_addr >> 10) != arch_validate_hwbkpt_settings()
178 ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10)) arch_validate_hwbkpt_settings()
211 struct perf_event *bp; hw_breakpoint_handler() local
229 bp = __this_cpu_read(bp_per_reg); hw_breakpoint_handler()
230 if (!bp) hw_breakpoint_handler()
232 info = counter_arch_bp(bp); hw_breakpoint_handler()
240 if (bp->overflow_handler == ptrace_triggered) { hw_breakpoint_handler()
241 perf_bp_event(bp, regs); hw_breakpoint_handler()
253 if (!((bp->attr.bp_addr <= dar) && hw_breakpoint_handler()
254 (dar - bp->attr.bp_addr < bp->attr.bp_len))) hw_breakpoint_handler()
259 current->thread.last_hit_ubp = bp; hw_breakpoint_handler()
277 perf_event_disable(bp); hw_breakpoint_handler()
285 perf_bp_event(bp, regs); hw_breakpoint_handler()
299 struct perf_event *bp = NULL; single_step_dabr_instruction() local
302 bp = current->thread.last_hit_ubp; single_step_dabr_instruction()
307 if (!bp) single_step_dabr_instruction()
310 info = counter_arch_bp(bp); single_step_dabr_instruction()
317 perf_bp_event(bp, regs); single_step_dabr_instruction()
363 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
ptrace.c 918 void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument
929 attr = bp->attr; ptrace_triggered()
931 modify_user_hw_breakpoint(bp, &attr); ptrace_triggered()
941 struct perf_event *bp; ptrace_set_debugreg() local
979 bp = thread->ptrace_bps[0]; ptrace_set_debugreg()
981 if (bp) { ptrace_set_debugreg()
982 unregister_hw_breakpoint(bp); ptrace_set_debugreg()
987 if (bp) { ptrace_set_debugreg()
988 attr = bp->attr; ptrace_set_debugreg()
995 ret = modify_user_hw_breakpoint(bp, &attr); ptrace_set_debugreg()
999 thread->ptrace_bps[0] = bp; ptrace_set_debugreg()
1010 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_set_debugreg()
1012 if (IS_ERR(bp)) { ptrace_set_debugreg()
1014 return PTR_ERR(bp); ptrace_set_debugreg()
1375 struct perf_event *bp; ppc_set_hwdebug() local
1445 bp = thread->ptrace_bps[0]; ppc_set_hwdebug()
1446 if (bp) ppc_set_hwdebug()
1455 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ppc_set_hwdebug()
1457 if (IS_ERR(bp)) { ppc_set_hwdebug()
1459 return PTR_ERR(bp); ppc_set_hwdebug()
1482 struct perf_event *bp; ppc_del_hwdebug() local
1505 bp = thread->ptrace_bps[0]; ppc_del_hwdebug()
1506 if (bp) { ppc_del_hwdebug()
1507 unregister_hw_breakpoint(bp); ppc_del_hwdebug()
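
The ptrace paths above build a perf_event_attr and register it in kernel space with register_user_hw_breakpoint(). User space can exercise the same hardware-breakpoint slots through perf_event_open(2); the sketch below asks for a 4-byte write watchpoint on a local variable. It needs hardware watchpoint support and sufficient perf permissions, so the fd may legitimately come back negative:

#define _GNU_SOURCE
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int watch(void *addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes        */
	attr.bp_addr = (unsigned long)addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;	/* watch a 4-byte object */
	attr.exclude_kernel = 1;		/* user-space hits only  */
	attr.exclude_hv = 1;

	return (int)syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int target = 0;
	int fd = watch(&target);

	target = 1;	/* would count as one breakpoint event */
	printf("perf fd: %d (negative means unsupported or not permitted)\n", fd);
	return 0;
}
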
/linux-4.4.14/fs/jfs/
jfs_logmgr.c 188 static void lbmFree(struct lbuf * bp);
189 static void lbmfree(struct lbuf * bp);
191 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, int cant_block);
192 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag);
193 static int lbmIOWait(struct lbuf * bp, int flag);
195 static void lbmStartIO(struct lbuf * bp);
361 struct lbuf *bp; /* dst log page buffer */ lmWriteRecord() local
380 bp = (struct lbuf *) log->bp; lmWriteRecord()
381 lp = (struct logpage *) bp->l_ldata; lmWriteRecord()
431 bp = log->bp; lmWriteRecord()
432 lp = (struct logpage *) bp->l_ldata; lmWriteRecord()
456 bp = (struct lbuf *) log->bp; lmWriteRecord()
457 lp = (struct logpage *) bp->l_ldata; lmWriteRecord()
509 bp->l_eor = dstoffset; lmWriteRecord()
515 bp->l_eor); lmWriteRecord()
531 tblk->bp = log->bp; lmWriteRecord()
541 jfs_info("lmWriteRecord: lrd:0x%04x bp:0x%p pn:%d eor:0x%x", lmWriteRecord()
542 le16_to_cpu(lrd->type), log->bp, log->page, dstoffset); lmWriteRecord()
552 bp = (struct lbuf *) log->bp; lmWriteRecord()
553 lp = (struct logpage *) bp->l_ldata; lmWriteRecord()
578 struct lbuf *bp; lmNextPage() local
584 bp = log->bp; lmNextPage()
585 lp = (struct logpage *) bp->l_ldata; lmNextPage()
620 if (bp->l_wqnext == NULL) lmNextPage()
621 lbmWrite(log, bp, 0, 0); lmNextPage()
635 bp->l_ceor = bp->l_eor; lmNextPage()
636 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmNextPage()
637 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, 0); lmNextPage()
653 log->bp = nextbp; lmNextPage()
760 struct lbuf *bp; lmGCwrite() local
789 bp = (struct lbuf *) tblk->bp; lmGCwrite()
790 lp = (struct logpage *) bp->l_ldata; lmGCwrite()
796 bp->l_ceor = bp->l_eor; lmGCwrite()
797 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmGCwrite()
798 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmGC, lmGCwrite()
804 bp->l_ceor = tblk->eor; /* ? bp->l_ceor = bp->l_eor; */ lmGCwrite()
805 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor); lmGCwrite()
806 lbmWrite(log, bp, lbmWRITE | lbmGC, cant_write); lmGCwrite()
823 static void lmPostGC(struct lbuf * bp) lmPostGC() argument
826 struct jfs_log *log = bp->l_log; lmPostGC()
846 if (bp->l_flag & lbmERROR) lmPostGC()
881 lbmFree(bp); lmPostGC()
887 lp = (struct logpage *) bp->l_ldata; lmPostGC()
888 bp->l_ceor = bp->l_eor; lmPostGC()
889 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmPostGC()
891 lbmWrite(log, bp, lbmWRITE | lbmRELEASE | lbmFREE, lmPostGC()
905 ((log->gcrtc > 0) || (tblk->bp->l_wqnext != NULL) || lmPostGC()
1268 struct lbuf *bp; lmLogInit() local
1305 bp = lbmAllocate(log , 0); lmLogInit()
1306 log->bp = bp; lmLogInit()
1307 bp->l_pn = bp->l_eor = 0; lmLogInit()
1358 if ((rc = lbmRead(log, log->page, &bp))) lmLogInit()
1361 lp = (struct logpage *) bp->l_ldata; lmLogInit()
1367 log->bp = bp; lmLogInit()
1368 bp->l_pn = log->page; lmLogInit()
1369 bp->l_eor = log->eor; lmLogInit()
1391 bp = log->bp; lmLogInit()
1392 bp->l_ceor = bp->l_eor; lmLogInit()
1393 lp = (struct logpage *) bp->l_ldata; lmLogInit()
1394 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmLogInit()
1395 lbmWrite(log, bp, lbmWRITE | lbmSYNC, 0); lmLogInit()
1396 if ((rc = lbmIOWait(bp, 0))) lmLogInit()
1432 bp->l_wqnext = NULL; lmLogInit()
1433 lbmFree(bp); lmLogInit()
1661 struct lbuf *bp; lmLogShutdown() local
1679 bp = log->bp; lmLogShutdown()
1680 lp = (struct logpage *) bp->l_ldata; lmLogShutdown()
1681 lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor); lmLogShutdown()
1682 lbmWrite(log, log->bp, lbmWRITE | lbmRELEASE | lbmSYNC, 0); lmLogShutdown()
1683 lbmIOWait(log->bp, lbmFREE); lmLogShutdown()
1684 log->bp = NULL; lmLogShutdown()
1800 * bp->wrqueue field), and
1817 log->bp = NULL; lbmLogInit()
1901 struct lbuf *bp; lbmAllocate() local
1908 LCACHE_SLEEP_COND(log->free_wait, (bp = log->lbuf_free), flags); lbmAllocate()
1909 log->lbuf_free = bp->l_freelist; lbmAllocate()
1912 bp->l_flag = 0; lbmAllocate()
1914 bp->l_wqnext = NULL; lbmAllocate()
1915 bp->l_freelist = NULL; lbmAllocate()
1917 bp->l_pn = pn; lbmAllocate()
1918 bp->l_blkno = log->base + (pn << (L2LOGPSIZE - log->l2bsize)); lbmAllocate()
1919 bp->l_ceor = 0; lbmAllocate()
1921 return bp; lbmAllocate()
1930 static void lbmFree(struct lbuf * bp) lbmFree() argument
1936 lbmfree(bp); lbmFree()
1941 static void lbmfree(struct lbuf * bp) lbmfree() argument
1943 struct jfs_log *log = bp->l_log; lbmfree()
1945 assert(bp->l_wqnext == NULL); lbmfree()
1950 bp->l_freelist = log->lbuf_free; lbmfree()
1951 log->lbuf_free = bp; lbmfree()
1964 * bp - log buffer
1969 static inline void lbmRedrive(struct lbuf *bp) lbmRedrive() argument
1974 bp->l_redrive_next = log_redrive_list; lbmRedrive()
1975 log_redrive_list = bp; lbmRedrive()
1988 struct lbuf *bp; lbmRead() local
1993 *bpp = bp = lbmAllocate(log, pn); lbmRead()
1994 jfs_info("lbmRead: bp:0x%p pn:0x%x", bp, pn); lbmRead()
1996 bp->l_flag |= lbmREAD; lbmRead()
2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmRead()
2003 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); lbmRead()
2007 bio->bi_private = bp; lbmRead()
2016 wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD)); lbmRead()
2037 static void lbmWrite(struct jfs_log * log, struct lbuf * bp, int flag, lbmWrite() argument
2043 jfs_info("lbmWrite: bp:0x%p flag:0x%x pn:0x%x", bp, flag, bp->l_pn); lbmWrite()
2046 bp->l_blkno = lbmWrite()
2047 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); lbmWrite()
2054 bp->l_flag = flag; lbmWrite()
2057 * insert bp at tail of write queue associated with log lbmWrite()
2059 * (request is either for bp already/currently at head of queue lbmWrite()
2060 * or new bp to be inserted at tail) lbmWrite()
2065 if (bp->l_wqnext == NULL) { lbmWrite()
2068 log->wqueue = bp; lbmWrite()
2069 bp->l_wqnext = bp; lbmWrite()
2071 log->wqueue = bp; lbmWrite()
2072 bp->l_wqnext = tail->l_wqnext; lbmWrite()
2073 tail->l_wqnext = bp; lbmWrite()
2076 tail = bp; lbmWrite()
2080 if ((bp != tail->l_wqnext) || !(flag & lbmWRITE)) { lbmWrite()
2088 lbmRedrive(bp); lbmWrite()
2090 lbmStartIO(bp); lbmWrite()
2093 lbmStartIO(bp); lbmWrite()
2105 static void lbmDirectWrite(struct jfs_log * log, struct lbuf * bp, int flag) lbmDirectWrite() argument
2107 jfs_info("lbmDirectWrite: bp:0x%p flag:0x%x pn:0x%x", lbmDirectWrite()
2108 bp, flag, bp->l_pn); lbmDirectWrite()
2113 bp->l_flag = flag | lbmDIRECT; lbmDirectWrite()
2116 bp->l_blkno = lbmDirectWrite()
2117 log->base + (bp->l_pn << (L2LOGPSIZE - log->l2bsize)); lbmDirectWrite()
2122 lbmStartIO(bp); lbmDirectWrite()
2135 static void lbmStartIO(struct lbuf * bp) lbmStartIO() argument
2138 struct jfs_log *log = bp->l_log; lbmStartIO()
2143 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); lbmStartIO()
2146 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); lbmStartIO()
2150 bio->bi_private = bp; lbmStartIO()
2166 static int lbmIOWait(struct lbuf * bp, int flag) lbmIOWait() argument
2171 jfs_info("lbmIOWait1: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); lbmIOWait()
2175 LCACHE_SLEEP_COND(bp->l_ioevent, (bp->l_flag & lbmDONE), flags); lbmIOWait()
2177 rc = (bp->l_flag & lbmERROR) ? -EIO : 0; lbmIOWait()
2180 lbmfree(bp); lbmIOWait()
2184 jfs_info("lbmIOWait2: bp:0x%p flag:0x%x:0x%x", bp, bp->l_flag, flag); lbmIOWait()
2195 struct lbuf *bp = bio->bi_private; lbmIODone() local
2203 jfs_info("lbmIODone: bp:0x%p flag:0x%x", bp, bp->l_flag); lbmIODone()
2207 bp->l_flag |= lbmDONE; lbmIODone()
2210 bp->l_flag |= lbmERROR; lbmIODone()
2220 if (bp->l_flag & lbmREAD) { lbmIODone()
2221 bp->l_flag &= ~lbmREAD; lbmIODone()
2226 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone()
2234 * the bp at the head of write queue has completed pageout. lbmIODone()
2243 bp->l_flag &= ~lbmWRITE; lbmIODone()
2247 log = bp->l_log; lbmIODone()
2248 log->clsn = (bp->l_pn << L2LOGPSIZE) + bp->l_ceor; lbmIODone()
2250 if (bp->l_flag & lbmDIRECT) { lbmIODone()
2251 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone()
2259 if (bp == tail) { lbmIODone()
2263 if (bp->l_flag & lbmRELEASE) { lbmIODone()
2265 bp->l_wqnext = NULL; lbmIODone()
2273 if (bp->l_flag & lbmRELEASE) { lbmIODone()
2274 nextbp = tail->l_wqnext = bp->l_wqnext; lbmIODone()
2275 bp->l_wqnext = NULL; lbmIODone()
2304 if (bp->l_flag & lbmSYNC) { lbmIODone()
2308 LCACHE_WAKEUP(&bp->l_ioevent); lbmIODone()
2314 else if (bp->l_flag & lbmGC) { lbmIODone()
2316 lmPostGC(bp); lbmIODone()
2326 assert(bp->l_flag & lbmRELEASE); lbmIODone()
2327 assert(bp->l_flag & lbmFREE); lbmIODone()
2328 lbmfree(bp); lbmIODone()
2336 struct lbuf *bp; jfsIOWait() local
2340 while ((bp = log_redrive_list)) { jfsIOWait()
2341 log_redrive_list = bp->l_redrive_next; jfsIOWait()
2342 bp->l_redrive_next = NULL; jfsIOWait()
2344 lbmStartIO(bp); jfsIOWait()
2387 struct lbuf *bp; lmLogFormat() local
2395 bp = lbmAllocate(log, 1); lmLogFormat()
2411 logsuper = (struct logsuper *) bp->l_ldata; lmLogFormat()
2422 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat()
2423 bp->l_blkno = logAddress + sbi->nbperpage; lmLogFormat()
2424 lbmStartIO(bp); lmLogFormat()
2425 if ((rc = lbmIOWait(bp, 0))) lmLogFormat()
2450 lp = (struct logpage *) bp->l_ldata; lmLogFormat()
2465 bp->l_blkno += sbi->nbperpage; lmLogFormat()
2466 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat()
2467 lbmStartIO(bp); lmLogFormat()
2468 if ((rc = lbmIOWait(bp, 0))) lmLogFormat()
2478 bp->l_blkno += sbi->nbperpage; lmLogFormat()
2479 bp->l_flag = lbmWRITE | lbmSYNC | lbmDIRECT; lmLogFormat()
2480 lbmStartIO(bp); lmLogFormat()
2481 if ((rc = lbmIOWait(bp, 0))) lmLogFormat()
2491 lbmFree(bp); lmLogFormat()
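Aside on the lbmWrite() hit above: it inserts bp into a circular singly-linked write queue in which log->wqueue always points at the tail and tail->l_wqnext at the head. A minimal user-space sketch of that insertion pattern (names are hypothetical stand-ins, not the JFS ones):

	#include <stdio.h>
	#include <stdlib.h>

	struct lbuf_sk {                      /* stand-in for struct lbuf */
		int pn;                       /* page number, for demo output */
		struct lbuf_sk *next;         /* stand-in for l_wqnext */
	};

	/* Insert at tail of a circular list; *tailp tracks the tail and
	 * tail->next is the head -- the same shape lbmWrite() maintains. */
	static void wq_insert(struct lbuf_sk **tailp, struct lbuf_sk *bp)
	{
		if (*tailp == NULL) {
			bp->next = bp;              /* lone element points at itself */
		} else {
			bp->next = (*tailp)->next;  /* new tail points at head */
			(*tailp)->next = bp;        /* old tail points at new tail */
		}
		*tailp = bp;
	}

	int main(void)
	{
		struct lbuf_sk a = { .pn = 1 }, b = { .pn = 2 }, c = { .pn = 3 };
		struct lbuf_sk *tail = NULL, *p;

		wq_insert(&tail, &a);
		wq_insert(&tail, &b);
		wq_insert(&tail, &c);

		p = tail->next;               /* head of the ring */
		do {
			printf("pn=%d\n", p->pn);
			p = p->next;
		} while (p != tail->next);    /* prints 1, 2, 3 */
		return 0;
	}

The tail-only handle is the point: both "append" (lbmWrite) and "pop head" (lbmIODone) stay O(1) with a single stored pointer.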
/linux-4.4.14/arch/sh/kernel/
H A Dhw_breakpoint.c49 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument
51 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint()
58 *slot = bp; arch_install_hw_breakpoint()
81 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument
83 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint()
89 if (*slot == bp) { arch_uninstall_hw_breakpoint()
126 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument
130 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace()
176 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument
178 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info()
180 info->address = bp->attr.bp_addr; arch_build_bp_info()
183 switch (bp->attr.bp_len) { arch_build_bp_info()
201 switch (bp->attr.bp_type) { arch_build_bp_info()
221 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument
223 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings()
227 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings()
284 struct perf_event *bp; hw_breakpoint_handler() local
319 bp = per_cpu(bp_per_reg[i], cpu); hw_breakpoint_handler()
320 if (bp) hw_breakpoint_handler()
330 * bp can be NULL due to concurrent perf counter hw_breakpoint_handler()
333 if (!bp) { hw_breakpoint_handler()
342 if (bp->overflow_handler == ptrace_triggered) hw_breakpoint_handler()
345 perf_bp_event(bp, args->regs); hw_breakpoint_handler()
348 if (!arch_check_bp_in_kernelspace(bp)) { hw_breakpoint_handler()
404 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
H A Dptrace_32.c65 void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument
74 attr = bp->attr; ptrace_triggered()
76 modify_user_hw_breakpoint(bp, &attr); ptrace_triggered()
82 struct perf_event *bp; set_single_step() local
85 bp = thread->ptrace_bps[0]; set_single_step()
86 if (!bp) { set_single_step()
93 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, set_single_step()
95 if (IS_ERR(bp)) set_single_step()
96 return PTR_ERR(bp); set_single_step()
98 thread->ptrace_bps[0] = bp; set_single_step()
102 attr = bp->attr; set_single_step()
106 err = modify_user_hw_breakpoint(bp, &attr); set_single_step()
/linux-4.4.14/arch/sh/include/asm/
H A Dhw_breakpoint.h56 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
57 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
61 int arch_install_hw_breakpoint(struct perf_event *bp);
62 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
63 void hw_breakpoint_pmu_read(struct perf_event *bp);
65 extern void arch_fill_perf_breakpoint(struct perf_event *bp);
/linux-4.4.14/net/ax25/
H A Dax25_ip.c105 unsigned char *bp = skb->data; ax25_ip_xmit() local
114 dst = (ax25_address *)(bp + 1); ax25_ip_xmit()
115 src = (ax25_address *)(bp + 8); ax25_ip_xmit()
132 if (bp[16] == AX25_P_IP) { ax25_ip_xmit()
164 * to bp which is part of skb->data would not be valid ax25_ip_xmit()
168 bp = ourskb->data; ax25_ip_xmit()
169 dst_c = *(ax25_address *)(bp + 1); ax25_ip_xmit()
170 src_c = *(ax25_address *)(bp + 8); ax25_ip_xmit()
187 bp[7] &= ~AX25_CBIT; ax25_ip_xmit()
188 bp[7] &= ~AX25_EBIT; ax25_ip_xmit()
189 bp[7] |= AX25_SSSID_SPARE; ax25_ip_xmit()
191 bp[14] &= ~AX25_CBIT; ax25_ip_xmit()
192 bp[14] |= AX25_EBIT; ax25_ip_xmit()
193 bp[14] |= AX25_SSSID_SPARE; ax25_ip_xmit()
/linux-4.4.14/drivers/isdn/mISDN/
H A Dcore.c129 char *bp = buf; channelmap_show() local
133 *bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0'; channelmap_show()
135 return bp - buf; channelmap_show()
286 struct Bprotocol *bp; get_all_Bprotocols() local
290 list_for_each_entry(bp, &Bprotocols, list) get_all_Bprotocols()
291 m |= bp->Bprotocols; get_all_Bprotocols()
299 struct Bprotocol *bp; get_Bprotocol4mask() local
302 list_for_each_entry(bp, &Bprotocols, list) get_Bprotocol4mask()
303 if (bp->Bprotocols & m) { get_Bprotocol4mask()
305 return bp; get_Bprotocol4mask()
326 mISDN_register_Bprotocol(struct Bprotocol *bp) mISDN_register_Bprotocol() argument
333 bp->name, bp->Bprotocols); mISDN_register_Bprotocol()
334 old = get_Bprotocol4mask(bp->Bprotocols); mISDN_register_Bprotocol()
338 old->name, old->Bprotocols, bp->name, bp->Bprotocols); mISDN_register_Bprotocol()
342 list_add_tail(&bp->list, &Bprotocols); mISDN_register_Bprotocol()
349 mISDN_unregister_Bprotocol(struct Bprotocol *bp) mISDN_unregister_Bprotocol() argument
354 printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, mISDN_unregister_Bprotocol()
355 bp->Bprotocols); mISDN_unregister_Bprotocol()
357 list_del(&bp->list); mISDN_unregister_Bprotocol()
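The mISDN hits above implement a small protocol registry: get_Bprotocol4mask() walks a global list matching on a bitmask, and registration refuses any entry whose mask overlaps an existing one. A hedged sketch of that overlap check (simplified, no locking, hypothetical names):

	#include <stdio.h>
	#include <stddef.h>

	struct bproto {                       /* stand-in for struct Bprotocol */
		const char *name;
		unsigned int mask;            /* stand-in for ->Bprotocols */
		struct bproto *next;
	};

	static struct bproto *protos;         /* global registry head */

	static struct bproto *find_by_mask(unsigned int m)
	{
		for (struct bproto *bp = protos; bp; bp = bp->next)
			if (bp->mask & m)     /* any overlapping bit matches */
				return bp;
		return NULL;
	}

	static int register_bproto(struct bproto *bp)
	{
		struct bproto *old = find_by_mask(bp->mask);

		if (old) {                    /* mask collision: reject */
			fprintf(stderr, "%s collides with %s\n", bp->name, old->name);
			return -1;
		}
		bp->next = protos;
		protos = bp;
		return 0;
	}

	int main(void)
	{
		struct bproto a = { "hdlc", 0x1 }, b = { "x75", 0x3 };

		printf("%d\n", register_bproto(&a));   /* 0: registered */
		printf("%d\n", register_bproto(&b));   /* -1: bit 0 overlaps */
		return 0;
	}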
/linux-4.4.14/arch/x86/kernel/
H A Dhw_breakpoint.c105 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument
107 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint()
115 *slot = bp; arch_install_hw_breakpoint()
145 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument
147 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint()
154 if (*slot == bp) { arch_uninstall_hw_breakpoint()
174 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument
178 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace()
181 len = bp->attr.bp_len; arch_check_bp_in_kernelspace()
236 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument
238 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info()
240 info->address = bp->attr.bp_addr; arch_build_bp_info()
243 switch (bp->attr.bp_type) { arch_build_bp_info()
256 if (bp->attr.bp_addr >= TASK_SIZE_MAX) { arch_build_bp_info()
258 if (within_kprobe_blacklist(bp->attr.bp_addr)) arch_build_bp_info()
271 if (bp->attr.bp_len == sizeof(long)) { arch_build_bp_info()
282 switch (bp->attr.bp_len) { arch_build_bp_info()
299 if (!is_power_of_2(bp->attr.bp_len)) arch_build_bp_info()
301 if (bp->attr.bp_addr & (bp->attr.bp_len - 1)) arch_build_bp_info()
312 info->mask = bp->attr.bp_len - 1; arch_build_bp_info()
322 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument
324 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings()
329 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings()
376 struct perf_event *bp; aout_dump_debugregs() local
381 bp = thread->ptrace_bps[i]; aout_dump_debugregs()
383 if (bp && !bp->attr.disabled) { aout_dump_debugregs()
384 dump->u_debugreg[i] = bp->attr.bp_addr; aout_dump_debugregs()
385 info = counter_arch_bp(bp); aout_dump_debugregs()
447 struct perf_event *bp; hw_breakpoint_handler() local
487 bp = per_cpu(bp_per_reg[i], cpu); hw_breakpoint_handler()
494 * bp can be NULL due to lazy debug register switching hw_breakpoint_handler()
497 if (!bp) { hw_breakpoint_handler()
502 perf_bp_event(bp, args->regs); hw_breakpoint_handler()
508 if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE) hw_breakpoint_handler()
540 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
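Both the sh and x86 arch_install_hw_breakpoint() hits follow the same shape: scan a small fixed array of slots, claim the first free one, fail when all are busy. A stripped-down sketch of that slot allocator (user space, hypothetical names; the kernel versions use per-cpu arrays and also program the debug registers):

	#include <stdio.h>
	#include <errno.h>

	#define HBP_NUM 4                     /* x86 has four debug-address slots */

	static void *bp_slot[HBP_NUM];        /* stand-in for per-cpu bp_per_reg */

	static int install_bp(void *bp)
	{
		for (int i = 0; i < HBP_NUM; i++) {
			if (!bp_slot[i]) {
				bp_slot[i] = bp;      /* claim first free slot */
				return i;             /* kernel would program DR<i> here */
			}
		}
		return -EBUSY;                        /* all slots taken */
	}

	static void uninstall_bp(void *bp)
	{
		for (int i = 0; i < HBP_NUM; i++) {
			if (bp_slot[i] == bp) {
				bp_slot[i] = NULL;    /* release the matching slot */
				return;
			}
		}
	}

	int main(void)
	{
		int x, y;

		printf("%d\n", install_bp(&x));       /* 0 */
		printf("%d\n", install_bp(&y));       /* 1 */
		uninstall_bp(&x);
		printf("%d\n", install_bp(&y));       /* 0 again: freed slot reused */
		return 0;
	}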
H A Ddumpstack_32.c42 unsigned long *stack, unsigned long bp, dump_trace()
60 if (!bp) dump_trace()
61 bp = stack_frame(task, regs); dump_trace()
72 bp = ops->walk_stack(context, stack, bp, ops, data, dump_trace()
95 unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl()
120 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl()
41 dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) dump_trace() argument
94 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() argument
H A Ddumpstack.c97 unsigned long *stack, unsigned long bp, print_context_stack()
101 struct stack_frame *frame = (struct stack_frame *)bp; print_context_stack()
108 if ((unsigned long) stack == bp + sizeof(long)) { print_context_stack()
111 bp = (unsigned long) frame; print_context_stack()
119 return bp; print_context_stack()
125 unsigned long *stack, unsigned long bp, print_context_stack_bp()
129 struct stack_frame *frame = (struct stack_frame *)bp; print_context_stack_bp()
171 unsigned long *stack, unsigned long bp, char *log_lvl) show_trace_log_lvl()
174 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); show_trace_log_lvl()
178 unsigned long *stack, unsigned long bp) show_trace()
180 show_trace_log_lvl(task, regs, stack, bp, ""); show_trace()
185 unsigned long bp = 0; show_stack() local
194 bp = stack_frame(current, NULL); show_stack()
197 show_stack_log_lvl(task, NULL, sp, bp, ""); show_stack()
96 print_context_stack(struct thread_info *tinfo, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) print_context_stack() argument
124 print_context_stack_bp(struct thread_info *tinfo, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) print_context_stack_bp() argument
170 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, char *log_lvl) show_trace_log_lvl() argument
177 show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp) show_trace() argument
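print_context_stack() above treats bp as a pointer to struct stack_frame { next_frame; return_address; } and hops frame to frame. The same walk can be demonstrated in user space when frame pointers are kept, e.g. built with gcc -O0 -fno-omit-frame-pointer (a sketch of the technique, not the kernel code; the depth cap and monotonic check stand in for the kernel's stack-bounds validation):

	#include <stdio.h>
	#include <stdint.h>

	struct stack_frame {                       /* layout the unwinder assumes */
		struct stack_frame *next_frame;    /* saved frame pointer of caller */
		unsigned long return_address;      /* saved return address */
	};

	static void walk_stack(void)
	{
		struct stack_frame *frame = __builtin_frame_address(0);
		int depth = 0;

		while (frame && depth++ < 8) {
			printf("bp=%p ret=%#lx\n", (void *)frame, frame->return_address);
			if ((uintptr_t)frame->next_frame <= (uintptr_t)frame)
				break;                     /* chain must move up the stack */
			frame = frame->next_frame;     /* hop to the caller's frame */
		}
	}

	static void leaf(void) { walk_stack(); }
	static void mid(void)  { leaf(); }

	int main(void)
	{
		mid();                                 /* prints one line per frame */
		return 0;
	}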
H A Ddumpstack_64.c152 unsigned long *stack, unsigned long bp, dump_trace()
175 if (!bp) dump_trace()
176 bp = stack_frame(task, regs); dump_trace()
205 bp = ops->walk_stack(tinfo, stack, bp, ops, dump_trace()
221 bp = ops->walk_stack(tinfo, stack, bp, dump_trace()
243 bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); dump_trace()
250 unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl()
297 show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_stack_log_lvl()
151 dump_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data) dump_trace() argument
249 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, unsigned long bp, char *log_lvl) show_stack_log_lvl() argument
H A Dkgdb.c61 { "bp", 4, offsetof(struct pt_regs, bp) },
77 { "bp", 8, offsetof(struct pt_regs, bp) },
210 struct perf_event *bp; kgdb_correct_hw_break() local
224 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); kgdb_correct_hw_break()
225 info = counter_arch_bp(bp); kgdb_correct_hw_break()
226 if (bp->attr.disabled != 1) kgdb_correct_hw_break()
228 bp->attr.bp_addr = breakinfo[breakno].addr; kgdb_correct_hw_break()
229 bp->attr.bp_len = breakinfo[breakno].len; kgdb_correct_hw_break()
230 bp->attr.bp_type = breakinfo[breakno].type; kgdb_correct_hw_break()
234 val = arch_install_hw_breakpoint(bp); kgdb_correct_hw_break()
236 bp->attr.disabled = 0; kgdb_correct_hw_break()
315 struct perf_event *bp; kgdb_remove_all_hw_break() local
320 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); kgdb_remove_all_hw_break()
321 if (!bp->attr.disabled) { kgdb_remove_all_hw_break()
322 arch_uninstall_hw_breakpoint(bp); kgdb_remove_all_hw_break()
323 bp->attr.disabled = 1; kgdb_remove_all_hw_break()
401 struct perf_event *bp; kgdb_disable_hw_debug() local
413 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); kgdb_disable_hw_debug()
414 if (bp->attr.disabled == 1) kgdb_disable_hw_debug()
416 arch_uninstall_hw_breakpoint(bp); kgdb_disable_hw_debug()
417 bp->attr.disabled = 1; kgdb_disable_hw_debug()
H A Dptrace.c78 REG_OFFSET_NAME(bp),
556 static void ptrace_triggered(struct perf_event *bp, ptrace_triggered() argument
568 if (thread->ptrace_bps[i] == bp) ptrace_triggered()
580 static unsigned long ptrace_get_dr7(struct perf_event *bp[]) ptrace_get_dr7() argument
587 if (bp[i] && !bp[i]->attr.disabled) { ptrace_get_dr7()
588 info = counter_arch_bp(bp[i]); ptrace_get_dr7()
629 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, ptrace_modify_breakpoint() argument
632 struct perf_event_attr attr = bp->attr; ptrace_modify_breakpoint()
639 return modify_user_hw_breakpoint(bp, &attr); ptrace_modify_breakpoint()
660 struct perf_event *bp = thread->ptrace_bps[i]; ptrace_write_dr7() local
662 if (!bp) { ptrace_write_dr7()
666 bp = ptrace_register_breakpoint(tsk, ptrace_write_dr7()
668 if (IS_ERR(bp)) { ptrace_write_dr7()
669 rc = PTR_ERR(bp); ptrace_write_dr7()
673 thread->ptrace_bps[i] = bp; ptrace_write_dr7()
677 rc = ptrace_modify_breakpoint(bp, len, type, disabled); ptrace_write_dr7()
702 struct perf_event *bp = thread->ptrace_bps[n]; ptrace_get_debugreg() local
704 if (bp) ptrace_get_debugreg()
705 val = bp->hw.info.address; ptrace_get_debugreg()
718 struct perf_event *bp = t->ptrace_bps[nr]; ptrace_set_breakpoint_addr() local
721 if (!bp) { ptrace_set_breakpoint_addr()
723 * Put stub len and type to create an inactive but correct bp. ptrace_set_breakpoint_addr()
733 bp = ptrace_register_breakpoint(tsk, ptrace_set_breakpoint_addr()
736 if (IS_ERR(bp)) ptrace_set_breakpoint_addr()
737 err = PTR_ERR(bp); ptrace_set_breakpoint_addr()
739 t->ptrace_bps[nr] = bp; ptrace_set_breakpoint_addr()
741 struct perf_event_attr attr = bp->attr; ptrace_set_breakpoint_addr()
744 err = modify_user_hw_breakpoint(bp, &attr); ptrace_set_breakpoint_addr()
966 R32(ebp, bp); putreg32()
1036 R32(ebp, bp); getreg32()
H A Dasm-offsets.c52 OFFSET(IA32_SIGCONTEXT_bp, sigcontext_32, bp); common()
H A Dasm-offsets_32.c34 OFFSET(PT_EBP, pt_regs, bp); foo()
H A Dasm-offsets_64.c37 ENTRY(bp); main()
H A Dtime.c35 return *(unsigned long *)(regs->bp + sizeof(long)); profile_pc()
H A Dperf_regs.c25 PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
157 regs_user_copy->bp = -1; perf_get_regs_user()
/linux-4.4.14/drivers/staging/speakup/
H A Dselection.c55 char *bp, *obp; speakup_set_selection() local
102 bp = kmalloc((sel_end-sel_start)/2+1, GFP_ATOMIC); speakup_set_selection()
103 if (!bp) { speakup_set_selection()
108 sel_buffer = bp; speakup_set_selection()
110 obp = bp; speakup_set_selection()
112 *bp = sel_pos(i); speakup_set_selection()
113 if (!ishardspace(*bp++)) speakup_set_selection()
114 obp = bp; speakup_set_selection()
119 if (obp != bp) { speakup_set_selection()
120 bp = obp; speakup_set_selection()
121 *bp++ = '\r'; speakup_set_selection()
123 obp = bp; speakup_set_selection()
126 sel_buffer_lth = bp - sel_buffer; speakup_set_selection()
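In speakup_set_selection() above (and the near-identical vt set_selection() later in these results), obp trails bp and is advanced only past non-space characters, so at a row boundary obp marks the end of the last word: trailing blanks are dropped and a '\r' stored, while a row filled to the last column is left unbroken as a wrapped line. A compact sketch of that two-pointer trim over fixed-width rows:

	#include <stdio.h>

	#define ROW 8                          /* pretend console width */

	/* Copy whole screen rows, stripping trailing blanks and adding '\r',
	 * except for rows whose text runs to the last column: those are taken
	 * as wrapped lines and joined -- the bp/obp idiom from the hit above. */
	static size_t copy_rows(char *dst, const char *screen, int rows)
	{
		char *bp = dst, *obp = dst;

		for (int i = 0; i < rows * ROW; i++) {
			*bp = screen[i];
			if (*bp++ != ' ')
				obp = bp;              /* end of last word so far */
			if ((i + 1) % ROW == 0) {  /* row boundary */
				if (obp != bp) {       /* row had trailing blanks */
					bp = obp;          /* strip them */
					*bp++ = '\r';      /* and end the line */
				}                      /* full row: wrapped, no break */
				obp = bp;
			}
		}
		return bp - dst;
	}

	int main(void)
	{
		const char screen[] =
			"hi      "      /* short line: trimmed, gets '\r' */
			"abcdefgh"      /* full row: treated as wrapped, no break */
			"ij      ";
		char out[64];
		size_t n = copy_rows(out, screen, 3);

		printf("%zu bytes\n", n);   /* 14: "hi\rabcdefghij\r" */
		return 0;
	}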
/linux-4.4.14/fs/freevxfs/
H A Dvxfs_olt.c80 struct buffer_head *bp; vxfs_read_olt() local
85 bp = sb_bread(sbp, vxfs_oblock(sbp, infp->vsi_oltext, bsize)); vxfs_read_olt()
86 if (!bp || !bp->b_data) vxfs_read_olt()
89 op = (struct vxfs_olt *)bp->b_data; vxfs_read_olt()
105 oaddr = bp->b_data + op->olt_size; vxfs_read_olt()
106 eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize); vxfs_read_olt()
124 brelse(bp); vxfs_read_olt()
128 brelse(bp); vxfs_read_olt()
H A Dvxfs_subr.c104 struct buffer_head *bp; vxfs_bread() local
108 bp = sb_bread(ip->i_sb, pblock); vxfs_bread()
110 return (bp); vxfs_bread()
117 * @bp: buffer skeleton
121 * The vxfs_get_block function fills @bp with the right physical
130 struct buffer_head *bp, int create) vxfs_getblk()
136 map_bh(bp, ip->i_sb, pblock); vxfs_getblk()
129 vxfs_getblk(struct inode *ip, sector_t iblock, struct buffer_head *bp, int create) vxfs_getblk() argument
H A Dvxfs_fshead.c78 struct buffer_head *bp; vxfs_getfsh() local
80 bp = vxfs_bread(ip, which); vxfs_getfsh()
81 if (bp) { vxfs_getfsh()
86 memcpy(fhp, bp->b_data, sizeof(*fhp)); vxfs_getfsh()
88 put_bh(bp); vxfs_getfsh()
92 brelse(bp); vxfs_getfsh()
H A Dvxfs_bmap.c130 struct buffer_head *bp = NULL; vxfs_bmap_indir() local
138 bp = sb_bread(ip->i_sb, vxfs_bmap_indir()
140 if (!bp || !buffer_mapped(bp)) vxfs_bmap_indir()
143 typ = ((struct vxfs_typed *)bp->b_data) + vxfs_bmap_indir()
148 brelse(bp); vxfs_bmap_indir()
179 brelse(bp); vxfs_bmap_indir()
185 brelse(bp); vxfs_bmap_indir()
H A Dvxfs_super.c152 struct buffer_head *bp = NULL; vxfs_fill_super() local
171 bp = sb_bread(sbp, 1); vxfs_fill_super()
172 if (!bp || !buffer_mapped(bp)) { vxfs_fill_super()
180 rsbp = (struct vxfs_sb *)bp->b_data; vxfs_fill_super()
202 infp->vsi_bp = bp; vxfs_fill_super()
240 brelse(bp); vxfs_fill_super()
H A Dvxfs_inode.c91 struct buffer_head *bp; vxfs_blkiget() local
96 bp = sb_bread(sbp, block); vxfs_blkiget()
98 if (bp && buffer_mapped(bp)) { vxfs_blkiget()
104 dip = (struct vxfs_dinode *)(bp->b_data + offset); vxfs_blkiget()
109 brelse(bp); vxfs_blkiget()
115 brelse(bp); vxfs_blkiget()
/linux-4.4.14/arch/um/kernel/
H A Dstacktrace.c24 unsigned long *sp, bp, addr; dump_trace() local
28 bp = get_frame_pointer(tsk, segv_regs); dump_trace()
31 frame = (struct stack_frame *)bp; dump_trace()
36 if ((unsigned long) sp == bp + sizeof(long)) { dump_trace()
38 bp = (unsigned long)frame; dump_trace()
/linux-4.4.14/arch/m68k/include/asm/
H A Dio_no.h59 unsigned char *bp = (unsigned char *) buf; io_outsb() local
61 *ap = *bp++; io_outsb()
67 unsigned short *bp = (unsigned short *) buf; io_outsw() local
69 *ap = _swapw(*bp++); io_outsw()
75 unsigned int *bp = (unsigned int *) buf; io_outsl() local
77 *ap = _swapl(*bp++); io_outsl()
83 unsigned char *bp = (unsigned char *) buf; io_insb() local
85 *bp++ = *ap; io_insb()
91 unsigned short *bp = (unsigned short *) buf; io_insw() local
93 *bp++ = _swapw(*ap); io_insw()
99 unsigned int *bp = (unsigned int *) buf; io_insl() local
101 *bp++ = _swapl(*ap); io_insl()
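The io_outsw()/io_insw() helpers above copy a buffer to or from a device register one element at a time, byte-swapping with _swapw()/_swapl() between CPU and bus order. A portable sketch of the same pattern (hypothetical names; __builtin_bswap16 standing in for _swapw, a plain variable for the memory-mapped port):

	#include <stdio.h>
	#include <stdint.h>

	/* Write 'len' 16-bit words from buf to a (fake) device register,
	 * swapping each element's bytes on the way -- the shape of
	 * io_outsw() in the hit above. */
	static void outsw_swapped(volatile uint16_t *reg, const void *buf, int len)
	{
		const uint16_t *bp = buf;

		while (len--)
			*reg = __builtin_bswap16(*bp++);   /* _swapw() equivalent */
	}

	int main(void)
	{
		uint16_t fake_reg;                         /* pretend device port */
		const uint16_t data[] = { 0x1234, 0xabcd };

		outsw_swapped(&fake_reg, data, 2);
		printf("last word written: 0x%04x\n", fake_reg);  /* 0xcdab */
		return 0;
	}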
/linux-4.4.14/Documentation/spi/
H A Dspidev_fdx.c19 unsigned char buf[32], *bp; do_read() local
42 bp = buf + 2; do_read()
44 printf(" %02x", *bp++); do_read()
51 unsigned char buf[32], *bp; do_msg() local
74 for (bp = buf; len; len--) do_msg()
75 printf(" %02x", *bp++); do_msg()
/linux-4.4.14/net/sctp/
H A Dbind_addr.c125 void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port) sctp_bind_addr_init() argument
127 INIT_LIST_HEAD(&bp->address_list); sctp_bind_addr_init()
128 bp->port = port; sctp_bind_addr_init()
132 static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) sctp_bind_addr_clean() argument
137 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { sctp_bind_addr_clean()
145 void sctp_bind_addr_free(struct sctp_bind_addr *bp) sctp_bind_addr_free() argument
148 sctp_bind_addr_clean(bp); sctp_bind_addr_free()
152 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, sctp_add_bind_addr() argument
168 addr->a.v4.sin_port = htons(bp->port); sctp_add_bind_addr()
178 list_add_tail_rcu(&addr->list, &bp->address_list); sctp_add_bind_addr()
187 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) sctp_del_bind_addr() argument
195 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { sctp_del_bind_addr()
219 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, sctp_bind_addrs_to_raw() argument
236 list_for_each(pos, &bp->address_list) { sctp_bind_addrs_to_raw()
254 list_for_each_entry(addr, &bp->address_list, list) { sctp_bind_addrs_to_raw()
271 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, sctp_raw_to_bind_addrs() argument
289 sctp_bind_addr_clean(bp); sctp_raw_to_bind_addrs()
294 retval = sctp_add_bind_addr(bp, &addr, SCTP_ADDR_SRC, gfp); sctp_raw_to_bind_addrs()
297 sctp_bind_addr_clean(bp); sctp_raw_to_bind_addrs()
314 int sctp_bind_addr_match(struct sctp_bind_addr *bp, sctp_bind_addr_match() argument
322 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_match()
336 * the bp.
338 int sctp_bind_addr_conflict(struct sctp_bind_addr *bp, sctp_bind_addr_conflict() argument
359 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_conflict()
373 int sctp_bind_addr_state(const struct sctp_bind_addr *bp, sctp_bind_addr_state() argument
385 list_for_each_entry_rcu(laddr, &bp->address_list, list) { sctp_bind_addr_state()
401 union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, sctp_find_unmatch_addr() argument
416 list_for_each_entry(laddr, &bp->address_list, list) { sctp_find_unmatch_addr()
525 struct sctp_bind_addr *bp; sctp_is_ep_boundall() local
528 bp = &sctp_sk(sk)->ep->base.bind_addr; sctp_is_ep_boundall()
529 if (sctp_list_single_entry(&bp->address_list)) { sctp_is_ep_boundall()
530 addr = list_entry(bp->address_list.next, sctp_is_ep_boundall()
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_main.c93 struct mic_bootparam *bp = sdev->dp; scif_qp_setup_handler() local
95 da = bp->scif_card_dma_addr; scif_qp_setup_handler()
96 scifdev->rdb = bp->h2c_scif_db; scif_qp_setup_handler()
98 struct mic_bootparam __iomem *bp = sdev->rdp; scif_qp_setup_handler() local
100 da = readq(&bp->scif_host_dma_addr); scif_qp_setup_handler()
101 scifdev->rdb = ioread8(&bp->c2h_scif_db); scif_qp_setup_handler()
179 struct mic_bootparam *bp = sdev->dp; scif_probe() local
181 bp->c2h_scif_db = scifdev->db; scif_probe()
182 bp->scif_host_dma_addr = scifdev->qp_dma_addr; scif_probe()
184 struct mic_bootparam __iomem *bp = sdev->rdp; scif_probe() local
186 iowrite8(scifdev->db, &bp->h2c_scif_db); scif_probe()
187 writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr); scif_probe()
221 struct mic_bootparam *bp = sdev->dp; scif_remove() local
223 bp->c2h_scif_db = -1; scif_remove()
224 bp->scif_host_dma_addr = 0x0; scif_remove()
226 struct mic_bootparam __iomem *bp = sdev->rdp; scif_remove() local
228 iowrite8(-1, &bp->h2c_scif_db); scif_remove()
229 writeq(0x0, &bp->scif_card_dma_addr); scif_remove()
/linux-4.4.14/lib/mpi/
H A Dmpi-pow.c41 mpi_ptr_t rp, ep, mp, bp; mpi_powm() local
91 bp = bp_marker = mpi_alloc_limb_space(bsize + 1); mpi_powm()
92 if (!bp) mpi_powm()
94 MPN_COPY(bp, base->d, bsize); mpi_powm()
97 mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize); mpi_powm()
101 MPN_NORMALIZE(bp, bsize); mpi_powm()
103 bp = base->d; mpi_powm()
115 if (rp == ep || rp == mp || rp == bp) { mpi_powm()
126 if (rp == bp) { mpi_powm()
129 bp = bp_marker = mpi_alloc_limb_space(bsize); mpi_powm()
130 if (!bp) mpi_powm()
132 MPN_COPY(bp, rp, bsize); mpi_powm()
151 MPN_COPY(rp, bp, bsize); mpi_powm()
225 /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */ mpi_powm()
229 (xp, rp, rsize, bp, bsize, mpi_powm()
234 (xp, rp, rsize, bp, bsize, mpi_powm()
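mpi_powm() above is arbitrary-precision modular exponentiation; the bp/ep/mp limb arrays hold base, exponent and modulus, with the base pre-reduced by mpihelp_divrem() when it is wider than the modulus. Stripped of the limb arithmetic, the control flow is square-and-multiply, which fits in a few lines at machine-word size (a sketch, not the MPI code; __uint128_t is a GCC/Clang extension used here to keep the products from overflowing):

	#include <stdio.h>
	#include <stdint.h>

	/* Right-to-left square-and-multiply: returns b^e mod m. */
	static uint64_t powm(uint64_t b, uint64_t e, uint64_t m)
	{
		uint64_t r = 1;

		b %= m;                            /* pre-reduce, like mpihelp_divrem */
		while (e) {
			if (e & 1)                     /* low exponent bit set: multiply */
				r = (uint64_t)((__uint128_t)r * b % m);
			b = (uint64_t)((__uint128_t)b * b % m);   /* square each step */
			e >>= 1;
		}
		return r;
	}

	int main(void)
	{
		/* 7^560 mod 561 == 1 (561 is a Carmichael number) */
		printf("%llu\n", (unsigned long long)powm(7, 560, 561));
		return 0;
	}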
/linux-4.4.14/arch/arm/include/asm/
H A Dhw_breakpoint.h120 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
121 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
129 int arch_install_hw_breakpoint(struct perf_event *bp);
130 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
131 void hw_breakpoint_pmu_read(struct perf_event *bp);
/linux-4.4.14/arch/sparc/kernel/
H A Dchmc.c486 static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) chmc_bank_match() argument
492 if (bp->valid == 0) chmc_bank_match()
496 upper_bits ^= bp->um; /* What bits are different? */ chmc_bank_match()
498 upper_bits |= bp->uk; /* What bits don't matter for matching? */ chmc_bank_match()
505 lower_bits ^= bp->lm; /* What bits are different? */ chmc_bank_match()
507 lower_bits |= bp->lk; /* What bits don't matter for matching? */ chmc_bank_match()
526 struct chmc_bank_info *bp; chmc_find_bank() local
528 bp = &p->logical_banks[bank_no]; chmc_find_bank()
529 if (chmc_bank_match(bp, phys_addr)) chmc_find_bank()
530 return bp; chmc_find_bank()
542 struct chmc_bank_info *bp; chmc_print_dimm() local
546 bp = chmc_find_bank(phys_addr); chmc_print_dimm()
547 if (bp == NULL || chmc_print_dimm()
557 prop = &bp->p->layout_prop; chmc_print_dimm()
558 bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); chmc_print_dimm()
631 struct chmc_bank_info *bp = &p->logical_banks[which_bank]; chmc_interpret_one_decode_reg() local
633 bp->p = p; chmc_interpret_one_decode_reg()
634 bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; chmc_interpret_one_decode_reg()
635 bp->raw_reg = val; chmc_interpret_one_decode_reg()
636 bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; chmc_interpret_one_decode_reg()
637 bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; chmc_interpret_one_decode_reg()
638 bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; chmc_interpret_one_decode_reg()
639 bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; chmc_interpret_one_decode_reg()
640 bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; chmc_interpret_one_decode_reg()
642 bp->base = (bp->um); chmc_interpret_one_decode_reg()
643 bp->base &= ~(bp->uk); chmc_interpret_one_decode_reg()
644 bp->base <<= PA_UPPER_BITS_SHIFT; chmc_interpret_one_decode_reg()
646 switch(bp->lk) { chmc_interpret_one_decode_reg()
649 bp->interleave = 1; chmc_interpret_one_decode_reg()
653 bp->interleave = 2; chmc_interpret_one_decode_reg()
657 bp->interleave = 4; chmc_interpret_one_decode_reg()
661 bp->interleave = 8; chmc_interpret_one_decode_reg()
665 bp->interleave = 16; chmc_interpret_one_decode_reg()
672 bp->size = (((unsigned long)bp->uk & chmc_interpret_one_decode_reg()
674 bp->size /= bp->interleave; chmc_interpret_one_decode_reg()
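chmc_bank_match() above decides whether a physical address falls in a memory bank by XOR-ing the address with the bank's match value (um/lm) and masking off the "don't care" bits (uk/lk); the kernel spells it with two inversions, but it reduces to the check below. A one-function sketch of that mask-and-compare trick (hypothetical field names):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct bank {                          /* stand-in for chmc_bank_info */
		uint64_t match;                /* bits the address must equal (um/lm) */
		uint64_t dontcare;             /* bits that never matter (uk/lk) */
	};

	static bool bank_match(const struct bank *bp, uint64_t addr)
	{
		uint64_t diff = addr ^ bp->match;  /* which bits differ? */

		diff &= ~bp->dontcare;             /* ignore the don't-care bits */
		return diff == 0;                  /* no meaningful difference: hit */
	}

	int main(void)
	{
		/* bank decodes 0x40000000 with the low 28 bits interleaved away */
		struct bank b = { .match = 0x40000000, .dontcare = 0x0fffffff };

		printf("%d\n", bank_match(&b, 0x4abcdef0));   /* 1: inside bank */
		printf("%d\n", bank_match(&b, 0x50000000));   /* 0: wrong upper bits */
		return 0;
	}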
/linux-4.4.14/drivers/input/joystick/
H A Dtwidjoy.c106 struct twidjoy_button_spec *bp; twidjoy_process_packet() local
111 for (bp = twidjoy_buttons; bp->bitmask; bp++) { twidjoy_process_packet()
112 int value = (button_bits & (bp->bitmask << bp->bitshift)) >> bp->bitshift; twidjoy_process_packet()
115 for (i = 0; i < bp->bitmask; i++) twidjoy_process_packet()
116 input_report_key(dev, bp->buttons[i], i+1 == value); twidjoy_process_packet()
183 struct twidjoy_button_spec *bp; twidjoy_connect() local
209 for (bp = twidjoy_buttons; bp->bitmask; bp++) twidjoy_connect()
210 for (i = 0; i < bp->bitmask; i++) twidjoy_connect()
211 set_bit(bp->buttons[i], input_dev->keybit); twidjoy_connect()
/linux-4.4.14/arch/arm64/kernel/
H A Dhw_breakpoint.c167 static int is_compat_bp(struct perf_event *bp) is_compat_bp() argument
169 struct task_struct *tsk = bp->hw.target; is_compat_bp()
187 * @bp: perf_event to setup
196 struct perf_event *bp, hw_breakpoint_slot_setup()
207 *slot = bp; hw_breakpoint_slot_setup()
212 if (*slot == bp) { hw_breakpoint_slot_setup()
218 if (*slot == bp) hw_breakpoint_slot_setup()
229 static int hw_breakpoint_control(struct perf_event *bp, hw_breakpoint_control() argument
232 struct arch_hw_breakpoint *info = counter_arch_bp(bp); hw_breakpoint_control()
255 i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); hw_breakpoint_control()
295 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument
297 return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL); arch_install_hw_breakpoint()
300 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument
302 hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL); arch_uninstall_hw_breakpoint()
328 * Check whether bp virtual address is in kernel space.
330 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument
334 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace()
392 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument
394 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info()
397 switch (bp->attr.bp_type) { arch_build_bp_info()
415 switch (bp->attr.bp_len) { arch_build_bp_info()
438 if (is_compat_bp(bp)) { arch_build_bp_info()
454 info->address = bp->attr.bp_addr; arch_build_bp_info()
461 if (arch_check_bp_in_kernelspace(bp)) arch_build_bp_info()
467 info->ctrl.enabled = !bp->attr.disabled; arch_build_bp_info()
475 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument
477 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings()
482 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings()
495 if (is_compat_bp(bp)) { arch_validate_hwbkpt_settings()
532 if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) arch_validate_hwbkpt_settings()
588 struct perf_event *bp, **slots; breakpoint_handler() local
599 bp = slots[i]; breakpoint_handler()
601 if (bp == NULL) breakpoint_handler()
615 counter_arch_bp(bp)->trigger = addr; breakpoint_handler()
616 perf_bp_event(bp, regs); breakpoint_handler()
619 if (!bp->overflow_handler) breakpoint_handler()
945 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
195 hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, struct perf_event *bp, enum hw_breakpoint_ops ops) hw_breakpoint_slot_setup() argument
H A Dptrace.c73 static void ptrace_hbptriggered(struct perf_event *bp, ptrace_hbptriggered() argument
77 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); ptrace_hbptriggered()
92 if (current->thread.debug.hbp_break[i] == bp) { ptrace_hbptriggered()
99 if (current->thread.debug.hbp_watch[i] == bp) { ptrace_hbptriggered()
143 struct perf_event *bp = ERR_PTR(-EINVAL); ptrace_hbp_get_event() local
148 bp = tsk->thread.debug.hbp_break[idx]; ptrace_hbp_get_event()
152 bp = tsk->thread.debug.hbp_watch[idx]; ptrace_hbp_get_event()
156 return bp; ptrace_hbp_get_event()
162 struct perf_event *bp) ptrace_hbp_set_event()
169 tsk->thread.debug.hbp_break[idx] = bp; ptrace_hbp_set_event()
175 tsk->thread.debug.hbp_watch[idx] = bp; ptrace_hbp_set_event()
188 struct perf_event *bp; ptrace_hbp_create() local
214 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); ptrace_hbp_create()
215 if (IS_ERR(bp)) ptrace_hbp_create()
216 return bp; ptrace_hbp_create()
218 err = ptrace_hbp_set_event(note_type, tsk, idx, bp); ptrace_hbp_create()
222 return bp; ptrace_hbp_create()
287 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_ctrl() local
289 if (IS_ERR(bp)) ptrace_hbp_get_ctrl()
290 return PTR_ERR(bp); ptrace_hbp_get_ctrl()
292 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; ptrace_hbp_get_ctrl()
301 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_addr() local
303 if (IS_ERR(bp)) ptrace_hbp_get_addr()
304 return PTR_ERR(bp); ptrace_hbp_get_addr()
306 *addr = bp ? bp->attr.bp_addr : 0; ptrace_hbp_get_addr()
314 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); ptrace_hbp_get_initialised_bp() local
316 if (!bp) ptrace_hbp_get_initialised_bp()
317 bp = ptrace_hbp_create(note_type, tsk, idx); ptrace_hbp_get_initialised_bp()
319 return bp; ptrace_hbp_get_initialised_bp()
328 struct perf_event *bp; ptrace_hbp_set_ctrl() local
332 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); ptrace_hbp_set_ctrl()
333 if (IS_ERR(bp)) { ptrace_hbp_set_ctrl()
334 err = PTR_ERR(bp); ptrace_hbp_set_ctrl()
338 attr = bp->attr; ptrace_hbp_set_ctrl()
344 return modify_user_hw_breakpoint(bp, &attr); ptrace_hbp_set_ctrl()
353 struct perf_event *bp; ptrace_hbp_set_addr() local
356 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); ptrace_hbp_set_addr()
357 if (IS_ERR(bp)) { ptrace_hbp_set_addr()
358 err = PTR_ERR(bp); ptrace_hbp_set_addr()
362 attr = bp->attr; ptrace_hbp_set_addr()
364 err = modify_user_hw_breakpoint(bp, &attr); ptrace_hbp_set_addr()
159 ptrace_hbp_set_event(unsigned int note_type, struct task_struct *tsk, unsigned long idx, struct perf_event *bp) ptrace_hbp_set_event() argument
H A Ddebug-monitors.c335 bool bp = false; aarch32_break_handler() local
349 bp = thumb_instr == AARCH32_BREAK_THUMB2_HI; aarch32_break_handler()
351 bp = thumb_instr == AARCH32_BREAK_THUMB; aarch32_break_handler()
357 bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM; aarch32_break_handler()
360 if (!bp) aarch32_break_handler()
/linux-4.4.14/drivers/tty/vt/
H A Dselection.c163 char *bp, *obp; set_selection() local
298 bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL); set_selection()
299 if (!bp) { set_selection()
305 sel_buffer = bp; set_selection()
307 obp = bp; set_selection()
311 bp += store_utf8(c, bp); set_selection()
313 *bp++ = c; set_selection()
315 obp = bp; set_selection()
319 if (obp != bp) { set_selection()
320 bp = obp; set_selection()
321 *bp++ = '\r'; set_selection()
323 obp = bp; set_selection()
326 sel_buffer_lth = bp - sel_buffer; set_selection()
/linux-4.4.14/arch/powerpc/include/asm/
H A Dhw_breakpoint.h63 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
64 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
67 int arch_install_hw_breakpoint(struct perf_event *bp);
68 void arch_uninstall_hw_breakpoint(struct perf_event *bp);
69 void hw_breakpoint_pmu_read(struct perf_event *bp);
73 extern void ptrace_triggered(struct perf_event *bp,
/linux-4.4.14/drivers/media/tuners/
H A Dtda827x.c98 u8 bp; member in struct:tda827x_data
105 { .lomax = 62000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
106 { .lomax = 66000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 1},
107 { .lomax = 76000000, .spd = 3, .bs = 1, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
108 { .lomax = 84000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 3, .div1p5 = 0},
109 { .lomax = 93000000, .spd = 3, .bs = 2, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
110 { .lomax = 98000000, .spd = 3, .bs = 3, .bp = 0, .cp = 0, .gc3 = 1, .div1p5 = 0},
111 { .lomax = 109000000, .spd = 3, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
112 { .lomax = 123000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
113 { .lomax = 133000000, .spd = 2, .bs = 3, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 1},
114 { .lomax = 151000000, .spd = 2, .bs = 1, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
115 { .lomax = 154000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 1, .div1p5 = 0},
116 { .lomax = 181000000, .spd = 2, .bs = 2, .bp = 1, .cp = 0, .gc3 = 0, .div1p5 = 0},
117 { .lomax = 185000000, .spd = 2, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
118 { .lomax = 217000000, .spd = 2, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
119 { .lomax = 244000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
120 { .lomax = 265000000, .spd = 1, .bs = 3, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 1},
121 { .lomax = 302000000, .spd = 1, .bs = 1, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
122 { .lomax = 324000000, .spd = 1, .bs = 2, .bp = 2, .cp = 0, .gc3 = 1, .div1p5 = 0},
123 { .lomax = 370000000, .spd = 1, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
124 { .lomax = 454000000, .spd = 1, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
125 { .lomax = 493000000, .spd = 0, .bs = 2, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
126 { .lomax = 530000000, .spd = 0, .bs = 3, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 1},
127 { .lomax = 554000000, .spd = 0, .bs = 1, .bp = 3, .cp = 0, .gc3 = 1, .div1p5 = 0},
128 { .lomax = 604000000, .spd = 0, .bs = 1, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
129 { .lomax = 696000000, .spd = 0, .bs = 2, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
130 { .lomax = 740000000, .spd = 0, .bs = 2, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
131 { .lomax = 820000000, .spd = 0, .bs = 3, .bp = 4, .cp = 0, .gc3 = 0, .div1p5 = 0},
132 { .lomax = 865000000, .spd = 0, .bs = 3, .bp = 4, .cp = 1, .gc3 = 0, .div1p5 = 0},
133 { .lomax = 0, .spd = 0, .bs = 0, .bp = 0, .cp = 0, .gc3 = 0, .div1p5 = 0}
196 tda827x_table[i].bp; tda827xo_set_params()
284 (tda827x_table[i].bs << 3) + tda827x_table[i].bp; tda827xo_set_analog_params()
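The tda827x_table[] above is sorted by ascending .lomax and terminated by a zero entry; the tuner code scans for the first row whose lomax covers the requested local-oscillator frequency, then programs spd/bs/bp/cp from that row. The lookup itself is just the loop below (a sketch with a trimmed table; the row values are copied from the hits above):

	#include <stdio.h>
	#include <stdint.h>

	struct row {                           /* trimmed tda827x_data */
		uint32_t lomax;                /* row valid up to this LO freq */
		uint8_t spd, bs, bp;           /* divider / band-switch fields */
	};

	static const struct row tab[] = {
		{  62000000, 3, 2, 0 },
		{ 109000000, 3, 3, 1 },
		{ 185000000, 2, 2, 2 },
		{ 454000000, 1, 3, 3 },
		{ 865000000, 0, 3, 4 },
		{ 0 }                          /* terminator, like the driver's */
	};

	static const struct row *pick_row(uint32_t lo_freq)
	{
		int i = 0;

		while (tab[i].lomax && lo_freq > tab[i].lomax)
			i++;                       /* first row covering lo_freq */
		return tab[i].lomax ? &tab[i] : NULL;   /* NULL: out of range */
	}

	int main(void)
	{
		const struct row *r = pick_row(150000000);   /* 150 MHz LO */

		if (r)  /* lands on the 185 MHz row: spd=2 bs=2 bp=2 */
			printf("lomax=%u spd=%u bs=%u bp=%u\n",
			       (unsigned)r->lomax, r->spd, r->bs, r->bp);
		return 0;
	}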
/linux-4.4.14/net/bridge/netfilter/
H A Debt_among.c83 const __be32 *bp; get_ip_dst() local
91 bp = skb_header_pointer(skb, sizeof(struct arphdr) + get_ip_dst()
94 if (bp == NULL) get_ip_dst()
96 *addr = *bp; get_ip_dst()
114 const __be32 *bp; get_ip_src() local
122 bp = skb_header_pointer(skb, sizeof(struct arphdr) + get_ip_src()
124 if (bp == NULL) get_ip_src()
126 *addr = *bp; get_ip_src()
/linux-4.4.14/arch/mips/include/uapi/asm/
H A Dbreak.h18 #define BRK_USERBP 0 /* User bp (used by debuggers) */
19 #define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
/linux-4.4.14/arch/h8300/kernel/
H A Dptrace_s.c40 asmlinkage void trace_trap(unsigned long bp) trace_trap() argument
42 (void)bp; trace_trap()
/linux-4.4.14/arch/blackfin/lib/
H A Dstrcpy.S32 if cc jump 1b (bp);
H A Dstrcmp.S36 if cc jump 1b (bp); /* no, keep going */
H A Dstrncmp.S39 if ! cc jump 1b (bp); /* more to do, keep going */
/linux-4.4.14/arch/ia64/hp/sim/boot/
H A Dfw-emu.c241 struct ia64_boot_param *bp; sys_fw_init() local
268 bp = (void *) cp; cp += sizeof(*bp); sys_fw_init()
360 bp->efi_systab = __pa(&fw_mem); sys_fw_init()
361 bp->efi_memmap = __pa(efi_memmap); sys_fw_init()
362 bp->efi_memmap_size = NUM_MEM_DESCS*sizeof(efi_memory_desc_t); sys_fw_init()
363 bp->efi_memdesc_size = sizeof(efi_memory_desc_t); sys_fw_init()
364 bp->efi_memdesc_version = 1; sys_fw_init()
365 bp->command_line = __pa(cmd_line); sys_fw_init()
366 bp->console_info.num_cols = 80; sys_fw_init()
367 bp->console_info.num_rows = 25; sys_fw_init()
368 bp->console_info.orig_x = 0; sys_fw_init()
369 bp->console_info.orig_y = 24; sys_fw_init()
370 bp->fpswa = 0; sys_fw_init()
372 return bp; sys_fw_init()
H A Dbootloader.c36 extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
66 register struct ia64_boot_param *bp; start_bootloader() local
165 bp = sys_fw_init(args, arglen); start_bootloader()
170 jmp_to_kernel((unsigned long) bp, e_entry); start_bootloader()
/linux-4.4.14/arch/arm64/include/asm/
H A Dhw_breakpoint.h114 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
115 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
119 extern int arch_install_hw_breakpoint(struct perf_event *bp);
120 extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
121 extern void hw_breakpoint_pmu_read(struct perf_event *bp);
/linux-4.4.14/arch/powerpc/xmon/
H A Dxmon.c108 #define BP_NUM(bp) ((bp) - bpts + 1)
406 struct bpt *bp; xmon_core() local
419 bp = in_breakpoint_table(regs->nip, &offset); xmon_core()
420 if (bp != NULL) { xmon_core()
421 regs->nip = bp->address + offset; xmon_core()
422 atomic_dec(&bp->ref_count); xmon_core()
453 bp = NULL; xmon_core()
455 bp = at_breakpoint(regs->nip); xmon_core()
456 if (bp || unrecoverable_excp(regs)) xmon_core()
462 if (bp) { xmon_core()
464 cpu, BP_NUM(bp)); xmon_core()
505 if (bp || TRAP(regs) == 0xd00) xmon_core()
555 bp = at_breakpoint(regs->nip); xmon_core()
556 if (bp) { xmon_core()
557 printf("Stopped at breakpoint %lx (", BP_NUM(bp)); xmon_core()
566 if (bp || TRAP(regs) == 0xd00) xmon_core()
579 bp = at_breakpoint(regs->nip); xmon_core()
580 if (bp != NULL) { xmon_core()
581 regs->nip = (unsigned long) &bp->instr[0]; xmon_core()
582 atomic_inc(&bp->ref_count); xmon_core()
587 bp = at_breakpoint(regs->nip); xmon_core()
588 if (bp != NULL) { xmon_core()
589 int stepped = emulate_step(regs, bp->instr[0]); xmon_core()
591 regs->nip = (unsigned long) &bp->instr[0]; xmon_core()
592 atomic_inc(&bp->ref_count); xmon_core()
595 (IS_RFID(bp->instr[0])? "rfid": "mtmsrd")); xmon_core()
633 struct bpt *bp; xmon_bpt() local
639 /* Are we at the trap at bp->instr[1] for some bp? */ xmon_bpt()
640 bp = in_breakpoint_table(regs->nip, &offset); xmon_bpt()
641 if (bp != NULL && offset == 4) { xmon_bpt()
642 regs->nip = bp->address + 4; xmon_bpt()
643 atomic_dec(&bp->ref_count); xmon_bpt()
648 bp = at_breakpoint(regs->nip); xmon_bpt()
649 if (!bp) xmon_bpt()
696 struct bpt *bp; xmon_fault_handler() local
703 bp = in_breakpoint_table(regs->nip, &offset); xmon_fault_handler()
704 if (bp != NULL) { xmon_fault_handler()
705 regs->nip = bp->address + offset; xmon_fault_handler()
706 atomic_dec(&bp->ref_count); xmon_fault_handler()
716 struct bpt *bp; at_breakpoint() local
718 bp = bpts; at_breakpoint()
719 for (i = 0; i < NBPTS; ++i, ++bp) at_breakpoint()
720 if (bp->enabled && pc == bp->address) at_breakpoint()
721 return bp; at_breakpoint()
742 struct bpt *bp; new_breakpoint() local
745 bp = at_breakpoint(a); new_breakpoint()
746 if (bp) new_breakpoint()
747 return bp; new_breakpoint()
749 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { new_breakpoint()
750 if (!bp->enabled && atomic_read(&bp->ref_count) == 0) { new_breakpoint()
751 bp->address = a; new_breakpoint()
752 bp->instr[1] = bpinstr; new_breakpoint()
753 store_inst(&bp->instr[1]); new_breakpoint()
754 return bp; new_breakpoint()
765 struct bpt *bp; insert_bpts() local
767 bp = bpts; insert_bpts()
768 for (i = 0; i < NBPTS; ++i, ++bp) { insert_bpts()
769 if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0) insert_bpts()
771 if (mread(bp->address, &bp->instr[0], 4) != 4) { insert_bpts()
773 "disabling breakpoint there\n", bp->address); insert_bpts()
774 bp->enabled = 0; insert_bpts()
777 if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) { insert_bpts()
779 "instruction, disabling it\n", bp->address); insert_bpts()
780 bp->enabled = 0; insert_bpts()
783 store_inst(&bp->instr[0]); insert_bpts()
784 if (bp->enabled & BP_CIABR) insert_bpts()
786 if (mwrite(bp->address, &bpinstr, 4) != 4) { insert_bpts()
788 "disabling breakpoint there\n", bp->address); insert_bpts()
789 bp->enabled &= ~BP_TRAP; insert_bpts()
792 store_inst((void *)bp->address); insert_bpts()
814 struct bpt *bp; remove_bpts() local
817 bp = bpts; remove_bpts()
818 for (i = 0; i < NBPTS; ++i, ++bp) { remove_bpts()
819 if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP) remove_bpts()
821 if (mread(bp->address, &instr, 4) == 4 remove_bpts()
823 && mwrite(bp->address, &bp->instr, 4) != 4) remove_bpts()
825 bp->address); remove_bpts()
827 store_inst((void *)bp->address); remove_bpts()
1192 struct bpt *bp; bpt_cmds() local
1234 bp = new_breakpoint(a); bpt_cmds()
1235 if (bp != NULL) { bpt_cmds()
1236 bp->enabled |= BP_CIABR; bpt_cmds()
1237 iabr = bp; bpt_cmds()
1255 bp = &bpts[a-1]; /* bp nums are 1 based */ bpt_cmds()
1258 bp = at_breakpoint(a); bpt_cmds()
1259 if (bp == NULL) { bpt_cmds()
1265 printf("Cleared breakpoint %lx (", BP_NUM(bp)); bpt_cmds()
1266 xmon_print_symbol(bp->address, " ", ")\n"); bpt_cmds()
1267 bp->enabled = 0; bpt_cmds()
1289 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) { bpt_cmds()
1290 if (!bp->enabled) bpt_cmds()
1292 printf("%2x %s ", BP_NUM(bp), bpt_cmds()
1293 (bp->enabled & BP_CIABR) ? "inst": "trap"); bpt_cmds()
1294 xmon_print_symbol(bp->address, " ", "\n"); bpt_cmds()
1301 bp = new_breakpoint(a); bpt_cmds()
1302 if (bp != NULL) bpt_cmds()
1303 bp->enabled |= BP_TRAP; bpt_cmds()
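xmon's breakpoint commands above revolve around a small fixed table bpts[NBPTS]: at_breakpoint() is a linear scan by address, and new_breakpoint() reuses an existing entry or claims the first disabled slot. The bookkeeping reduces to the sketch below (instruction patching, ref counting and the BP_TRAP/BP_CIABR flag split omitted; enabling is folded into allocation here, which the real code leaves to the caller):

	#include <stdio.h>
	#include <stddef.h>

	#define NBPTS 4

	struct bpt {                           /* trimmed xmon struct bpt */
		unsigned long address;
		int enabled;
	};

	static struct bpt bpts[NBPTS];

	static struct bpt *at_breakpoint(unsigned long pc)
	{
		for (int i = 0; i < NBPTS; i++)
			if (bpts[i].enabled && bpts[i].address == pc)
				return &bpts[i];
		return NULL;
	}

	static struct bpt *new_breakpoint(unsigned long a)
	{
		struct bpt *bp = at_breakpoint(a);

		if (bp)
			return bp;                 /* already have one here */
		for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
			if (!bp->enabled) {
				bp->address = a;   /* claim a free slot */
				bp->enabled = 1;
				return bp;
			}
		}
		return NULL;                       /* table full */
	}

	int main(void)
	{
		struct bpt *bp = new_breakpoint(0xc0001234);

		printf("slot %td\n", bp - bpts);                         /* 0 */
		printf("same: %d\n", new_breakpoint(0xc0001234) == bp);  /* 1 */
		return 0;
	}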
/linux-4.4.14/arch/arm/kernel/
H A Dhw_breakpoint.c333 int arch_install_hw_breakpoint(struct perf_event *bp) arch_install_hw_breakpoint() argument
335 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_install_hw_breakpoint()
361 *slot = bp; arch_install_hw_breakpoint()
390 void arch_uninstall_hw_breakpoint(struct perf_event *bp) arch_uninstall_hw_breakpoint() argument
392 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint()
412 if (*slot == bp) { arch_uninstall_hw_breakpoint()
457 * Check whether bp virtual address is in kernel space.
459 int arch_check_bp_in_kernelspace(struct perf_event *bp) arch_check_bp_in_kernelspace() argument
463 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_check_bp_in_kernelspace()
521 static int arch_build_bp_info(struct perf_event *bp) arch_build_bp_info() argument
523 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_build_bp_info()
526 switch (bp->attr.bp_type) { arch_build_bp_info()
544 switch (bp->attr.bp_len) { arch_build_bp_info()
575 info->address = bp->attr.bp_addr; arch_build_bp_info()
579 if (arch_check_bp_in_kernelspace(bp)) arch_build_bp_info()
583 info->ctrl.enabled = !bp->attr.disabled; arch_build_bp_info()
594 int arch_validate_hwbkpt_settings(struct perf_event *bp) arch_validate_hwbkpt_settings() argument
596 struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_validate_hwbkpt_settings()
605 ret = arch_build_bp_info(bp); arch_validate_hwbkpt_settings()
634 if (!bp->overflow_handler) { arch_validate_hwbkpt_settings()
643 if (arch_check_bp_in_kernelspace(bp)) arch_validate_hwbkpt_settings()
650 if (!bp->hw.target) arch_validate_hwbkpt_settings()
668 * Enable/disable single-stepping over the breakpoint bp at address addr.
670 static void enable_single_step(struct perf_event *bp, u32 addr) enable_single_step() argument
672 struct arch_hw_breakpoint *info = counter_arch_bp(bp); enable_single_step()
674 arch_uninstall_hw_breakpoint(bp); enable_single_step()
681 arch_install_hw_breakpoint(bp); enable_single_step()
684 static void disable_single_step(struct perf_event *bp) disable_single_step() argument
686 arch_uninstall_hw_breakpoint(bp); disable_single_step()
687 counter_arch_bp(bp)->step_ctrl.enabled = 0; disable_single_step()
688 arch_install_hw_breakpoint(bp); disable_single_step()
801 struct perf_event *bp, **slots; breakpoint_handler() local
814 bp = slots[i]; breakpoint_handler()
816 if (bp == NULL) breakpoint_handler()
819 info = counter_arch_bp(bp); breakpoint_handler()
832 perf_bp_event(bp, regs); breakpoint_handler()
833 if (!bp->overflow_handler) breakpoint_handler()
834 enable_single_step(bp, addr); breakpoint_handler()
841 disable_single_step(bp); breakpoint_handler()
1122 void hw_breakpoint_pmu_read(struct perf_event *bp) hw_breakpoint_pmu_read() argument
H A Dptrace.c385 static void ptrace_hbptriggered(struct perf_event *bp, ptrace_hbptriggered() argument
389 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); ptrace_hbptriggered()
395 if (current->thread.debug.hbp[i] == bp) ptrace_hbptriggered()
477 struct perf_event *bp; ptrace_gethbpregs() local
489 bp = tsk->thread.debug.hbp[idx]; ptrace_gethbpregs()
490 if (!bp) { ptrace_gethbpregs()
495 arch_ctrl = counter_arch_bp(bp)->ctrl; ptrace_gethbpregs()
505 reg = bp->attr.bp_addr; ptrace_gethbpregs()
523 struct perf_event *bp; ptrace_sethbpregs() local
545 bp = tsk->thread.debug.hbp[idx]; ptrace_sethbpregs()
546 if (!bp) { ptrace_sethbpregs()
547 bp = ptrace_hbp_create(tsk, implied_type); ptrace_sethbpregs()
548 if (IS_ERR(bp)) { ptrace_sethbpregs()
549 ret = PTR_ERR(bp); ptrace_sethbpregs()
552 tsk->thread.debug.hbp[idx] = bp; ptrace_sethbpregs()
555 attr = bp->attr; ptrace_sethbpregs()
577 ret = modify_user_hw_breakpoint(bp, &attr); ptrace_sethbpregs()
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
H A Di40e_hmc.c94 sd_entry->u.bp.addr = mem; i40e_add_sd_table_entry()
95 sd_entry->u.bp.sd_pd_index = sd_index; i40e_add_sd_table_entry()
105 I40E_INC_BP_REFCNT(&sd_entry->u.bp); i40e_add_sd_table_entry()
174 pd_entry->bp.addr = *page; i40e_add_pd_table_entry()
175 pd_entry->bp.sd_pd_index = pd_index; i40e_add_pd_table_entry()
176 pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; i40e_add_pd_table_entry()
190 I40E_INC_BP_REFCNT(&pd_entry->bp); i40e_add_pd_table_entry()
239 I40E_DEC_BP_REFCNT(&pd_entry->bp); i40e_remove_pd_bp()
240 if (pd_entry->bp.ref_cnt) i40e_remove_pd_bp()
253 ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); i40e_remove_pd_bp()
275 I40E_DEC_BP_REFCNT(&sd_entry->u.bp); i40e_prep_remove_sd_bp()
276 if (sd_entry->u.bp.ref_cnt) { i40e_prep_remove_sd_bp()
308 return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr); i40e_remove_sd_bp_new()
H A Di40e_hmc.h63 struct i40e_hmc_bp bp; member in struct:i40e_hmc_pd_entry
84 struct i40e_hmc_bp bp; member in union:i40e_hmc_sd_entry::__anon7016
109 #define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
113 #define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/linux-4.4.14/drivers/scsi/libfc/
H A Dfc_frame.c38 const u8 *bp; fc_frame_crc_check() local
44 bp = (const u8 *) fr_hdr(fp); fc_frame_crc_check()
45 crc = ~crc32(~0, bp, len); fc_frame_crc_check()
H A Dfc_disc.c396 char *bp; fc_disc_gpn_ft_parse() local
409 bp = buf; fc_disc_gpn_ft_parse()
411 np = (struct fc_gpn_ft_resp *)bp; fc_disc_gpn_ft_parse()
422 memcpy((char *)np + tlen, bp, plen); fc_disc_gpn_ft_parse()
425 * Set bp so that the loop below will advance it to the fc_disc_gpn_ft_parse()
428 bp -= tlen; fc_disc_gpn_ft_parse()
438 * Normally, np == bp and plen == len, but from the partial case above, fc_disc_gpn_ft_parse()
439 * bp, len describe the overall buffer, and np, plen describe the fc_disc_gpn_ft_parse()
467 bp += sizeof(*np); fc_disc_gpn_ft_parse()
468 np = (struct fc_gpn_ft_resp *)bp; fc_disc_gpn_ft_parse()
/linux-4.4.14/arch/x86/kvm/
H A Dtss.h49 u16 bp; member in struct:tss_segment_16
/linux-4.4.14/net/sunrpc/
H A Dcache.c754 char *bp = crq->buf; cache_request() local
757 detail->cache_request(detail, crq->item, &bp, &len); cache_request()
1076 char *bp = *bpp; qword_add() local
1082 ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t"); qword_add()
1084 bp += len; qword_add()
1087 bp += ret; qword_add()
1089 *bp++ = ' '; qword_add()
1092 *bpp = bp; qword_add()
1099 char *bp = *bpp; qword_addhex() local
1105 *bp++ = '\\'; qword_addhex()
1106 *bp++ = 'x'; qword_addhex()
1109 bp = hex_byte_pack(bp, *buf++); qword_addhex()
1116 *bp++ = ' '; qword_addhex()
1119 *bpp = bp; qword_addhex()
1220 char *bp = *bpp; qword_get() local
1223 while (*bp == ' ') bp++; qword_get()
1225 if (bp[0] == '\\' && bp[1] == 'x') { qword_get()
1227 bp += 2; qword_get()
1231 h = hex_to_bin(bp[0]); qword_get()
1235 l = hex_to_bin(bp[1]); qword_get()
1240 bp += 2; qword_get()
1245 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) { qword_get()
1246 if (*bp == '\\' && qword_get()
1247 isodigit(bp[1]) && (bp[1] <= '3') && qword_get()
1248 isodigit(bp[2]) && qword_get()
1249 isodigit(bp[3])) { qword_get()
1250 int byte = (*++bp -'0'); qword_get()
1251 bp++; qword_get()
1252 byte = (byte << 3) | (*bp++ - '0'); qword_get()
1253 byte = (byte << 3) | (*bp++ - '0'); qword_get()
1257 *dest++ = *bp++; qword_get()
1263 if (*bp != ' ' && *bp != '\n' && *bp != '\0') qword_get()
1265 while (*bp == ' ') bp++; qword_get()
1266 *bpp = bp; qword_get()
1450 char *bp, *ep; write_flush() local
1462 bp = tbuf; write_flush()
1463 then = get_expiry(&bp); write_flush()
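qword_add()/qword_get() above are sunrpc's cache wire format: tokens are space-separated, and bytes that would break tokenization are written as \NNN octal escapes (qword_add does this via string_escape_str) and folded back by qword_get. A round-trip sketch of just the octal escaping (hypothetical helper names; the kernel additionally accepts a \xHH.. hex form, omitted here):

	#include <stdio.h>
	#include <string.h>

	/* Escape space/newline/tab/backslash as \NNN octal, like qword_add(). */
	static void oct_escape(char *dst, const char *src)
	{
		for (; *src; src++) {
			if (*src == ' ' || *src == '\n' || *src == '\t' || *src == '\\')
				dst += sprintf(dst, "\\%03o", (unsigned char)*src);
			else
				*dst++ = *src;
		}
		*dst = '\0';
	}

	/* Decode \NNN sequences, like the middle of qword_get(). */
	static void oct_unescape(char *dst, const char *bp)
	{
		while (*bp) {
			if (bp[0] == '\\' && bp[1] >= '0' && bp[1] <= '3' &&
			    bp[2] >= '0' && bp[2] <= '7' &&
			    bp[3] >= '0' && bp[3] <= '7') {
				int byte = (*++bp - '0');
				byte = (byte << 3) | (*++bp - '0');
				byte = (byte << 3) | (*++bp - '0');
				bp++;
				*dst++ = byte;
			} else {
				*dst++ = *bp++;
			}
		}
		*dst = '\0';
	}

	int main(void)
	{
		char esc[64], back[64];

		oct_escape(esc, "a b\tc");
		printf("%s\n", esc);               /* a\040b\011c */
		oct_unescape(back, esc);
		printf("%s\n", back);              /* a b<TAB>c */
		return 0;
	}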
/linux-4.4.14/drivers/net/ethernet/dec/tulip/
H A Deeprom.c304 unsigned char *bp = leaf->leafdata; tulip_parse_eeprom() local
307 bp[0], bp[1], bp[2 + bp[1]*2], tulip_parse_eeprom()
308 bp[5 + bp[2 + bp[1]*2]*2], tulip_parse_eeprom()
309 bp[4 + bp[2 + bp[1]*2]*2]); tulip_parse_eeprom()
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dqsfp.c68 int offset, void *bp, int len) __i2c_write()
72 u8 *buff = bp; __i2c_write()
103 void *bp, int len) i2c_write()
110 ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len); i2c_write()
121 int offset, void *bp, int len) __i2c_read()
126 u8 *buff = bp; __i2c_read()
175 void *bp, int len) i2c_read()
182 ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len); i2c_read()
189 int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, qsfp_write() argument
226 ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count, qsfp_write()
242 int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp, qsfp_read() argument
278 ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count, qsfp_read()
67 __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) __i2c_write() argument
102 i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) i2c_write() argument
120 __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) __i2c_read() argument
174 i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset, void *bp, int len) i2c_read() argument
H A Dqsfp.h216 int offset, void *bp, int len);
218 int offset, void *bp, int len);
219 int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
221 int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
/linux-4.4.14/drivers/md/
H A Ddm-bufio.h49 struct dm_buffer **bp);
56 struct dm_buffer **bp);
63 struct dm_buffer **bp);
H A Dbitmap.c63 if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
66 if (bitmap->bp[page].map) /* page is already allocated, just return */
81 * When this function completes, either bp[page].map or
82 * bp[page].hijacked. In either case, this function will
95 if (!bitmap->bp[page].map)
96 bitmap->bp[page].hijacked = 1;
97 } else if (bitmap->bp[page].map ||
98 bitmap->bp[page].hijacked) {
106 bitmap->bp[page].map = mappage;
119 if (bitmap->bp[page].count) /* page is still busy */ bitmap_checkfree()
124 if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ bitmap_checkfree()
125 bitmap->bp[page].hijacked = 0; bitmap_checkfree()
126 bitmap->bp[page].map = NULL; bitmap_checkfree()
129 ptr = bitmap->bp[page].map; bitmap_checkfree()
130 bitmap->bp[page].map = NULL; bitmap_checkfree()
1170 bitmap->bp[page].count += inc; bitmap_count_page()
1178 struct bitmap_page *bp = &bitmap->bp[page]; bitmap_set_pending() local
1180 if (!bp->pending) bitmap_set_pending()
1181 bp->pending = 1; bitmap_set_pending()
1258 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { bitmap_daemon_work()
1262 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; bitmap_daemon_work()
1332 if (bitmap->bp[page].hijacked ||
1333 bitmap->bp[page].map == NULL)
1345 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1350 &bitmap->bp[page].map)[hi];
1353 &(bitmap->bp[page].map[pageoff]);
1677 struct bitmap_page *bp; bitmap_free() local
1693 bp = bitmap->counts.bp; bitmap_free()
1698 if (bp) /* deallocate the page memory */ bitmap_free()
1700 if (bp[k].map && !bp[k].hijacked) bitmap_free()
1701 kfree(bp[k].map); bitmap_free()
1702 kfree(bp); bitmap_free()
2026 bitmap->counts.bp = new_bp; bitmap_resize()
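The md bitmap hits above include a neat degraded-mode trick: when a counter page cannot be allocated, the page is marked "hijacked" and the map pointer field's own storage is reused as a tiny inline counter array (the ((u16 *)&bitmap->bp[page].map)[hi] cast). A sketch of that pointer-field hijack (hypothetical layout; the punning through the pointer's bytes is exactly what the kernel does, portable in practice if not in the letter of the standard):

	#include <stdio.h>
	#include <stdint.h>

	struct bp_page {                       /* stand-in for md's bitmap_page */
		uint16_t *map;                 /* normally points at a counter page */
		int hijacked;                  /* 1: page allocation failed */
	};

	/* Return counter 'idx' for this page.  In the hijacked case the
	 * pointer field itself serves as a two-entry u16 array. */
	static uint16_t *get_counter(struct bp_page *bp, int idx)
	{
		if (bp->hijacked)
			return &((uint16_t *)&bp->map)[idx & 1];
		return &bp->map[idx];
	}

	int main(void)
	{
		struct bp_page p = { .map = NULL, .hijacked = 1 };

		*get_counter(&p, 0) += 3;      /* stored inside p.map's bytes */
		*get_counter(&p, 1) += 5;
		printf("%u %u\n", *get_counter(&p, 0), *get_counter(&p, 1)); /* 3 5 */
		return 0;
	}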
/linux-4.4.14/sound/pci/emu10k1/
H A Demu10k1_callback.c225 struct best_voice *bp; lookup_voices() local
246 bp = best + V_FREE; lookup_voices()
248 bp = best + V_OFF; lookup_voices()
252 bp = best + V_RELEASED; lookup_voices()
256 bp = best + V_OFF; lookup_voices()
262 bp = best + V_PLAYING; lookup_voices()
267 if (bp != best + V_OFF && bp != best + V_FREE && lookup_voices()
271 bp = best + V_OFF; lookup_voices()
274 if (vp->time < bp->time) { lookup_voices()
275 bp->time = vp->time; lookup_voices()
276 bp->voice = i; lookup_voices()
/linux-4.4.14/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/
H A DEventClass.py57 flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
66 self.bp = bp
/linux-4.4.14/drivers/mtd/nand/
H A Dnand_ecc.c162 const uint32_t *bp = (uint32_t *)buf; __nand_calculate_ecc() local
195 cur = *bp++; __nand_calculate_ecc()
198 cur = *bp++; __nand_calculate_ecc()
201 cur = *bp++; __nand_calculate_ecc()
204 cur = *bp++; __nand_calculate_ecc()
208 cur = *bp++; __nand_calculate_ecc()
212 cur = *bp++; __nand_calculate_ecc()
215 cur = *bp++; __nand_calculate_ecc()
218 cur = *bp++; __nand_calculate_ecc()
222 cur = *bp++; __nand_calculate_ecc()
227 cur = *bp++; __nand_calculate_ecc()
231 cur = *bp++; __nand_calculate_ecc()
235 cur = *bp++; __nand_calculate_ecc()
239 cur = *bp++; __nand_calculate_ecc()
243 cur = *bp++; __nand_calculate_ecc()
246 cur = *bp++; __nand_calculate_ecc()
249 cur = *bp++; __nand_calculate_ecc()
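__nand_calculate_ecc() above XOR-accumulates the buffer 32 bits at a time (the long run of cur = *bp++ lines is a manually unrolled loop) and later folds the accumulator into row/column parity bits. The core fold, reducing a whole buffer to one parity bit, looks like this (a sketch; the kernel interleaves the fold with its rp/cp bookkeeping):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/* XOR-accumulate a buffer one 32-bit word at a time, then fold the
	 * accumulator down to a single even/odd parity bit. */
	static int buf_parity(const uint8_t *buf, size_t len)
	{
		const uint32_t *bp = (const uint32_t *)buf;
		uint32_t par = 0;

		for (size_t i = 0; i < len / 4; i++)
			par ^= *bp++;              /* the unrolled cur = *bp++ chain */

		par ^= par >> 16;                  /* fold 32 -> 16 -> ... -> 1 bit */
		par ^= par >> 8;
		par ^= par >> 4;
		par ^= par >> 2;
		par ^= par >> 1;
		return par & 1;
	}

	int main(void)
	{
		uint32_t words[2] = { 0x00000001, 0x00000003 };

		/* 1 ^ 3 = 2: a single set bit, so overall parity is odd */
		printf("%d\n", buf_parity((const uint8_t *)words, sizeof(words)));
		return 0;
	}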
/linux-4.4.14/scripts/dtc/
H A Dtreesource.c170 const char *bp = val.val; write_propval_bytes() local
175 while (m && (m->offset == (bp-val.val))) { write_propval_bytes()
181 fprintf(f, "%02hhx", (unsigned char)(*bp++)); write_propval_bytes()
182 if ((const void *)bp >= propend) write_propval_bytes()
/linux-4.4.14/drivers/isdn/hisax/
H A Dtei.c107 u_char *bp; put_tei_msg() local
113 bp = skb_put(skb, 3); put_tei_msg()
114 bp[0] = (TEI_SAPI << 2); put_tei_msg()
115 bp[1] = (GROUP_TEI << 1) | 0x1; put_tei_msg()
116 bp[2] = UI; put_tei_msg()
117 bp = skb_put(skb, 5); put_tei_msg()
118 bp[0] = TEI_ENTITY_ID; put_tei_msg()
119 bp[1] = ri >> 8; put_tei_msg()
120 bp[2] = ri & 0xff; put_tei_msg()
121 bp[3] = m_id; put_tei_msg()
122 bp[4] = (tei << 1) | 1; put_tei_msg()
/linux-4.4.14/arch/cris/arch-v32/kernel/
H A Dptrace.c103 /* If no h/w bp configured, disable S bit. */ user_disable_single_step()
339 int bp; deconfigure_bp() local
345 for (bp = 0; bp < 6; bp++) { deconfigure_bp()
348 put_debugreg(pid, PT_BP + 3 + (bp * 2), 0); deconfigure_bp()
349 put_debugreg(pid, PT_BP + 4 + (bp * 2), 0); deconfigure_bp()
352 tmp = get_debugreg(pid, PT_BP_CTRL) & ~(3 << (2 + (bp * 4))); deconfigure_bp()
H A Dkgdb.c846 int S, bp, trig_bits = 0, rw_bits = 0; stub_is_stopped() local
861 for (bp = 0; bp < 6; bp++) { stub_is_stopped()
864 int bitpos_trig = 1 + bp * 2; stub_is_stopped()
866 int bitpos_config = 2 + bp * 4; stub_is_stopped()
881 trig_mask |= (1 << bp); stub_is_stopped()
883 if (reg.eda >= bp_d_regs[bp * 2] && stub_is_stopped()
884 reg.eda <= bp_d_regs[bp * 2 + 1]) { stub_is_stopped()
892 if (bp < 6) { stub_is_stopped()
896 for (bp = 0; bp < 6; bp++) { stub_is_stopped()
898 int bitpos_config = 2 + bp * 4; stub_is_stopped()
903 if (trig_mask & (1 << bp)) { stub_is_stopped()
905 if (reg.eda + 31 >= bp_d_regs[bp * 2]) { stub_is_stopped()
908 stopped_data_address = bp_d_regs[bp * 2]; stub_is_stopped()
919 BUG_ON(bp >= 6); stub_is_stopped()
1112 int bp; insert_watchpoint() local
1123 for (bp = 0; bp < 6; bp++) { insert_watchpoint()
1127 if (!(sreg.s0_3 & (0x3 << (2 + (bp * 4))))) { insert_watchpoint()
1132 if (bp > 5) { insert_watchpoint()
1141 sreg.s0_3 |= (1 << (2 + bp * 4)); insert_watchpoint()
1145 sreg.s0_3 |= (2 << (2 + bp * 4)); insert_watchpoint()
1149 bp_d_regs[bp * 2] = addr; insert_watchpoint()
1150 bp_d_regs[bp * 2 + 1] = (addr + len - 1); insert_watchpoint()
1190 int bp; remove_watchpoint() local
1199 for (bp = 0; bp < 6; bp++) { remove_watchpoint()
1200 if (bp_d_regs[bp * 2] == addr && remove_watchpoint()
1201 bp_d_regs[bp * 2 + 1] == (addr + len - 1)) { remove_watchpoint()
1203 int bitpos = 2 + bp * 4; remove_watchpoint()
1218 if (bp > 5) { remove_watchpoint()
1227 sreg.s0_3 &= ~(3 << (2 + (bp * 4))); remove_watchpoint()
1228 bp_d_regs[bp * 2] = 0; remove_watchpoint()
1229 bp_d_regs[bp * 2 + 1] = 0; remove_watchpoint()
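
The CRIS kgdb and ptrace hits share one register layout: each of the six hardware breakpoints owns a 2-bit mode field at bit position 2 + bp * 4 in the control register, with 00 meaning unused. A sketch of the slot search and arming logic implied by those masks (the mode encodings are assumptions):

#include <stdint.h>

#define BP_MODE_SHIFT(bp)  (2 + (bp) * 4)
#define BP_MODE_MASK(bp)   (0x3u << BP_MODE_SHIFT(bp))

/* Find an unused slot: a 00 mode field means the breakpoint is free. */
static int find_free_bp(uint32_t ctrl)
{
    int bp;

    for (bp = 0; bp < 6; bp++)
        if (!(ctrl & BP_MODE_MASK(bp)))
            return bp;
    return -1;               /* all six slots taken */
}

/* Arm slot bp with a mode (assumed: 1 = read, 2 = write, 3 = both). */
static uint32_t arm_bp(uint32_t ctrl, int bp, uint32_t mode)
{
    return (ctrl & ~BP_MODE_MASK(bp)) | (mode << BP_MODE_SHIFT(bp));
}
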
/linux-4.4.14/arch/frv/include/asm/
H A Dio.h57 const uint8_t *bp = buf; io_outsb() local
60 __builtin_write8((volatile void __iomem *) __ioaddr, *bp++); io_outsb()
66 const uint16_t *bp = buf; io_outsw() local
69 __builtin_write16((volatile void __iomem *) __ioaddr, (*bp++)); io_outsw()
86 uint8_t *bp = buf; io_insb() local
89 *bp++ = __builtin_read8((volatile void __iomem *) addr); io_insb()
94 uint16_t *bp = buf; io_insw() local
97 *bp++ = __builtin_read16((volatile void __iomem *) addr); io_insw()
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/
H A Di40e_hmc.h63 struct i40e_hmc_bp bp; member in struct:i40e_hmc_pd_entry
84 struct i40e_hmc_bp bp; member in union:i40e_hmc_sd_entry::__anon7048
109 #define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
113 #define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
/linux-4.4.14/drivers/pcmcia/
H A Dm32r_pcc.c153 unsigned char *bp = (unsigned char *)buf; pcc_iorw() local
163 writeb(*bp++, addr); pcc_iorw()
168 *bp++ = readb(addr); pcc_iorw()
173 unsigned short *bp = (unsigned short *)buf; pcc_iorw() local
185 unsigned char *cp = (unsigned char *)bp; pcc_iorw()
189 bp++; pcc_iorw()
192 writew(*bp++, addr); pcc_iorw()
199 unsigned char *cp = (unsigned char *)bp; pcc_iorw()
204 bp++; pcc_iorw()
207 *bp++ = readw(addr); pcc_iorw()
H A Dm32r_cfc.c108 unsigned char *bp = (unsigned char *)buf; pcc_ioread_byte() local
125 *bp++ = readb(addr); pcc_ioread_byte()
133 unsigned short *bp = (unsigned short *)buf; pcc_ioread_word() local
156 *bp++ = readw(addr); pcc_ioread_word()
164 unsigned char *bp = (unsigned char *)buf; pcc_iowrite_byte() local
181 writeb(*bp++, addr); pcc_iowrite_byte()
189 unsigned short *bp = (unsigned short *)buf; pcc_iowrite_word() local
218 writew(*bp++, addr); pcc_iowrite_word()
/linux-4.4.14/drivers/mfd/
H A Dipaq-micro.c38 int i, bp; ipaq_micro_trigger_tx() local
42 bp = 0; ipaq_micro_trigger_tx()
43 tx->buf[bp++] = CHAR_SOF; ipaq_micro_trigger_tx()
46 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx()
49 tx->buf[bp++] = msg->tx_data[i]; ipaq_micro_trigger_tx()
53 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx()
54 tx->len = bp; ipaq_micro_trigger_tx()
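
ipaq_micro_trigger_tx() frames each message as a start-of-frame byte, a header byte, the payload, and a trailing checksum, advancing bp through the buffer as it goes. A hedged sketch of that framing (the SOF value, header contents, and checksum rule here are assumptions, not the driver's exact protocol):

#include <stdint.h>
#include <stddef.h>

#define CHAR_SOF 0x02   /* illustrative start-of-frame byte */

static size_t build_frame(uint8_t *out, uint8_t hdr,
                          const uint8_t *data, size_t len)
{
    size_t bp = 0, i;
    uint8_t checksum = hdr;

    out[bp++] = CHAR_SOF;
    out[bp++] = hdr;
    for (i = 0; i < len; i++) {
        out[bp++] = data[i];
        checksum += data[i];   /* additive checksum over header+payload */
    }
    out[bp++] = checksum;      /* appended last, as in the driver */
    return bp;                 /* frame length, like tx->len = bp */
}
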
H A Dpm8921-core.c68 static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp, pm8xxx_read_block_irq() argument
74 rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); pm8xxx_read_block_irq()
76 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); pm8xxx_read_block_irq()
89 pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp) pm8xxx_config_irq() argument
94 rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); pm8xxx_config_irq()
96 pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); pm8xxx_config_irq()
/linux-4.4.14/sound/isa/sb/
H A Demu8000_callback.c173 struct best *bp; get_voice() local
192 bp = best + OFF; get_voice()
195 bp = best + RELEASED; get_voice()
198 bp = best + OFF; get_voice()
201 bp = best + PLAYING; get_voice()
210 bp = best + OFF; get_voice()
213 if (vp->time < bp->time) { get_voice()
214 bp->time = vp->time; get_voice()
215 bp->voice = i; get_voice()
/linux-4.4.14/drivers/spi/
H A Dspi-tle62x0.c103 char *bp = buf; tle62x0_status_show() local
125 bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2))); tle62x0_status_show()
128 *bp++ = '\n'; tle62x0_status_show()
131 return bp - buf; tle62x0_status_show()
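
tle62x0_status_show() builds its sysfs output by letting sprintf() advance a cursor through the buffer and returning bp - buf as the byte count. The same pattern in isolation:

#include <stdio.h>

static int render_status(char *buf, const unsigned int *faults, int n)
{
    char *bp = buf;
    int i;

    for (i = 0; i < n; i++)
        bp += sprintf(bp, "%u ", faults[i]);  /* advance by chars written */
    *bp++ = '\n';
    return bp - buf;                          /* byte count for sysfs */
}
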
/linux-4.4.14/arch/m68k/68360/
H A Dcommproc.c292 volatile uint *bp; m360_cpm_setbrg() local
296 /* bp = (uint *)&cpmp->cp_brgc1; */ m360_cpm_setbrg()
297 bp = (volatile uint *)(&pquicc->brgc[0].l); m360_cpm_setbrg()
298 bp += brg; m360_cpm_setbrg()
299 *bp = ((BRG_UART_CLK / rate - 1) << 1) | CPM_BRG_EN; m360_cpm_setbrg()
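
m360_cpm_setbrg() programs the baud-rate generator with a divider of clock/rate - 1, stored shifted left by one with the enable bit OR'ed in. The same computation as a sketch (the enable-bit position is an assumption):

#include <stdint.h>

#define CPM_BRG_EN (1u << 16)   /* assumed enable-bit position */

static uint32_t brg_value(uint32_t clk, uint32_t rate)
{
    return ((clk / rate - 1) << 1) | CPM_BRG_EN;  /* divider, pre-shifted */
}
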
/linux-4.4.14/drivers/tty/serial/
H A Dmpsc.c788 u8 *bp, *bp_p; mpsc_init_rings() local
834 bp = pi->rxb; mpsc_init_rings()
850 bp += MPSC_RXBE_SIZE; mpsc_init_rings()
858 bp = pi->txb; mpsc_init_rings()
869 bp += MPSC_TXBE_SIZE; mpsc_init_rings()
942 u8 *bp; mpsc_rx_intr() local
982 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); mpsc_rx_intr()
983 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE, mpsc_rx_intr()
987 invalidate_dcache_range((ulong)bp, mpsc_rx_intr()
988 (ulong)bp + MPSC_RXBE_SIZE); mpsc_rx_intr()
1028 if (uart_handle_sysrq_char(&pi->port, *bp)) { mpsc_rx_intr()
1029 bp++; mpsc_rx_intr()
1044 tty_insert_flip_char(port, *bp, flag); mpsc_rx_intr()
1047 tty_insert_flip_char(port, *bp++, TTY_NORMAL); mpsc_rx_intr()
1117 u8 *bp; mpsc_copy_tx_data() local
1132 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1133 *bp = pi->port.x_char; mpsc_copy_tx_data()
1142 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1143 memcpy(bp, &xmit->buf[xmit->tail], i); mpsc_copy_tx_data()
1152 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, mpsc_copy_tx_data()
1156 flush_dcache_range((ulong)bp, mpsc_copy_tx_data()
1157 (ulong)bp + MPSC_TXBE_SIZE); mpsc_copy_tx_data()
1565 u8 *bp; mpsc_get_poll_char() local
1597 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); mpsc_get_poll_char()
1598 dma_cache_sync(pi->port.dev, (void *) bp, mpsc_get_poll_char()
1602 invalidate_dcache_range((ulong)bp, mpsc_get_poll_char()
1603 (ulong)bp + MPSC_RXBE_SIZE); mpsc_get_poll_char()
1608 poll_buf[poll_cnt] = *bp; mpsc_get_poll_char()
1612 poll_buf[poll_cnt] = *bp++; mpsc_get_poll_char()
1712 u8 *bp, *dp, add_cr = 0; mpsc_console_write() local
1729 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); mpsc_console_write()
1750 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, mpsc_console_write()
1754 flush_dcache_range((ulong)bp, mpsc_console_write()
1755 (ulong)bp + MPSC_TXBE_SIZE); mpsc_console_write()
/linux-4.4.14/drivers/parport/
H A Dieee1284_ops.c734 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_write_data() local
746 for (; len > 0; len--, bp++) { parport_ieee1284_epp_write_data()
748 parport_write_data (port, *bp); parport_ieee1284_epp_write_data()
778 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_read_data() local
789 for (; len > 0; len--, bp++) { parport_ieee1284_epp_read_data()
799 *bp = parport_read_data (port); parport_ieee1284_epp_read_data()
822 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_write_addr() local
834 for (; len > 0; len--, bp++) { parport_ieee1284_epp_write_addr()
836 parport_write_data (port, *bp); parport_ieee1284_epp_write_addr()
866 unsigned char *bp = (unsigned char *) buffer; parport_ieee1284_epp_read_addr() local
877 for (; len > 0; len--, bp++) { parport_ieee1284_epp_read_addr()
887 *bp = parport_read_data (port); parport_ieee1284_epp_read_addr()
/linux-4.4.14/scripts/gdb/linux/
H A Dsymbols.py138 for bp in gdb.breakpoints():
139 saved_states.append({'breakpoint': bp, 'enabled': bp.enabled})
/linux-4.4.14/drivers/edac/
H A Dedac_stub.c8 * Borislav Petkov <bp@alien8.de>
/linux-4.4.14/arch/unicore32/include/asm/
H A Dprocessor.h39 struct debug_entry bp[2]; member in struct:debug_info
/linux-4.4.14/arch/x86/math-emu/
H A Dget_address.c37 offsetof(struct pt_regs, bp),
353 address += FPU_info->regs->bp + FPU_info->regs->si; FPU_get_address_16()
358 address += FPU_info->regs->bp + FPU_info->regs->di; FPU_get_address_16()
369 address += FPU_info->regs->bp; FPU_get_address_16()
/linux-4.4.14/arch/arm/mach-mv78xx0/
H A Ddb78x00-bp-setup.c2 * arch/arm/mach-mv78xx0/db78x00-bp-setup.c
/linux-4.4.14/tools/testing/selftests/x86/
H A Dptrace_syscall.c57 register unsigned long bp asm("bp") = args->arg5; do_full_int80()
61 "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)); do_full_int80()
62 args->arg5 = bp; do_full_int80()
/linux-4.4.14/sound/drivers/opl3/
H A Dopl3_midi.c165 struct best *bp; opl3_get_voice() local
182 bp = best; opl3_get_voice()
194 bp++; opl3_get_voice()
200 bp++; opl3_get_voice()
208 bp++; opl3_get_voice()
211 bp++; opl3_get_voice()
214 bp++; opl3_get_voice()
216 if (voice_time < bp->time) { opl3_get_voice()
217 bp->time = voice_time; opl3_get_voice()
218 bp->voice = i; opl3_get_voice()
/linux-4.4.14/drivers/rtc/
H A Drtc-ds1305.c216 u8 *bp = buf; ds1305_set_time() local
225 *bp++ = DS1305_WRITE | DS1305_SEC; ds1305_set_time()
227 *bp++ = bin2bcd(time->tm_sec); ds1305_set_time()
228 *bp++ = bin2bcd(time->tm_min); ds1305_set_time()
229 *bp++ = hour2bcd(ds1305->hr12, time->tm_hour); ds1305_set_time()
230 *bp++ = (time->tm_wday < 7) ? (time->tm_wday + 1) : 1; ds1305_set_time()
231 *bp++ = bin2bcd(time->tm_mday); ds1305_set_time()
232 *bp++ = bin2bcd(time->tm_mon + 1); ds1305_set_time()
233 *bp++ = bin2bcd(time->tm_year - 100); ds1305_set_time()
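
ds1305_set_time() packs each time field as BCD through an advancing pointer, after a write-command/register-address byte. A sketch of that packing (the command and register values are illustrative):

#include <stdint.h>

static uint8_t bin2bcd_sketch(unsigned int v)
{
    return ((v / 10) << 4) | (v % 10);   /* tens nibble, units nibble */
}

static void pack_time(uint8_t *buf, int sec, int min, int hour)
{
    uint8_t *bp = buf;

    *bp++ = 0x80 | 0x00;   /* illustrative: write command | seconds reg */
    *bp++ = bin2bcd_sketch(sec);
    *bp++ = bin2bcd_sketch(min);
    *bp++ = bin2bcd_sketch(hour);
}
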
/linux-4.4.14/drivers/scsi/
H A Dwd33c93.c2061 char *bp; wd33c93_write_info() local
2082 for (bp = buf; *bp; ) { wd33c93_write_info()
2083 while (',' == *bp || ' ' == *bp) wd33c93_write_info()
2084 ++bp; wd33c93_write_info()
2085 if (!strncmp(bp, "debug:", 6)) { wd33c93_write_info()
2086 hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; wd33c93_write_info()
2087 } else if (!strncmp(bp, "disconnect:", 11)) { wd33c93_write_info()
2088 x = simple_strtoul(bp+11, &bp, 0); wd33c93_write_info()
2092 } else if (!strncmp(bp, "period:", 7)) { wd33c93_write_info()
2093 x = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
2097 } else if (!strncmp(bp, "resync:", 7)) { wd33c93_write_info()
2098 set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); wd33c93_write_info()
2099 } else if (!strncmp(bp, "proc:", 5)) { wd33c93_write_info()
2100 hd->proc = simple_strtoul(bp+5, &bp, 0); wd33c93_write_info()
2101 } else if (!strncmp(bp, "nodma:", 6)) { wd33c93_write_info()
2102 hd->no_dma = simple_strtoul(bp+6, &bp, 0); wd33c93_write_info()
2103 } else if (!strncmp(bp, "level2:", 7)) { wd33c93_write_info()
2104 hd->level2 = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
2105 } else if (!strncmp(bp, "burst:", 6)) { wd33c93_write_info()
2107 simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA; wd33c93_write_info()
2108 } else if (!strncmp(bp, "fast:", 5)) { wd33c93_write_info()
2109 x = !!simple_strtol(bp+5, &bp, 0); wd33c93_write_info()
2113 } else if (!strncmp(bp, "nosync:", 7)) { wd33c93_write_info()
2114 x = simple_strtoul(bp+7, &bp, 0); wd33c93_write_info()
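
wd33c93_write_info() parses comma/space-separated key:value options, matching a known prefix and then letting the numeric parser advance the cursor past the argument. The kernel uses simple_strtoul(); strtoul() plays the same role in this sketch:

#include <stdlib.h>
#include <string.h>

static void parse_opts(char *bp, unsigned long *debug, unsigned long *period)
{
    while (*bp) {
        while (*bp == ',' || *bp == ' ')
            bp++;                              /* skip separators */
        if (!strncmp(bp, "debug:", 6))
            *debug = strtoul(bp + 6, &bp, 0);  /* cursor moves past number */
        else if (!strncmp(bp, "period:", 7))
            *period = strtoul(bp + 7, &bp, 0);
        else
            break;                             /* unknown token: stop */
    }
}
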
/linux-4.4.14/tools/perf/arch/x86/util/
H A Ddwarf-regs.c72 REG_OFFSET_NAME_32("%bp", ebp),
85 REG_OFFSET_NAME_64("%bp", rbp),
/linux-4.4.14/arch/x86/oprofile/
H A Dbacktrace.c74 head = (struct stack_frame_ia32 *) regs->bp; x86_backtrace_32()
/linux-4.4.14/drivers/firmware/
H A Ddmi_scan.c46 const u8 *bp = ((u8 *) dm) + dm->length; dmi_string_nosave() local
50 while (s > 0 && *bp) { dmi_string_nosave()
51 bp += strlen(bp) + 1; dmi_string_nosave()
55 if (*bp != 0) { dmi_string_nosave()
56 size_t len = strlen(bp)+1; dmi_string_nosave()
59 if (!memcmp(bp, dmi_empty_string, cmp_len)) dmi_string_nosave()
61 return bp; dmi_string_nosave()
70 const char *bp = dmi_string_nosave(dm, s); dmi_string() local
74 if (bp == dmi_empty_string) dmi_string()
77 len = strlen(bp) + 1; dmi_string()
80 strcpy(str, bp); dmi_string()
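
dmi_string_nosave() resolves a 1-based string index by hopping across the consecutive NUL-terminated strings that trail a DMI structure, stopping at the double-NUL terminator. A hedged sketch of the walk:

#include <string.h>
#include <stddef.h>

static const char *dmi_string_sketch(const char *tbl, int s)
{
    const char *bp = tbl;

    while (s > 1 && *bp) {
        bp += strlen(bp) + 1;   /* hop over one NUL-terminated string */
        s--;
    }
    return *bp ? bp : NULL;     /* empty slot: index not present */
}
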
